diff --git a/.clang-format b/.clang-format index aff93435f5..8b58306273 100644 --- a/.clang-format +++ b/.clang-format @@ -19,7 +19,7 @@ BasedOnStyle: Google IndentWidth: 2 TabWidth: 2 ContinuationIndentWidth: 4 -AccessModifierOffset: -2 # The private/protected/public has no indent in class +AccessModifierOffset: -1 # The private/protected/public has no indent in class Standard: Cpp11 AllowAllParametersOfDeclarationOnNextLine: true BinPackParameters: false diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 6140340890..e718b32cb6 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -23,7 +23,7 @@ repos: - id: clang-format-with-version-check name: clang-format description: Format files with ClangFormat. - entry: bash ./.clang_format.hook -i + entry: bash ./tools/codestyle/clang_format.hook -i language: system files: \.(c|cc|cxx|cpp|cu|h|hpp|hxx|proto)$ - repo: local @@ -34,6 +34,14 @@ repos: entry: bash ./tools/codestyle/cpplint_pre_commit.hook language: system files: \.(c|cc|cxx|cpp|cu|h|hpp|hxx)$ +- repo: local + hooks: + - id: pylint-doc-string + name: pylint + description: Check python docstring style using docstring_checker. + entry: bash ./tools/codestyle/pylint_pre_commit.hook + language: system + files: \.(py)$ - repo: https://github.com/PaddlePaddle/pre-commit-golang sha: 8337620115c25ff8333f1b1a493bd031049bd7c0 hooks: @@ -44,7 +52,7 @@ repos: hooks: - id: copyright_checker name: copyright_checker - entry: python ./.copyright.hook + entry: python ./tools/codestyle/copyright.hook language: system files: \.(c|cc|cxx|cpp|cu|h|hpp|hxx|proto|py)$ exclude: (?!.*third_party)^.*$ | (?!.*book)^.*$ diff --git a/.travis.yml b/.travis.yml index 3391e2c3ca..361136ac2c 100644 --- a/.travis.yml +++ b/.travis.yml @@ -18,6 +18,8 @@ env: addons: ssh_known_hosts: 13.229.163.131 before_install: + # For pylint dockstring checker + - sudo pip install pylint pytest astroid isort - | function timeout() { perl -e 'alarm shift; exec @ARGV' "$@"; } script: @@ -25,15 +27,6 @@ script: # 43min timeout paddle/scripts/paddle_docker_build.sh ${JOB} if [ $? -eq 0 ] || [ $? -eq 142 ]; then true; else exit 1; fi; - - | - if [[ "$JOB" != "doc" ]]; then exit 0; fi; - # For document only - if [[ "$TRAVIS_PULL_REQUEST" != "false" ]]; then exit 0; fi; - if [[ "$TRAVIS_BRANCH" != "develop" && ! "$TRAVIS_BRANCH" =~ ^v[[:digit:]]+\.[[:digit:]]+(\.[[:digit:]]+)?(-\S*)?$ ]]; then exit 0; fi; - export DEPLOY_DOCS_SH=https://raw.githubusercontent.com/PaddlePaddle/PaddlePaddle.org/master/scripts/deploy/deploy_docs.sh - export DOCS_DIR=`pwd` - cd .. 
- curl $DEPLOY_DOCS_SH | bash -s $CONTENT_DEC_PASSWD $TRAVIS_BRANCH $DOCS_DIR $DOCS_DIR/build/doc/ notifications: email: on_success: change diff --git a/AUTHORS.md b/AUTHORS.md index 4ee0542098..41b7193677 100644 --- a/AUTHORS.md +++ b/AUTHORS.md @@ -4,6 +4,7 @@ | backyes | Yan-Fei Wang | | baiyfbupt | Yi-Fan Bai | | beckett1124 | Bin Qi | +| ChengduoZH | Cheng-Duo Zhao| | chengxiaohua1105 | Xiao-Hua Cheng | | cxwangyi, yiwangbaidu, wangkuiyi | Yi Wang | | cxysteven | Xing-Yi Cheng | @@ -21,6 +22,7 @@ | jczaja | Jacek Czaja | | JiayiFeng | Jia-Yi Feng | | kbinias | Krzysztof Binias | +| kexinzhao | Ke-Xin Zhao | | kuke | Yi-Bing Liu | | lcy-seso | Ying Cao | | lipeng-unisound | Peng Li | @@ -44,6 +46,7 @@ | tianbingsz | Tian-Bing Xu | | tpatejko | Tomasz Patejko | | typhoonzero | Yi Wu | +| velconia | Qi-Yang Min | | wanghaoshuang | Hao-Shuang Wang | | wangyang59 | Yang Wang | | wangzhen-nlp | Zhen Wang | diff --git a/CMakeLists.txt b/CMakeLists.txt index 23bbe829ac..920c20d6f8 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -25,7 +25,6 @@ message(STATUS "CXX compiler: ${CMAKE_CXX_COMPILER}, version: " message(STATUS "C compiler: ${CMAKE_C_COMPILER}, version: " "${CMAKE_C_COMPILER_ID} ${CMAKE_C_COMPILER_VERSION}") -find_package(Sphinx) if(NOT CMAKE_CROSSCOMPILING) find_package(CUDA QUIET) endif(NOT CMAKE_CROSSCOMPILING) @@ -42,7 +41,6 @@ option(WITH_MKL "Compile PaddlePaddle with MKL support." ${AVX_FO option(WITH_DSO "Compile PaddlePaddle with dynamic linked CUDA" ON) option(WITH_TESTING "Compile PaddlePaddle with unit testing" OFF) option(WITH_SWIG_PY "Compile PaddlePaddle with inference api" ON) -option(WITH_STYLE_CHECK "Compile PaddlePaddle with style check" ON) option(WITH_PYTHON "Compile PaddlePaddle with python interpreter" ON) option(WITH_DOUBLE "Compile PaddlePaddle with double precision" OFF) option(WITH_RDMA "Compile PaddlePaddle with RDMA support" OFF) @@ -57,10 +55,25 @@ option(WITH_FLUID_ONLY "Compile PaddlePaddle fluid only" OFF) option(WITH_GOLANG "Compile PaddlePaddle with GOLANG" OFF) option(GLIDE_INSTALL "Download and install go dependencies " ON) option(USE_NNPACK "Compile PaddlePaddle with NNPACK library" OFF) -option(WITH_DISTRIBUTE "Compile with grpc distributed support" OFF) +option(WITH_DISTRIBUTE "Compile with distributed support" OFF) option(USE_EIGEN_FOR_BLAS "Use matrix multiplication in Eigen" OFF) +option(EIGEN_USE_THREADS "Compile with multi-threaded Eigen" OFF) option(WITH_ARM_FP16 "Use half precision support on armv8.2-a cpu" OFF) option(WITH_FAST_BUNDLE_TEST "Bundle tests that can be run in a single process together to reduce launch overhead" OFF) +option(WITH_CONTRIB "Compile the third-party contributation" OFF) +option(REPLACE_ENFORCE_GLOG "Replace PADDLE_ENFORCE with glog/CHECK for better debug." 
OFF) +option(WITH_ANAKIN "Compile with Anakin library" OFF) +option(WITH_GRPC "Use grpc as the default rpc framework" ${WITH_DISTRIBUTE}) +option(WITH_BRPC_RDMA "Use brpc rdma as the rpc protocal" OFF) +option(WITH_INFERENCE "Compile fluid inference library" ON) +option(WITH_SYSTEM_BLAS "Use system blas library" OFF) +option(PY_VERSION "Compile PaddlePaddle with python3 support" ${PY_VERSION}) + +# PY_VERSION +if(NOT PY_VERSION) + set(PY_VERSION 2.7) +endif() +set(PYBIND11_PYTHON_VERSION ${PY_VERSION}) # CMAKE_BUILD_TYPE if(NOT CMAKE_BUILD_TYPE) @@ -98,9 +111,17 @@ if(ANDROID OR IOS) add_definitions(-DPADDLE_MOBILE_INFERENCE) endif() +if (APPLE OR WIN32) + set(WITH_MKL OFF CACHE STRING + "Disable MKL for building on mac and windows" FORCE) +endif() + set(THIRD_PARTY_PATH "${CMAKE_BINARY_DIR}/third_party" CACHE STRING "A path setting third party libraries download & build directories.") +set(FLUID_INSTALL_DIR "${CMAKE_BINARY_DIR}/fluid_install_dir" CACHE STRING + "A path setting fluid shared and static libraries") + if (WITH_C_API AND WITH_PYTHON) message(WARNING "It is suggest not embedded a python interpreter in Paddle " "when using C-API. It will give an unpredictable behavior when using a " @@ -117,17 +138,30 @@ else() set(THIRD_PARTY_BUILD_TYPE Release) endif() +if(WITH_MKL) + option(MKL_SPLIT_GEMM "PaddlePaddle MKL gemm would split to small ones" OFF) + if (MKL_SPLIT_GEMM) + add_definitions(-DPADDLE_MKL_SPLIT_GEMM) + endif() +endif() set(WITH_MKLML ${WITH_MKL}) -if (WITH_MKL AND AVX2_FOUND) - set(WITH_MKLDNN ON) -else() - message(STATUS "Do not have AVX2 intrinsics and disabled MKL-DNN") - set(WITH_MKLDNN OFF) +if (NOT DEFINED WITH_MKLDNN) + if (WITH_MKL AND AVX2_FOUND) + set(WITH_MKLDNN ON) + else() + message(STATUS "Do not have AVX2 intrinsics and disabled MKL-DNN") + set(WITH_MKLDNN OFF) + endif() endif() +if (REPLACE_ENFORCE_GLOG) + add_definitions("-DREPLACE_ENFORCE_GLOG") +endif() ######################################################################################## include(external/mklml) # download mklml package +include(external/xbyak) # download xbyak package +include(external/libxsmm) # download, build, install libxsmm include(external/zlib) # download, build, install zlib include(external/gflags) # download, build, install gflags include(external/glog) # download, build, install glog @@ -143,17 +177,45 @@ include(external/any) # download libn::any include(external/eigen) # download eigen3 include(external/pybind11) # download pybind11 include(external/cares) -include(external/grpc) +include(external/cub) + +if(WITH_DISTRIBUTE) + if(WITH_GRPC) + include(external/grpc) + message(STATUS "Use grpc framework.") + else() + message(STATUS "Use brpc framework.") + include(external/leveldb) + include(external/brpc) + endif() +endif() + +if(WITH_BRPC_RDMA) + message(STATUS "Use brpc with rdma.") + if(WITH_GRPC) + message(FATAL_ERROR "Can't use grpc with brpc rdma.") + endif() + if(NOT WITH_DISTRIBUTE) + message(FATAL_ERROR "Can't use brpc rdma in no distribute env.") + endif() +endif() + include(external/snappy) # download snappy include(external/snappystream) include(external/threadpool) +set(WITH_ANAKIN OFF CACHE STRING "Disable Anakin first, will add it later." 
FORCE) +if(WITH_GPU) + include(cuda) + include(tensorrt) + include(external/anakin) +endif() + include(cudnn) # set cudnn libraries, must before configure include(cupti) include(configure) # add paddle env configuration include(generic) # simplify cmake module include(package) # set paddle packages -include(cpplint) # set paddle c++ style include(ccache) # set ccache for compilation include(util) # set unittest and link libs include(rdma) # set rdma libraries @@ -164,7 +226,7 @@ include(inference_lib) # add paddle fluid inference libraries include_directories("${PADDLE_SOURCE_DIR}") -include_directories("${PADDLE_SOURCE_DIR}/paddle/cuda/include") +include_directories("${PADDLE_SOURCE_DIR}/paddle/legacy/cuda/include") include_directories("${CMAKE_CURRENT_BINARY_DIR}/proto") include_directories("${CMAKE_CURRENT_BINARY_DIR}/go/pserver/client/c") @@ -177,11 +239,6 @@ set(EXTERNAL_LIBS ${PYTHON_LIBRARIES} ) -if(WITH_GPU) - include(cuda) - include(tensorrt) -endif(WITH_GPU) - if(WITH_AMD_GPU) find_package(HIP) include(hip) @@ -191,6 +248,10 @@ if(WITH_MKLML) list(APPEND EXTERNAL_LIBS ${MKLML_IOMP_LIB}) endif() +if(WITH_LIBXSMM) + list(APPEND EXTERNAL_LIBS ${LIBXSMM_LIBS}) +endif() + if(WITH_MKLDNN) list(APPEND EXTERNAL_LIBS ${MKLDNN_LIB}) endif() @@ -202,10 +263,10 @@ endif(USE_NNPACK) add_subdirectory(proto) -if(NOT MOBILE_INFERENCE) +if(NOT MOBILE_INFERENCE AND NOT WITH_FLUID_ONLY) # "add_subdirectory(go)" should be placed after the following loine, # because it depends on paddle/optimizer. - add_subdirectory(paddle/optimizer) + add_subdirectory(paddle/legacy/optimizer) endif() # "add_subdirectory(paddle)" and "add_subdirectory(python)" should be @@ -226,5 +287,7 @@ if(WITH_PYTHON) endif() if(WITH_DOC) + find_package(Sphinx REQUIRED) + find_python_module(recommonmark REQUIRED) add_subdirectory(doc) endif() diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 3c36cffcb4..b878f37a5b 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -58,6 +58,8 @@ PaddlePaddle uses this [Git branching model](http://nvie.com/posts/a-successful- create mode 100644 233 ``` + NOTE: The `yapf` installed by `pip install pre-commit` and `conda install -c conda-forge pre-commit` is slightly different. Paddle developers use `pip install pre-commit`. + 1. Build and test Users can build PaddlePaddle natively on Linux and Mac OS X. But to unify the building environment and to make it easy for debugging, the recommended way is [using Docker](https://github.com/PaddlePaddle/Paddle/blob/develop/doc/howto/dev/build_en.md). 
@@ -157,4 +159,4 @@ This will enable VLOG messages generated by `buddy_allocator.{h,cc}` and in the - verbose level 1: [framework](https://github.com/PaddlePaddle/Paddle/tree/develop/paddle/framework) - verbose level 3: [operators](https://github.com/PaddlePaddle/Paddle/tree/develop/paddle/operators) - verbose level 5: [memory](https://github.com/PaddlePaddle/Paddle/tree/develop/paddle/memory), [platform](https://github.com/PaddlePaddle/Paddle/tree/develop/paddle/platform) -- verbose level 7: [math](https://github.com/PaddlePaddle/Paddle/tree/develop/paddle/math) +- verbose level 7: [math](https://github.com/PaddlePaddle/Paddle/tree/develop/paddle/legacy/math) diff --git a/Dockerfile b/Dockerfile index 164fe84904..402adee2ea 100644 --- a/Dockerfile +++ b/Dockerfile @@ -23,13 +23,13 @@ ENV HOME /root COPY ./paddle/scripts/docker/root/ /root/ RUN apt-get update && \ - apt-get install -y --allow-downgrades \ - git python-pip python-dev openssh-server bison \ + apt-get install -y --allow-downgrades patchelf \ + git python-pip python-dev python-opencv openssh-server bison \ libnccl2=2.1.2-1+cuda8.0 libnccl-dev=2.1.2-1+cuda8.0 \ wget unzip unrar tar xz-utils bzip2 gzip coreutils ntp \ curl sed grep graphviz libjpeg-dev zlib1g-dev \ python-matplotlib gcc-4.8 g++-4.8 \ - automake locales clang-format swig doxygen cmake \ + automake locales clang-format swig cmake \ liblapack-dev liblapacke-dev \ clang-3.8 llvm-3.8 libclang-3.8-dev \ net-tools libtool ccache && \ @@ -70,7 +70,7 @@ RUN localedef -i en_US -f UTF-8 en_US.UTF-8 # specify sphinx version as 1.5.6 and remove -U option for [pip install -U # sphinx-rtd-theme] since -U option will cause sphinx being updated to newest # version(1.7.1 for now), which causes building documentation failed. -RUN pip install --upgrade pip==9.0.3 && \ +RUN easy_install -U pip && \ pip install -U wheel && \ pip install -U docopt PyYAML sphinx==1.5.6 && \ pip install sphinx-rtd-theme==0.1.9 recommonmark @@ -79,6 +79,9 @@ RUN pip install pre-commit 'ipython==5.3.0' && \ pip install 'ipykernel==4.6.0' 'jupyter==1.0.0' && \ pip install opencv-python +#For docstring checker +RUN pip install pylint pytest astroid isort LinkChecker + COPY ./python/requirements.txt /root/ RUN pip install -r /root/requirements.txt @@ -101,6 +104,3 @@ RUN echo 'root:root' | chpasswd RUN sed -ri 's/^PermitRootLogin\s+.*/PermitRootLogin yes/' /etc/ssh/sshd_config RUN sed -ri 's/UsePAM yes/#UsePAM yes/g' /etc/ssh/sshd_config EXPOSE 22 - -# development image default do build work -CMD ["bash", "/paddle/paddle/scripts/docker/build.sh"] diff --git a/Dockerfile.android b/Dockerfile.android index 848a7eba6f..48db2efea2 100644 --- a/Dockerfile.android +++ b/Dockerfile.android @@ -40,5 +40,3 @@ RUN mkdir -p ${ANDROID_TOOLCHAINS_DIR} && \ unzip -q android-ndk-r14b-linux-x86_64.zip && \ mv android-ndk-r14b ${ANDROID_NDK_HOME} && \ rm -rf /opt/android-ndk-tmp - -CMD ["bash", "/paddle/paddle/scripts/docker/build_android.sh"] diff --git a/README.md b/README.md index a3b13fe79c..a67cb8ad43 100644 --- a/README.md +++ b/README.md @@ -4,7 +4,6 @@ [![Build Status](https://travis-ci.org/PaddlePaddle/Paddle.svg?branch=develop)](https://travis-ci.org/PaddlePaddle/Paddle) [![Documentation Status](https://img.shields.io/badge/docs-latest-brightgreen.svg?style=flat)](http://www.paddlepaddle.org/docs/develop/documentation/en/getstarted/index_en.html) [![Documentation Status](https://img.shields.io/badge/中文文档-最新-brightgreen.svg)](http://www.paddlepaddle.org/docs/develop/documentation/zh/getstarted/index_cn.html) 
-[![Coverage Status](https://coveralls.io/repos/github/PaddlePaddle/Paddle/badge.svg?branch=develop)](https://coveralls.io/github/PaddlePaddle/Paddle?branch=develop)
 [![Release](https://img.shields.io/github/release/PaddlePaddle/Paddle.svg)](https://github.com/PaddlePaddle/Paddle/releases)
 [![License](https://img.shields.io/badge/license-Apache%202-blue.svg)](LICENSE)
@@ -19,6 +18,22 @@ learning to many products at Baidu.
 Our vision is to enable deep learning for everyone via PaddlePaddle.
 Please refer to our [release announcement](https://github.com/PaddlePaddle/Paddle/releases) to track the latest feature of PaddlePaddle.
+
+### Latest PaddlePaddle Release: [Fluid 0.14.0](https://github.com/PaddlePaddle/Paddle/tree/v0.14.0)
+### Install Latest Stable Release:
+```
+# Linux CPU
+pip install paddlepaddle
+# Linux GPU cuda9cudnn7
+pip install paddlepaddle-gpu
+# Linux GPU cuda8cudnn7
+pip install paddlepaddle-gpu==0.14.0.post87
+# Linux GPU cuda8cudnn5
+pip install paddlepaddle-gpu==0.14.0.post85
+
+# For installation on other platforms, refer to http://paddlepaddle.org/
+```
+
 ## Features
 
 - **Flexibility**
 
@@ -62,9 +77,9 @@ Please refer to our [release announcement](https://github.com/PaddlePaddle/Paddl
 ## Installation
 It is recommended to check out the
-[Docker installation guide](http://www.paddlepaddle.org/docs/develop/documentation/en/getstarted/build_and_install/docker_install_en.html)
+[Docker installation guide](http://www.paddlepaddle.org/docs/develop/documentation/fluid/en/build_and_install/docker_install_en.html)
 before looking into the
-[build from source guide](http://www.paddlepaddle.org/docs/develop/documentation/en/getstarted/build_and_install/build_from_source_en.html).
+[build from source guide](http://www.paddlepaddle.org/docs/develop/documentation/fluid/en/build_and_install/build_from_source_en.html).
 
 ## Documentation
diff --git a/benchmark/.gitignore b/benchmark/.gitignore
index 7b66e8a5b5..fb4114356d 100644
--- a/benchmark/.gitignore
+++ b/benchmark/.gitignore
@@ -7,3 +7,6 @@ paddle/rnn/imdb.pkl
 caffe/image/logs
 tensorflow/image/logs
 tensorflow/rnn/logs
+fluid/models/*.pyc
+fluid/logs
+fluid/nohup.out
diff --git a/benchmark/cluster/README.md b/benchmark/cluster/README.md
deleted file mode 100644
index 64816098a5..0000000000
--- a/benchmark/cluster/README.md
+++ /dev/null
@@ -1,196 +0,0 @@
-# Cluster Training Benchmark
-
-## Setup
-
-- Platform
-  - Kubernetes: v1.6.2
-  - Linux Kernel: v3.10.0
-
-- Resource
-  - CPU: 10 Cores per Pod
-  - Memory: 5GB per Pod
-
-- Docker Image
-
-  We use different base Docker Images to run the benchmark on Kubernetes:
-
-  - PaddlePaddle v2: paddlepaddle/paddle:0.11.0
-  - PaddlePaddle Fluid: paddlepaddle/paddle:[commit-id]
-  - TensorFlow: tensorflow/tensorflow:1.5.0-rc0
-
-- Model
-  vgg16 is used in this benchmark.
-
-## Cases
-
-- Variable
-  - Batch Size of training data.
-  - PServer count of the training job.
-  - The number of trainers.
-
-- Invariant
-  - The resource of trainer/pserver Pod.
-
-### Measure the Performance for Different Batch Size
-
-- PServer Count: 40
-- Trainer Count: 100
-- Metrics: mini-batch / sec
-
-| Batch Size | 32 | 64 | 128 | 256 |
-| --- | --- | --- | --- | --- |
-| PaddlePaddle Fluid | - | - | - | - |
-| PaddlePaddle v2 | - | - | - | - |
-| TensorFlow | - | - | - | - |
-
-### Measure the Performance for Different PServer Count
-
-- Trainer Count: 100
-- Batch Size: 64
-- Metrics: mini-batch / sec
-
-| PServer Count | 10 | 20 | 40 | 60 |
-| --- | --- | --- | --- | --- |
-| PaddlePaddle Fluid | - | - | - | - |
-| PaddlePaddle v2 | - | - | - | - |
-| TensorFlow | - | - | - | - |
-
-### Measure Parallel Efficiency By Increasing Trainer Count
-
-- PServer Count: 20
-- Batch Size: 64
-- Metrics:
-
-$S = \frac{T_1}{T_N}$
-
-where $S$ is the ratio of $T_1$ over $T_N$, the training times with 1 and $N$ trainers respectively.
-The parallel efficiency is:
-
-$E = \frac{S}{N}$
-
-| Trainer Count | 1 | 10 | 20 | 30 | 40 | 50 | 60 | 70 | 80 | 90 | 100 |
-| --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- |
-| PaddlePaddle Fluid | - | - | - | - | - | - | - | - | - | - | - |
-| PaddlePaddle v2 | - | - | - | - | - | - | - | - | - | - | - |
-| TensorFlow | - | - | - | - | - | - | - | - | - | - | - |
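Since the cells in the table above were never filled in, here is a worked example of the two formulas with purely hypothetical timings (illustrative assumptions, not measured results):

```latex
% Hypothetical timings: T_1 = 100 min with 1 trainer, T_{20} = 10 min with 20 trainers.
S = \frac{T_1}{T_{20}} = \frac{100}{10} = 10 \qquad \text{(speedup)}
E = \frac{S}{N} = \frac{10}{20} = 0.5 \qquad \text{(parallel efficiency of 50\%)}
```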
-
-## Reproduce the benchmark
-
-TODO
diff --git a/benchmark/cluster/vgg16/Dockerfile b/benchmark/cluster/vgg16/Dockerfile
deleted file mode 100644
index 13ad8e1b62..0000000000
--- a/benchmark/cluster/vgg16/Dockerfile
+++ /dev/null
@@ -1,35 +0,0 @@
-FROM nvidia/cuda:8.0-cudnn5-runtime-ubuntu16.04
-
-# you can get mirror list here:
-#   https://launchpad.net/ubuntu/+archivemirrors
-ARG UBUNTU_MIRROR
-RUN /bin/bash -c 'if [[ -n ${UBUNTU_MIRROR} ]]; then sed -i 's#http://archive.ubuntu.com/ubuntu#${UBUNTU_MIRROR}#g' /etc/apt/sources.list; fi'
-
-RUN apt-get update && apt-get install -y python python-dev python-pip iputils-ping libgtk2.0-dev
-RUN pip install -U kubernetes opencv-python
-
-RUN pip install paddlepaddle
-# if the network is slow, you may need to add a proxy here.
-# ENV https_proxy=
-RUN sh -c 'echo "import paddle.v2 as paddle\npaddle.dataset.cifar.train10()" | python'
-RUN pip uninstall -y paddlepaddle
-# unset the proxy if it was set.
-# ENV https_proxy=""
-
-# NOTE: By default, CI-built wheel packages turn WITH_DISTRIBUTE=OFF,
-# so we must build one with distribute support to install in this image.
-ADD *.whl /
-RUN pip install /*.whl && rm -f /*.whl
-ENV LD_LIBRARY_PATH=/usr/local/lib
-
-# tf k8s
-RUN pip install tensorflow==1.4.0
-ADD tf_k8s /usr/bin
-RUN chmod +x /usr/bin/tf_k8s
-ADD vgg16_tf.py /workspace/
-
-# below lines may change a lot for debugging
-ADD https://raw.githubusercontent.com/PaddlePaddle/cloud/develop/docker/paddle_k8s /usr/bin
-ADD https://raw.githubusercontent.com/PaddlePaddle/cloud/develop/docker/k8s_tools.py /root
-RUN chmod +x /usr/bin/paddle_k8s
-ADD vgg16_fluid.py vgg16_v2.py /workspace/
diff --git a/benchmark/cluster/vgg16/README.md b/benchmark/cluster/vgg16/README.md
deleted file mode 100644
index d56a912b9b..0000000000
--- a/benchmark/cluster/vgg16/README.md
+++ /dev/null
@@ -1,195 +0,0 @@
-# Performance for Distributed vgg16
-
-## Test Result
-
-### Hardware Information
-
-- CPU: Intel(R) Xeon(R) CPU E5-2620 v4 @ 2.10GHz
-- cpu MHz: 2101.000
-- cache size: 20480 KB
-
-### Blas settings
-
-Setting environment variable: `MKL_NUM_THREADS=1`.
-
-### Single Node Single Thread
-
-- Metrics: samples / sec
-
-| Batch Size | 32 | 64 | 128 | 256 |
-| --- | --- | --- | --- | --- |
-| PaddlePaddle Fluid | 15.44 | 16.32 | 16.74 | 16.79 |
-| PaddlePaddle v2 | 15.97 | 17.04 | 17.60 | 17.83 |
-| TensorFlow | 9.09 | 9.10 | 9.24 | 8.66 |
-
-### Different Batch Size
-
-- PServer Count: 10
-- Trainer Count: 20
-- Metrics: samples / sec
-
-| Batch Size | 32 | 64 | 128 | 256 |
-| --- | --- | --- | --- | --- |
-| PaddlePaddle Fluid | 190.20 | 222.15 | 247.40 | 258.18 |
-| PaddlePaddle v2 | 170.96 | 233.71 | 256.14 | 329.23 |
-| TensorFlow | - | - | - | - |
-
-### Accelerate Rate
-
-- PServer Count: 20
-- Batch Size: 128
-- Metrics: samples / sec
-
-| Trainer Count | 20 | 40 | 80 | 100 |
-| --- | --- | --- | --- | --- |
-| PaddlePaddle Fluid | 263.29 (78.64%) | 518.80 (77.47%) | 836.26 (62.44%) | 1019.29 (60.89%) |
-| PaddlePaddle v2 (need more tests) | 326.85 (92.85%) | 534.58 (75.93%) | 853.30 (60.60%) | 1041.99 (59.20%) |
-| TensorFlow | - | - | - | - |
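The parenthesized percentages are not defined in the original text, but they are consistent with parallel efficiency computed against the single-node single-thread throughput at batch size 128 (16.74 imgs/s for Fluid, 17.60 imgs/s for v2, from the first table). A small sketch to verify that reading (the interpretation is inferred from the numbers, not stated by the authors):

```python
# Check that the percentages in the "Accelerate Rate" table equal
# throughput / (trainer_count * single_thread_throughput), where the
# single-thread baselines at batch size 128 come from the
# "Single Node Single Thread" table above.
baselines = {"PaddlePaddle Fluid": 16.74, "PaddlePaddle v2": 17.60}
throughput = {
    "PaddlePaddle Fluid": {20: 263.29, 40: 518.80, 80: 836.26, 100: 1019.29},
    "PaddlePaddle v2": {20: 326.85, 40: 534.58, 80: 853.30, 100: 1041.99},
}

for impl, rows in throughput.items():
    for trainers in sorted(rows):
        eff = rows[trainers] / (trainers * baselines[impl])
        # Agrees with the table to within 0.01% rounding,
        # e.g. Fluid with 20 trainers -> 78.64%.
        print("%s, %3d trainers: %.2f%%" % (impl, trainers, 100 * eff))
```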
-
-### Different PServer Count
-
-- Trainer Count: 60
-- Batch Size: 128
-- Metrics: samples / sec
-
-| PServer Count | 3 | 6 | 10 | 20 |
-| --- | --- | --- | --- | --- |
-| PaddlePaddle Fluid (should fix in next PR) | 589.1 | 592.6 | 656.4 | 655.8 |
-| PaddlePaddle v2 (need more tests) | 593.4 | 791.3 | 729.7 | 821.7 |
-| TensorFlow | - | - | - | - |
-
-*The performance gap between Fluid and v2 comes from network interference.*
-
-
-## Steps to Run the Performance Test
-
-1. You must re-compile PaddlePaddle and enable `-DWITH_DISTRIBUTE` to build PaddlePaddle with distributed support.
-1. When the build finishes, copy the output `whl` package located under `build/python/dist` to the current directory.
-1. Run `docker build -t [image:tag] .` to build the docker image and run `docker push [image:tag]` to push the image to the repository so kubernetes can find it.
-1. Run `kubectl create -f pserver.yaml && kubectl create -f trainer.yaml` to start the job on your kubernetes cluster (you must configure the `kubectl` client before this step).
-1. Run `kubectl get po` to get running pods, and run `kubectl logs [podID]` to fetch the pod log of pservers and trainers.
-
-Check the logs for the distributed training progress and analyze the performance.
-
-## Enable Verbose Logs
-
-Edit `pserver.yaml` and `trainer.yaml` and add the environment variables `GLOG_v=3` and `GLOG_logtostderr=1` to see what happened in detail.
diff --git a/benchmark/cluster/vgg16/fluid_pserver.yaml b/benchmark/cluster/vgg16/fluid_pserver.yaml
deleted file mode 100644
index ee8b0763b6..0000000000
--- a/benchmark/cluster/vgg16/fluid_pserver.yaml
+++ /dev/null
@@ -1,72 +0,0 @@
-apiVersion: extensions/v1beta1
-kind: ReplicaSet
-metadata:
-  name: vgg16job-pserver
-spec:
-  replicas: 10
-  template:
-    metadata:
-      labels:
-        paddle-job-pserver: vgg16job
-    spec:
-      hostNetwork: true
-      imagePullSecrets:
-      - name: job-registry-secret
-      containers:
-      - name: pserver
-        image: "registry.baidu.com/paddlepaddle/fluid_benchmark:vgg16"
-        imagePullPolicy: Always
-        ports:
-        - name: jobport-30236
-          containerPort: 30236
-        env:
-        - name: PADDLE_JOB_NAME
-          value: vgg16job
-        - name: MKL_NUM_THREADS
-          value: "1"
-        - name: TRAINING_ROLE
-          value: "PSERVER"
-        - name: TRAINERS
-          value: "20"
-        - name: PSERVERS
-          value: "10"
-        - name: TOPOLOGY
-          value: ""
-        - name: ENTRY
-          value: "MKL_NUM_THREADS=1 python /workspace/vgg16_fluid.py --local 0"
-        - name: TRAINER_PACKAGE
-          value: "/workspace"
-        - name: PADDLE_INIT_PORT
-          value: "30236"
-        - name: PADDLE_INIT_NICS
-          value: "xgbe0"
-        - name: PADDLE_INIT_TRAINER_COUNT
-          value: "1"
-        - name: PADDLE_INIT_PORTS_NUM
-          value: "1"
-        - name: PADDLE_INIT_PORTS_NUM_FOR_SPARSE
-          value: "1"
-        - name: PADDLE_INIT_NUM_GRADIENT_SERVERS
-          value: "20"
-        - name: PADDLE_INIT_NUM_PASSES
-          value: "1"
-        - name: PADDLE_INIT_USE_GPU
-          value: "0"
-        - name: LD_LIBRARY_PATH
-          value: "/usr/local/lib:/usr/local/nvidia/lib64"
-        - name: NAMESPACE
-          valueFrom:
-            fieldRef:
-              fieldPath: "metadata.namespace"
-        - name: POD_IP
-          valueFrom:
-            fieldRef:
-              fieldPath: "status.podIP"
-        command: ["paddle_k8s", "start_fluid"]
-        resources:
-          requests:
-            memory: 10Gi
-            cpu: 4
-          limits:
-            memory: 10Gi
-            cpu: 4
diff --git a/benchmark/cluster/vgg16/fluid_trainer.yaml b/benchmark/cluster/vgg16/fluid_trainer.yaml
deleted file mode 100644
index 3d56caac00..0000000000
--- a/benchmark/cluster/vgg16/fluid_trainer.yaml
+++ /dev/null
@@ -1,69 +0,0 @@
-apiVersion: batch/v1
-kind: Job
-metadata:
-  name: vgg16job-trainer
-spec:
-  parallelism: 20
-  completions: 20
-  template:
-    metadata:
-      labels:
-        paddle-job: vgg16job
-    spec:
-      imagePullSecrets:
-      - name: job-registry-secret
-      hostNetwork: true
-      containers:
-      - name: trainer
-        image: "registry.baidu.com/paddlepaddle/fluid_benchmark:vgg16"
-        imagePullPolicy: Always
-        command: ["paddle_k8s", "start_fluid"]
-        env:
-        - name: PADDLE_JOB_NAME
-          value: vgg16job
-        - name:
TRAINING_ROLE - value: "TRAINER" - - name: TRAINERS - value: "20" - - name: PSERVERS - value: "10" - - name: TOPOLOGY - value: "" - - name: ENTRY - value: "MKL_NUM_THREADS=1 python /workspace/vgg16_fluid.py --local 0 --batch_size 128" - - name: TRAINER_PACKAGE - value: "/workspace" - - name: PADDLE_INIT_PORT - value: "30236" - - name: PADDLE_INIT_NICS - value: "xgbe0" - - name: PADDLE_INIT_TRAINER_COUNT - value: "1" - - name: PADDLE_INIT_PORTS_NUM - value: "1" - - name: PADDLE_INIT_PORTS_NUM_FOR_SPARSE - value: "1" - - name: PADDLE_INIT_NUM_GRADIENT_SERVERS - value: "20" - - name: PADDLE_INIT_NUM_PASSES - value: "1" - - name: PADDLE_INIT_USE_GPU - value: "0" - - name: LD_LIBRARY_PATH - value: "/usr/local/lib:/usr/local/nvidia/lib64" - - name: NAMESPACE - valueFrom: - fieldRef: - fieldPath: "metadata.namespace" - - name: POD_IP - valueFrom: - fieldRef: - fieldPath: "status.podIP" - resources: - requests: - memory: 40Gi - cpu: 2 - limits: - memory: 40Gi - cpu: 2 - restartPolicy: Never diff --git a/benchmark/cluster/vgg16/run_vgg_dist.sh b/benchmark/cluster/vgg16/run_vgg_dist.sh deleted file mode 100644 index 8c0501439e..0000000000 --- a/benchmark/cluster/vgg16/run_vgg_dist.sh +++ /dev/null @@ -1,21 +0,0 @@ -#!/bin/bash - -# Update to point to the source file. -VGG_SRC="vgg16_fluid.py" - -export TRAINING_ROLE=PSERVER -export TRAINERS=2 -export POD_IP=127.0.0.1 -export PADDLE_INIT_PORT=6174 -MKL_NUM_THREADS=1 python -u ${VGG_SRC} --local 0 --ps_host=127.0.0.1:6174 --trainer_hosts=127.0.0.1:6174 & - -# Need to wait for the ps to start first. -sleep 10 -echo "done start ps" - -export TRAINING_ROLE=TRAINER -export TRAINERS=2 -export POD_IP=127.0.0.1 -export PADDLE_INIT_PORT=6174 -CUDA_VISIBLE_DEVICES=4 MKL_NUM_THREADS=1 python -u ${VGG_SRC} --local 0 --ps_host=127.0.0.1:6174 --trainer_hosts=127.0.0.1:6174 --device=GPU --task_index=0 & -CUDA_VISIBLE_DEVICES=5 MKL_NUM_THREADS=1 python -u ${VGG_SRC} --local 0 --ps_host=127.0.0.1:6174 --trainer_hosts=127.0.0.1:6174 --device=GPU --task_index=1 & diff --git a/benchmark/cluster/vgg16/tf_k8s b/benchmark/cluster/vgg16/tf_k8s deleted file mode 100644 index 4fc263d5f6..0000000000 --- a/benchmark/cluster/vgg16/tf_k8s +++ /dev/null @@ -1,82 +0,0 @@ -#!/bin/bash -check_trainer_ret() { - ret=$1 - stdbuf -oL echo "job returned $ret...setting pod return message..." - stdbuf -oL echo "===============================" - - if [ $ret -eq 136 ] ; then - echo "Error Arithmetic Operation(Floating Point Exception)" > /dev/termination-log - elif [ $ret -eq 139 ] ; then - echo "Segmentation Fault" > /dev/termination-log - elif [ $ret -eq 1 ] ; then - echo "General Error" > /dev/termination-log - elif [ $ret -eq 134 ] ; then - echo "Program Abort" > /dev/termination-log - fi - stdbuf -oL echo "termination log wroted..." 
- exit $ret -} - -g_pservers="" -g_trainers="" - -wait_running_pods(){ - pserver_label="tf-job-pserver=${JOB_NAME}" - trainer_label="tf-job-trainer=${JOB_NAME}" - - stdbuf -oL python /root/k8s_tools.py wait_pods_running ${pserver_label} ${PSERVERS_NUM} - stdbuf -oL python /root/k8s_tools.py wait_pods_running ${trainer_label} ${TRAINERS_NUM} - - g_pservers=$(python /root/k8s_tools.py fetch_endpoints ${pserver_label} ${PORT}) - g_trainers=$(python /root/k8s_tools.py fetch_endpoints ${trainer_label} ${PORT}) -} - -start_tf_pserver(){ - wait_running_pods - - label="tf-job-pserver=${JOB_NAME}" - pserver_id=$(python /root/k8s_tools.py fetch_id ${label}) - - cmd="${ENTRY} --ps_hosts=${g_pservers} --worker_hosts=${g_trainers} \ - --job_name=${TF_JOB_NAME} --task_index=${pserver_id}" - - stdbuf -oL sh -c "cd ${TRAINER_PACKAGE} && ${cmd}" -} - -start_tf_trainer(){ - wait_running_pods - - label="tf-job-trainer=${JOB_NAME}" - trainer_id=$(python /root/k8s_tools.py fetch_id ${label}) - - cmd="${ENTRY} --ps_hosts=${g_pservers} --worker_hosts=${g_trainers} \ - --job_name=${TF_JOB_NAME} --task_index=${trainer_id} --batch_size=${BATCH_SIZE}" - - stdbuf -oL sh -c "cd ${TRAINER_PACKAGE} && ${cmd}" - check_trainer_ret $? -} - -start_tf(){ - if [[ "${TF_JOB_NAME}" == "worker" ]]; then - start_tf_trainer - else - start_tf_pserver - fi -} - -usage() { - echo "usage: tf_k8s []:" - echo " start_tf Start tensorflow jobs" -} - -case "$1" in - start_tf) - start_tf - ;; - --help) - usage - ;; - *) - usage - ;; -esac diff --git a/benchmark/cluster/vgg16/tf_pserver.yaml b/benchmark/cluster/vgg16/tf_pserver.yaml deleted file mode 100644 index 5e37c70081..0000000000 --- a/benchmark/cluster/vgg16/tf_pserver.yaml +++ /dev/null @@ -1,56 +0,0 @@ -apiVersion: extensions/v1beta1 -kind: ReplicaSet -metadata: - name: vgg16job-tf-pserver -spec: - replicas: 10 - template: - metadata: - labels: - tf-job-pserver: vgg16job-tf - spec: - hostNetwork: true - imagePullSecrets: - - name: job-registry-secret - containers: - - name: pserver - image: "registry.baidu.com/paddlepaddle/fluid_benchmark_tf:vgg16" - imagePullPolicy: Always - command: ["tf_k8s", "start_tf"] - ports: - - name: jobport-30236 - containerPort: 30236 - env: - - name: PORT - value: "32036" - - name: ENTRY - value: "python vgg16_tf.py" - - name: JOB_NAME - value: vgg16job-tf - - name: PSERVERS_NUM - value: "10" - - name: TF_JOB_NAME - value: "ps" - - name: TRAINERS_NUM - value: "20" - - name: BATCH_SIZE - value: "128" - - name: TRAINER_PACKAGE - value: "/workspace" - - name: NUM_PASSES - value: "1" - - name: NAMESPACE - valueFrom: - fieldRef: - fieldPath: "metadata.namespace" - - name: POD_IP - valueFrom: - fieldRef: - fieldPath: "status.podIP" - resources: - requests: - memory: 10Gi - cpu: 4 - limits: - memory: 10Gi - cpu: 4 diff --git a/benchmark/cluster/vgg16/tf_trainer.yaml b/benchmark/cluster/vgg16/tf_trainer.yaml deleted file mode 100644 index 08795df3ad..0000000000 --- a/benchmark/cluster/vgg16/tf_trainer.yaml +++ /dev/null @@ -1,58 +0,0 @@ -apiVersion: batch/v1 -kind: Job -metadata: - name: vgg16job-tf-trainer -spec: - parallelism: 20 - completions: 20 - template: - metadata: - labels: - tf-job-trainer: vgg16job-tf - spec: - imagePullSecrets: - - name: job-registry-secret - hostNetwork: true - containers: - - name: trainer - image: "registry.baidu.com/paddlepaddle/fluid_benchmark_tf:vgg16" - imagePullPolicy: Always - command: ["tf_k8s", "start_tf"] - ports: - - name: jobport-30236 - containerPort: 30236 - env: - - name: PORT - value: "32036" - - name: JOB_NAME - 
value: vgg16job-tf - - name: TF_JOB_NAME - value: "worker" - - name: ENTRY - value: "python vgg16_tf.py" - - name: PSERVERS_NUM - value: "10" - - name: BATCH_SIZE - value: "128" - - name: TRAINERS_NUM - value: "20" - - name: TRAINER_PACKAGE - value: "/workspace" - - name: NUM_PASSES - value: "1" - - name: NAMESPACE - valueFrom: - fieldRef: - fieldPath: "metadata.namespace" - - name: POD_IP - valueFrom: - fieldRef: - fieldPath: "status.podIP" - resources: - requests: - memory: 40Gi - cpu: 2 - limits: - memory: 40Gi - cpu: 2 - restartPolicy: Never diff --git a/benchmark/cluster/vgg16/v2_pserver.yaml b/benchmark/cluster/vgg16/v2_pserver.yaml deleted file mode 100644 index dd1271e0cf..0000000000 --- a/benchmark/cluster/vgg16/v2_pserver.yaml +++ /dev/null @@ -1,64 +0,0 @@ -apiVersion: extensions/v1beta1 -kind: ReplicaSet -metadata: - name: vgg16v2job-pserver -spec: - replicas: 10 - template: - metadata: - labels: - paddle-job-pserver: vgg16v2job - spec: - hostNetwork: true - imagePullSecrets: - - name: job-registry-secret - containers: - - name: pserver - image: "registry.baidu.com/paddlepaddle/fluid_benchmark:vgg16" - imagePullPolicy: Always - ports: - - name: jobport-30236 - containerPort: 30236 - env: - - name: PADDLE_JOB_NAME - value: vgg16v2job - - name: TRAINERS - value: "20" - - name: PSERVERS - value: "10" - - name: TOPOLOGY - value: "" - - name: ENTRY - value: "python train.py" - - name: TRAINER_PACKAGE - value: "/workspace" - - name: PADDLE_INIT_PORT - value: "30236" - - name: PADDLE_INIT_NICS - value: "xgbe0" - - name: PADDLE_INIT_TRAINER_COUNT - value: "1" - - name: PADDLE_INIT_PORTS_NUM - value: "1" - - name: PADDLE_INIT_PORTS_NUM_FOR_SPARSE - value: "1" - - name: PADDLE_INIT_NUM_GRADIENT_SERVERS - value: "20" - - name: PADDLE_INIT_NUM_PASSES - value: "1" - - name: PADDLE_INIT_USE_GPU - value: "0" - - name: LD_LIBRARY_PATH - value: "/usr/local/lib:/usr/local/nvidia/lib64" - - name: NAMESPACE - valueFrom: - fieldRef: - fieldPath: "metadata.namespace" - command: ["paddle_k8s", "start_pserver"] - resources: - requests: - memory: 10Gi - cpu: 4 - limits: - memory: 10Gi - cpu: 4 diff --git a/benchmark/cluster/vgg16/v2_trainer.yaml b/benchmark/cluster/vgg16/v2_trainer.yaml deleted file mode 100644 index 12c8964066..0000000000 --- a/benchmark/cluster/vgg16/v2_trainer.yaml +++ /dev/null @@ -1,65 +0,0 @@ -apiVersion: batch/v1 -kind: Job -metadata: - name: vgg16v2job-trainer -spec: - parallelism: 20 - completions: 20 - template: - metadata: - labels: - paddle-job: vgg16v2job - spec: - imagePullSecrets: - - name: job-registry-secret - hostNetwork: true - containers: - - name: trainer - image: "registry.baidu.com/paddlepaddle/fluid_benchmark:vgg16" - imagePullPolicy: Always - command: ["paddle_k8s", "start_trainer", "v2"] - env: - - name: PADDLE_JOB_NAME - value: vgg16v2job - - name: BATCH_SIZE - value: "256" - - name: TRAINERS - value: "20" - - name: PSERVERS - value: "10" - - name: TOPOLOGY - value: "" - - name: ENTRY - value: "cd /workspace && MKL_NUM_THREADS=1 python /workspace/vgg16_v2.py" - - name: TRAINER_PACKAGE - value: "/workspace" - - name: PADDLE_INIT_PORT - value: "30236" - - name: PADDLE_INIT_NICS - value: "xgbe0" - - name: PADDLE_INIT_TRAINER_COUNT - value: "1" - - name: PADDLE_INIT_PORTS_NUM - value: "1" - - name: PADDLE_INIT_PORTS_NUM_FOR_SPARSE - value: "1" - - name: PADDLE_INIT_NUM_GRADIENT_SERVERS - value: "20" - - name: PADDLE_INIT_NUM_PASSES - value: "2" - - name: PADDLE_INIT_USE_GPU - value: "0" - - name: LD_LIBRARY_PATH - value: "/usr/local/lib:/usr/local/nvidia/lib64" - 
- name: NAMESPACE - valueFrom: - fieldRef: - fieldPath: "metadata.namespace" - resources: - requests: - memory: 40Gi - cpu: 2 - limits: - memory: 40Gi - cpu: 2 - restartPolicy: Never diff --git a/benchmark/cluster/vgg16/vgg16_fluid.py b/benchmark/cluster/vgg16/vgg16_fluid.py deleted file mode 100644 index 05b5f3977c..0000000000 --- a/benchmark/cluster/vgg16/vgg16_fluid.py +++ /dev/null @@ -1,308 +0,0 @@ -# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -"""VGG16 benchmark in Fluid""" -from __future__ import print_function - -import sys -import time -import numpy as np -import paddle.v2 as paddle -import paddle.fluid as fluid -import paddle.fluid.core as core -import paddle.fluid.profiler as profiler -import argparse -import functools -import os -from paddle.fluid import debuger - - -def str2bool(v): - if v.lower() in ('yes', 'true', 't', 'y', '1'): - return True - elif v.lower() in ('no', 'false', 'f', 'n', '0'): - return False - else: - raise argparse.ArgumentTypeError('Boolean value expected.') - - -parser = argparse.ArgumentParser(description=__doc__) -parser.add_argument( - '--batch_size', type=int, default=128, help="Batch size for training.") -parser.add_argument( - '--learning_rate', - type=float, - default=1e-3, - help="Learning rate for training.") -parser.add_argument('--num_passes', type=int, default=50, help="No. 
of passes.") -parser.add_argument( - '--device', - type=str, - default='CPU', - choices=['CPU', 'GPU'], - help="The device type.") -parser.add_argument('--device_id', type=int, default=0, help="The device id.") -parser.add_argument( - '--data_format', - type=str, - default='NCHW', - choices=['NCHW', 'NHWC'], - help='The data order, now only support NCHW.') -parser.add_argument( - '--data_set', - type=str, - default='cifar10', - choices=['cifar10', 'flowers'], - help='Optional dataset for benchmark.') -parser.add_argument( - '--local', - type=str2bool, - default=True, - help='Whether to run as local mode.') - -parser.add_argument( - "--ps_hosts", - type=str, - default="", - help="Comma-separated list of hostname:port pairs") -parser.add_argument( - "--trainer_hosts", - type=str, - default="", - help="Comma-separated list of hostname:port pairs") -parser.add_argument( - "--profile", action='store_true', help="If set, profile a few steps.") - -# Flags for defining the tf.train.Server -parser.add_argument( - "--task_index", type=int, default=0, help="Index of task within the job") -args = parser.parse_args() - - -def vgg16_bn_drop(input): - def conv_block(input, num_filter, groups, dropouts): - return fluid.nets.img_conv_group( - input=input, - pool_size=2, - pool_stride=2, - conv_num_filter=[num_filter] * groups, - conv_filter_size=3, - conv_act='relu', - conv_with_batchnorm=True, - conv_batchnorm_drop_rate=dropouts, - pool_type='max') - - conv1 = conv_block(input, 64, 2, [0.3, 0]) - conv2 = conv_block(conv1, 128, 2, [0.4, 0]) - conv3 = conv_block(conv2, 256, 3, [0.4, 0.4, 0]) - conv4 = conv_block(conv3, 512, 3, [0.4, 0.4, 0]) - conv5 = conv_block(conv4, 512, 3, [0.4, 0.4, 0]) - - drop = fluid.layers.dropout(x=conv5, dropout_prob=0.5) - fc1 = fluid.layers.fc(input=drop, size=4096, act=None) - bn = fluid.layers.batch_norm(input=fc1, act='relu') - drop2 = fluid.layers.dropout(x=bn, dropout_prob=0.5) - fc2 = fluid.layers.fc(input=drop2, size=4096, act=None) - return fc2 - - -def main(): - if args.data_set == "cifar10": - classdim = 10 - if args.data_format == 'NCHW': - data_shape = [3, 32, 32] - else: - data_shape = [32, 32, 3] - else: - classdim = 102 - if args.data_format == 'NCHW': - data_shape = [3, 224, 224] - else: - data_shape = [224, 224, 3] - - # Input data - images = fluid.layers.data(name='pixel', shape=data_shape, dtype='float32') - label = fluid.layers.data(name='label', shape=[1], dtype='int64') - - # Train program - net = vgg16_bn_drop(images) - predict = fluid.layers.fc(input=net, size=classdim, act='softmax') - cost = fluid.layers.cross_entropy(input=predict, label=label) - avg_cost = fluid.layers.mean(x=cost) - - # Evaluator - batch_size = fluid.layers.create_tensor(dtype='int64') - batch_acc = fluid.layers.accuracy( - input=predict, label=label, total=batch_size) - - # inference program - inference_program = fluid.default_main_program().clone() - with fluid.program_guard(inference_program): - inference_program = fluid.io.get_inference_program(batch_acc) - - # Optimization - optimizer = fluid.optimizer.Adam(learning_rate=args.learning_rate) - optimize_ops, params_grads = optimizer.minimize(avg_cost) - - # Initialize executor - place = core.CPUPlace() if args.device == 'CPU' else core.CUDAPlace( - args.device_id) - exe = fluid.Executor(place) - - # test - def test(exe): - test_pass_acc = fluid.average.WeightedAverage() - for batch_id, data in enumerate(test_reader()): - img_data = np.array(map(lambda x: x[0].reshape(data_shape), - data)).astype("float32") - y_data = 
np.array(map(lambda x: x[1], data)).astype("int64") - y_data = y_data.reshape([-1, 1]) - - outs = exe.run(inference_program, - feed={"pixel": img_data, - "label": y_data}, - fetch_list=[batch_acc, batch_size]) - test_pass_acc.add(value=np.array(outs[0]), weight=np.array(outs[1])) - - return test_pass_acc.eval() - - def train_loop(exe, trainer_prog): - iters = 0 - ts = time.time() - train_pass_acc = fluid.average.WeightedAverage() - for pass_id in range(args.num_passes): - # train - start_time = time.time() - num_samples = 0 - train_pass_acc.reset() - - def run_step(batch_id, data): - img_data = np.array( - map(lambda x: x[0].reshape(data_shape), data)).astype( - "float32") - y_data = np.array(map(lambda x: x[1], data)).astype("int64") - y_data = y_data.reshape([-1, 1]) - - loss, acc, b_size = exe.run( - trainer_prog, - feed={"pixel": img_data, - "label": y_data}, - fetch_list=[avg_cost, batch_acc, batch_size]) - return loss, acc, b_size - - if args.profile and args.task_index == 0: - # warmup. - for batch_id, data in enumerate(train_reader()): - if batch_id > 5: break - run_step(batch_id, data) - with profiler.profiler('All', 'total', '/tmp/profile_vgg'): - for batch_id, data in enumerate(train_reader()): - if batch_id > 5: break - run_step(batch_id, data) - - for batch_id, data in enumerate(train_reader()): - ts = time.time() - loss, acc, b_size = run_step(batch_id, data) - iters += 1 - num_samples += len(data) - train_pass_acc.add(value=acc, weight=b_size) - print( - "Pass = %d, Iters = %d, Loss = %f, Accuracy = %f, " - "Speed = %.2f img/s" % (pass_id, iters, loss, acc, - len(data) / (time.time() - ts)) - ) # The accuracy is the accumulation of batches, but not the current batch. - - pass_elapsed = time.time() - start_time - pass_train_acc = train_pass_acc.eval() - pass_test_acc = test(exe) - print("Task:%d Pass = %d, Training performance = %f imgs/s, " - "Train accuracy = %f, Test accuracy = %f\n" % - (args.task_index, pass_id, num_samples / pass_elapsed, - pass_train_acc, pass_test_acc)) - - if args.local: - # Parameter initialization - exe.run(fluid.default_startup_program()) - - # data reader - train_reader = paddle.batch( - paddle.reader.shuffle( - paddle.dataset.cifar.train10() if args.data_set == 'cifar10' - else paddle.dataset.flowers.train(), - buf_size=5120), - batch_size=args.batch_size) - test_reader = paddle.batch( - paddle.dataset.cifar.test10() - if args.data_set == 'cifar10' else paddle.dataset.flowers.test(), - batch_size=args.batch_size) - train_loop(exe, fluid.default_main_program()) - else: - trainers = int(os.getenv("TRAINERS")) # total trainer count - print("trainers total: ", trainers) - - training_role = os.getenv( - "TRAINING_ROLE", - "TRAINER") # get the training role: trainer/pserver - - t = fluid.DistributeTranspiler() - t.transpile( - trainer_id=args.task_index, - pservers=args.ps_hosts, - trainers=trainers) - - if training_role == "PSERVER": - current_endpoint = os.getenv("POD_IP") + ":" + os.getenv( - "PADDLE_INIT_PORT") - if not current_endpoint: - print("need env SERVER_ENDPOINT") - exit(1) - pserver_prog = t.get_pserver_program(current_endpoint) - pserver_startup = t.get_startup_program(current_endpoint, - pserver_prog) - exe.run(pserver_startup) - exe.run(pserver_prog) - elif training_role == "TRAINER": - # Parameter initialization - exe.run(fluid.default_startup_program()) - - # data reader - train_reader = paddle.batch( - paddle.reader.shuffle( - paddle.dataset.cifar.train10() if args.data_set == 'cifar10' - else paddle.dataset.flowers.train(), - 
buf_size=5120), - batch_size=args.batch_size) - test_reader = paddle.batch( - paddle.dataset.cifar.test10() if args.data_set == 'cifar10' else - paddle.dataset.flowers.test(), - batch_size=args.batch_size) - - trainer_prog = t.get_trainer_program() - feeder = fluid.DataFeeder(feed_list=[images, label], place=place) - # TODO(typhoonzero): change trainer startup program to fetch parameters from pserver - exe.run(fluid.default_startup_program()) - train_loop(exe, trainer_prog) - else: - print("environment var TRAINER_ROLE should be TRAINER os PSERVER") - - -def print_arguments(): - print('----------- Configuration Arguments -----------') - for arg, value in sorted(vars(args).iteritems()): - print('%s: %s' % (arg, value)) - print('------------------------------------------------') - - -if __name__ == "__main__": - print_arguments() - main() diff --git a/benchmark/cluster/vgg16/vgg16_tf.py b/benchmark/cluster/vgg16/vgg16_tf.py deleted file mode 100644 index 2d220478ac..0000000000 --- a/benchmark/cluster/vgg16/vgg16_tf.py +++ /dev/null @@ -1,366 +0,0 @@ -# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -"""VGG16 benchmark in TensorFlow -You can get distribution example template structure here: -https://medium.com/clusterone/how-to-write-distributed-tensorflow-code-with-an-example-on-tensorport-70bf3306adcb -https://www.tensorflow.org/deploy/distributed -""" - -import tensorflow as tf -import paddle.v2 as paddle -import numpy as np -import argparse -import time - -parser = argparse.ArgumentParser(description=__doc__) -parser.add_argument( - '--batch_size', type=int, default=128, help="Batch size for training.") -parser.add_argument( - '--learning_rate', - type=float, - default=1e-3, - help="Learning rate for training.") -parser.add_argument('--num_passes', type=int, default=50, help="No. of passes.") -parser.add_argument( - '--device', - type=str, - default='CPU', - choices=['CPU', 'GPU'], - help="The device type.") -parser.add_argument( - '--data_format', - type=str, - default='NHWC', - choices=['NCHW', 'NHWC'], - help='The data order, NCHW=[batch, channels, height, width].' 
- 'Only support NHWC right now.') -parser.add_argument( - '--data_set', - type=str, - default='cifar10', - choices=['cifar10', 'flowers'], - help='Optional dataset for benchmark.') - -parser.add_argument( - "--ps_hosts", - type=str, - default="", - help="Comma-separated list of hostname:port pairs") -parser.add_argument( - "--worker_hosts", - type=str, - default="", - help="Comma-separated list of hostname:port pairs") -parser.add_argument( - "--job_name", type=str, default="", help="One of 'worker', 'ps'") -# Flags for defining the tf.train.Server -parser.add_argument( - "--task_index", type=int, default=0, help="Index of task within the job") - -args = parser.parse_args() - - -class VGG16Model(object): - def __init__(self): - self.parameters = [] - - def batch_norm_relu(self, inputs, is_training): - """Performs a batch normalization followed by a ReLU.""" - # We set fused=True for a significant speed boost. See - # https://www.tensorflow.org/speed/speed_guide#common_fused_ops - inputs = tf.layers.batch_normalization( - inputs=inputs, - axis=1 if args.data_format == 'NCHW' else -1, - momentum=0.9, - epsilon=1e-05, - center=True, - scale=True, - training=is_training, - fused=True) - inputs = tf.nn.relu(inputs) - return inputs - - def conv_bn_layer(self, - name, - images, - kernel_shape, - is_training, - drop_rate=0.0): - with tf.name_scope(name) as scope: - kernel = tf.Variable( - tf.truncated_normal( - kernel_shape, dtype=tf.float32, stddev=1e-1), - name='weights') - conv = tf.nn.conv2d( - images, - kernel, [1, 1, 1, 1], - data_format=args.data_format, - padding='SAME') - biases = tf.Variable( - tf.constant( - 0.0, shape=[kernel_shape[-1]], dtype=tf.float32), - trainable=True, - name='biases') - out = tf.nn.bias_add(conv, biases) - out = self.batch_norm_relu(out, is_training) - out = tf.layers.dropout(out, rate=drop_rate, training=is_training) - return out - - def fc_layer(self, name, inputs, shape): - with tf.name_scope(name) as scope: - fc_w = tf.Variable( - tf.truncated_normal( - shape, dtype=tf.float32, stddev=1e-1), - name='weights') - fc_b = tf.Variable( - tf.constant( - 0.0, shape=[shape[-1]], dtype=tf.float32), - trainable=True, - name='biases') - out = tf.nn.bias_add(tf.matmul(inputs, fc_w), fc_b) - return out - - def network(self, images, class_dim, is_training): - """ VGG16 model structure. 
- - TODO(kuke): enable this network to support the 'NCHW' data format - """ - - # conv1 - conv1_1 = self.conv_bn_layer( - 'conv1_1', images, [3, 3, 3, 64], is_training, drop_rate=0.3) - conv1_2 = self.conv_bn_layer( - 'conv1_2', conv1_1, [3, 3, 64, 64], is_training, drop_rate=0.0) - # pool1 - pool1 = tf.nn.max_pool( - conv1_2, - ksize=[1, 2, 2, 1], - strides=[1, 2, 2, 1], - padding='SAME', - name='pool1') - # conv2 - conv2_1 = self.conv_bn_layer( - 'conv2_1', pool1, [3, 3, 64, 128], is_training, drop_rate=0.4) - conv2_2 = self.conv_bn_layer( - 'conv2_2', conv2_1, [3, 3, 128, 128], is_training, drop_rate=0.0) - # pool2 - pool2 = tf.nn.max_pool( - conv2_2, - ksize=[1, 2, 2, 1], - strides=[1, 2, 2, 1], - padding='SAME', - name='pool2') - # conv3 - conv3_1 = self.conv_bn_layer( - 'conv3_1', pool2, [3, 3, 128, 256], is_training, drop_rate=0.4) - conv3_2 = self.conv_bn_layer( - 'conv3_2', conv3_1, [3, 3, 256, 256], is_training, drop_rate=0.4) - conv3_3 = self.conv_bn_layer( - 'conv3_3', conv3_2, [3, 3, 256, 256], is_training, drop_rate=0.0) - # pool3 - pool3 = tf.nn.max_pool( - conv3_3, - ksize=[1, 2, 2, 1], - strides=[1, 2, 2, 1], - padding='SAME', - name='pool3') - # conv4 - conv4_1 = self.conv_bn_layer( - 'conv4_1', pool3, [3, 3, 256, 512], is_training, drop_rate=0.4) - conv4_2 = self.conv_bn_layer( - 'conv4_2', conv4_1, [3, 3, 512, 512], is_training, drop_rate=0.4) - conv4_3 = self.conv_bn_layer( - 'conv4_3', conv4_2, [3, 3, 512, 512], is_training, drop_rate=0.0) - # pool4 - pool4 = tf.nn.max_pool( - conv4_3, - ksize=[1, 2, 2, 1], - strides=[1, 2, 2, 1], - padding='SAME', - name='pool4') - # conv5 - conv5_1 = self.conv_bn_layer( - 'conv5_1', pool4, [3, 3, 512, 512], is_training, drop_rate=0.4) - conv5_2 = self.conv_bn_layer( - 'conv5_2', conv5_1, [3, 3, 512, 512], is_training, drop_rate=0.4) - conv5_3 = self.conv_bn_layer( - 'conv5_3', conv5_2, [3, 3, 512, 512], is_training, drop_rate=0.0) - # pool5 - pool5 = tf.nn.max_pool( - conv5_3, - ksize=[1, 2, 2, 1], - strides=[1, 2, 2, 1], - padding='SAME', - name='pool4') - # flatten - shape = int(np.prod(pool5.get_shape()[1:])) - pool5_flat = tf.reshape(pool5, [-1, shape]) - # fc1 - drop = tf.layers.dropout(pool5_flat, rate=0.5, training=is_training) - fc1 = self.fc_layer('fc1', drop, [shape, 512]) - # fc2 - bn = self.batch_norm_relu(fc1, is_training) - drop = tf.layers.dropout(bn, rate=0.5, training=is_training) - fc2 = self.fc_layer('fc2', drop, [512, 512]) - - fc3 = self.fc_layer('fc3', fc2, [512, class_dim]) - - return fc3 - - -def run_benchmark(cluster_spec, server): - """Run benchmark on cifar10 or flowers.""" - - if args.data_set == "cifar10": - class_dim = 10 - raw_shape = (3, 32, 32) - dat_shape = (None, 32, 32, 3) if args.data_format == 'NHWC' else ( - None, 3, 32, 32) - else: - class_dim = 102 - raw_shape = (3, 224, 224) - dat_shape = (None, 224, 224, 3) if args.data_format == 'NHWC' else ( - None, 3, 224, 224) - - device = tf.train.replica_device_setter( - worker_device="/job:worker/task:{}".format(args.task_index), - cluster=cluster_spec) - - with tf.device(device): - images = tf.placeholder(tf.float32, shape=dat_shape) - labels = tf.placeholder(tf.int64, shape=(None, )) - is_training = tf.placeholder('bool') - onehot_labels = tf.one_hot(labels, depth=class_dim) - - vgg16 = VGG16Model() - logits = vgg16.network(images, class_dim, is_training) - loss = tf.losses.softmax_cross_entropy( - onehot_labels=onehot_labels, logits=logits) - avg_loss = tf.reduce_mean(loss) - - correct = tf.equal(tf.argmax(logits, 1), labels) - accuracy = 
tf.reduce_mean(tf.cast(correct, tf.float32)) - - optimizer = tf.train.AdamOptimizer(learning_rate=args.learning_rate) - update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS) - global_step = tf.Variable(0, name='global_step', trainable=False) - with tf.control_dependencies(update_ops): - train_op = optimizer.minimize(avg_loss, global_step=global_step) - - summary_op = tf.summary.merge_all() - init_op = tf.global_variables_initializer() - - # data reader - train_reader = paddle.batch( - paddle.reader.shuffle( - paddle.dataset.cifar.train10() - if args.data_set == 'cifar10' else paddle.dataset.flowers.train(), - buf_size=5120), - batch_size=args.batch_size) - test_reader = paddle.batch( - paddle.reader.shuffle( - paddle.dataset.cifar.test10() - if args.data_set == 'cifar10' else paddle.dataset.flowers.test(), - buf_size=5120), - batch_size=args.batch_size) - - # test - def test(): - test_accs = [] - for batch_id, data in enumerate(test_reader()): - test_images = np.array( - map(lambda x: np.transpose(x[0].reshape(raw_shape), - axes=[1, 2, 0]) if args.data_format == 'NHWC' else x[0], data)).astype("float32") - test_labels = np.array(map(lambda x: x[1], data)).astype('int64') - test_accs.append( - accuracy.eval(feed_dict={ - images: test_images, - labels: test_labels, - is_training: False - })) - return np.mean(test_accs) - - config = tf.ConfigProto( - intra_op_parallelism_threads=1, - inter_op_parallelism_threads=1, - log_device_placement=True) - config.gpu_options.allow_growth = True - - hooks = [tf.train.StopAtStepHook(last_step=1000000)] - - with tf.train.MonitoredTrainingSession( - master=server.target, - is_chief=(args.task_index == 0), - hooks=hooks, - config=config) as sess: - iters, num_samples, start_time = 0, 0, 0.0 - for pass_id in range(args.num_passes): - # train - num_samples = 0 - start_time = time.time() - for batch_id, data in enumerate(train_reader()): - train_images = np.array( - map(lambda x: np.transpose(x[0].reshape(raw_shape), - axes=[1, 2, 0]) if args.data_format == 'NHWC' else x[0], data)).astype("float32") - train_labels = np.array(map(lambda x: x[1], data)).astype( - 'int64') - iter_begin_time = time.time() - _, loss, acc = sess.run([train_op, avg_loss, accuracy], - feed_dict={ - images: train_images, - labels: train_labels, - is_training: True - }) - iters += 1 - print( - "Pass = %d, Iters = %d, Loss = %f, Accuracy = %f, Speed=%.2f imgs/sec" - % (pass_id, iters, loss, acc, - len(data) / (time.time() - iter_begin_time))) - num_samples += len(data) - train_elapsed = time.time() - start_time - # test - pass_test_acc = test() - print("Pass = %d, Train speed = %f imgs/s, Test accuracy = %f\n" % - (pass_id, num_samples / train_elapsed, pass_test_acc)) - - -def print_arguments(): - print('----------- Configuration Arguments -----------') - for arg, value in sorted(vars(args).iteritems()): - print('%s: %s' % (arg, value)) - print('------------------------------------------------') - - -if __name__ == '__main__': - print_arguments() - - ps_hosts = args.ps_hosts.split(",") - worker_hosts = args.worker_hosts.split(",") - - # Create a cluster from the parameter server and worker hosts. - cluster_spec = tf.train.ClusterSpec({ - "ps": ps_hosts, - "worker": worker_hosts - }) - - # Create and start a server for the local task. 
- server = tf.train.Server( - cluster_spec, job_name=args.job_name, task_index=args.task_index) - - if args.job_name == "ps": - print("start pserver") - server.join() - elif args.job_name == "worker": - print("start worker") - run_benchmark(cluster_spec, server) diff --git a/benchmark/cluster/vgg16/vgg16_v2.py b/benchmark/cluster/vgg16/vgg16_v2.py deleted file mode 100644 index 1a66af32d7..0000000000 --- a/benchmark/cluster/vgg16/vgg16_v2.py +++ /dev/null @@ -1,154 +0,0 @@ -# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. -# -#Licensed under the Apache License, Version 2.0 (the "License"); -#you may not use this file except in compliance with the License. -#You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -#Unless required by applicable law or agreed to in writing, software -#distributed under the License is distributed on an "AS IS" BASIS, -#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -#See the License for the specific language governing permissions and -#limitations under the License. - -import gzip - -import paddle.v2.dataset.cifar as cifar -import paddle.v2 as paddle -import time -import os - -DATA_DIM = 3 * 32 * 32 -CLASS_DIM = 10 -BATCH_SIZE = os.getenv("BATCH_SIZE") -if BATCH_SIZE: - BATCH_SIZE = int(BATCH_SIZE) -else: - BATCH_SIZE = 128 -print "batch_size", BATCH_SIZE -NODE_COUNT = int(os.getenv("TRAINERS")) -ts = 0 - - -def vgg(input, nums, class_dim): - def conv_block(input, num_filter, groups, num_channels=None): - return paddle.networks.img_conv_group( - input=input, - num_channels=num_channels, - pool_size=2, - pool_stride=2, - conv_num_filter=[num_filter] * groups, - conv_filter_size=3, - conv_act=paddle.activation.Relu(), - pool_type=paddle.pooling.Max()) - - assert len(nums) == 5 - # the channel of input feature is 3 - conv1 = conv_block(input, 64, nums[0], 3) - conv2 = conv_block(conv1, 128, nums[1]) - conv3 = conv_block(conv2, 256, nums[2]) - conv4 = conv_block(conv3, 512, nums[3]) - conv5 = conv_block(conv4, 512, nums[4]) - - fc_dim = 512 - fc1 = paddle.layer.fc(input=conv5, - size=fc_dim, - act=paddle.activation.Relu(), - layer_attr=paddle.attr.Extra(drop_rate=0.5)) - fc2 = paddle.layer.fc(input=fc1, - size=fc_dim, - act=paddle.activation.Relu(), - layer_attr=paddle.attr.Extra(drop_rate=0.5)) - out = paddle.layer.fc(input=fc2, - size=class_dim, - act=paddle.activation.Softmax()) - return out - - -def vgg13(input, class_dim): - nums = [2, 2, 2, 2, 2] - return vgg(input, nums, class_dim) - - -def vgg16(input, class_dim): - nums = [2, 2, 3, 3, 3] - return vgg(input, nums, class_dim) - - -def vgg19(input, class_dim): - nums = [2, 2, 4, 4, 4] - return vgg(input, nums, class_dim) - - -def main(): - global ts - paddle.init(use_gpu=False) - image = paddle.layer.data( - name="image", type=paddle.data_type.dense_vector(DATA_DIM)) - lbl = paddle.layer.data( - name="label", type=paddle.data_type.integer_value(CLASS_DIM)) - - extra_layers = None - # NOTE: for v2 distributed training need averaging updates. 
-    learning_rate = 1e-3 / NODE_COUNT
-    out = vgg16(image, class_dim=CLASS_DIM)
-    cost = paddle.layer.classification_cost(input=out, label=lbl)
-
-    # Create parameters
-    parameters = paddle.parameters.create(cost)
-
-    # Create optimizer
-    optimizer = paddle.optimizer.Momentum(
-        momentum=0.9,
-        regularization=paddle.optimizer.L2Regularization(rate=0.0005 *
-                                                         BATCH_SIZE),
-        learning_rate=learning_rate / BATCH_SIZE,
-        learning_rate_decay_a=0.1,
-        learning_rate_decay_b=128000 * 35,
-        learning_rate_schedule="discexp", )
-
-    train_reader = paddle.batch(
-        paddle.reader.shuffle(
-            cifar.train10(),
-            # To use other data, replace the above line with:
-            # reader.train_reader('train.list'),
-            buf_size=1000),
-        batch_size=BATCH_SIZE)
-    test_reader = paddle.batch(
-        cifar.test10(),
-        # To use other data, replace the above line with:
-        # reader.test_reader('val.list'),
-        batch_size=BATCH_SIZE)
-
-    # Create trainer
-    trainer = paddle.trainer.SGD(cost=cost,
-                                 parameters=parameters,
-                                 update_equation=optimizer,
-                                 extra_layers=extra_layers,
-                                 is_local=False)
-
-    # End batch and end pass event handler
-    def event_handler(event):
-        global ts, ts_pass
-        if isinstance(event, paddle.event.BeginPass):
-            ts_pass = time.time()
-        if isinstance(event, paddle.event.BeginIteration):
-            ts = time.time()
-        if isinstance(event, paddle.event.EndIteration):
-            if event.batch_id % 1 == 0:
-                print "\nPass %d, Batch %d, Cost %f, %s, spent: %f" % (
-                    event.pass_id, event.batch_id, event.cost, event.metrics,
-                    time.time() - ts)
-        if isinstance(event, paddle.event.EndPass):
-            print "Pass %d end, spent: %f" % (event.pass_id,
-                                              time.time() - ts_pass)
-            result = trainer.test(reader=test_reader)
-            print "\nTest with Pass %d, %s" % (event.pass_id, result.metrics)
-
-    trainer.train(
-        reader=train_reader, num_passes=200, event_handler=event_handler)
-
-
-if __name__ == '__main__':
-    main()
diff --git a/benchmark/fluid/Dockerfile b/benchmark/fluid/Dockerfile
new file mode 100644
index 0000000000..707fadb1fa
--- /dev/null
+++ b/benchmark/fluid/Dockerfile
@@ -0,0 +1,31 @@
+FROM nvidia/cuda:9.0-cudnn7-devel-ubuntu16.04
+
+# Using UBUNTU_MIRROR can speed up apt-get.
+# ARG UBUNTU_MIRROR
+# RUN /bin/bash -c 'if [[ -n ${UBUNTU_MIRROR} ]]; then sed -i 's#http://archive.ubuntu.com/ubuntu#${UBUNTU_MIRROR}#g' /etc/apt/sources.list; fi'
+
+RUN apt-get update && apt-get install -y python python-pip iputils-ping libgtk2.0-dev wget vim net-tools iftop python-opencv
+RUN ln -s /usr/lib/x86_64-linux-gnu/libcudnn.so.7 /usr/lib/libcudnn.so && ln -s /usr/lib/x86_64-linux-gnu/libnccl.so.2 /usr/lib/libnccl.so
+
+# IMPORTANT:
+# Add "ENV http_proxy=http://ip:port" if your download is slow, and don't forget to unset it at runtime.
+# example: unset http_proxy && unset https_proxy && python fluid_benchmark.py ...
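+# A hypothetical illustration of the note above (the proxy address below is a
+# placeholder, not a real endpoint); keep these commented out unless needed:
+# ENV http_proxy=http://172.17.0.1:3128
+# ENV https_proxy=http://172.17.0.1:3128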
+
+RUN pip install -U pip
+RUN pip install -U kubernetes paddlepaddle
+
+RUN sh -c 'echo "import paddle.v2 as paddle\npaddle.dataset.cifar.train10()\npaddle.dataset.flowers.fetch()" | python'
+RUN sh -c 'echo "import paddle.v2 as paddle\npaddle.dataset.mnist.train()\npaddle.dataset.mnist.test()\npaddle.dataset.imdb.fetch()" | python'
+RUN sh -c 'echo "import paddle.v2 as paddle\npaddle.dataset.imikolov.fetch()" | python'
+RUN pip uninstall -y paddlepaddle && mkdir /workspace
+
+ADD https://raw.githubusercontent.com/PaddlePaddle/cloud/develop/docker/paddle_k8s /usr/bin
+ADD https://raw.githubusercontent.com/PaddlePaddle/cloud/develop/docker/k8s_tools.py /root
+RUN chmod +x /usr/bin/paddle_k8s
+
+ADD *.whl /
+RUN pip install /*.whl && rm -f /*.whl
+
+ENV LD_LIBRARY_PATH=/usr/local/lib
+ADD fluid_benchmark.py recordio_converter.py args.py run.sh run_fluid_benchmark.sh /workspace/
+ADD models/ /workspace/models/
diff --git a/benchmark/fluid/README.md b/benchmark/fluid/README.md
new file mode 100644
index 0000000000..28cade4634
--- /dev/null
+++ b/benchmark/fluid/README.md
@@ -0,0 +1,99 @@
+# Fluid Benchmark
+
+This directory contains several model configurations and the tools used to run
+Fluid benchmarks for local and distributed training.
+
+
+## Run the Benchmark
+
+To start, run the following command to get the full help message:
+
+```bash
+python fluid_benchmark.py --help
+```
+
+Currently supported `--model` arguments include:
+
+* mnist
+* resnet
+  * you can choose a different dataset with `--data_set cifar10` or
+    `--data_set flowers`.
+* vgg
+* stacked_dynamic_lstm
+* machine_translation
+
+* Run the following command to start a benchmark job locally:
+  ```bash
+  python fluid_benchmark.py --model mnist --device GPU
+  ```
+  You can choose between GPU and CPU training. With GPU training, you can
+  specify `--gpus <gpu_num>` to run multi-GPU training.
+  You can also run the parameter server in async mode by specifying
+  `--async_mode`, which trains the model asynchronously.
+* Run distributed training with parameter servers:
+  * see [run_fluid_benchmark.sh](https://github.com/PaddlePaddle/Paddle/blob/develop/benchmark/fluid/run_fluid_benchmark.sh) as an example.
+  * start parameter servers:
+    ```bash
+    PADDLE_TRAINING_ROLE=PSERVER PADDLE_PSERVER_PORT=7164 PADDLE_PSERVER_IPS=127.0.0.1 PADDLE_TRAINERS=1 PADDLE_CURRENT_IP=127.0.0.1 PADDLE_TRAINER_ID=0 python fluid_benchmark.py --model mnist --device GPU --update_method pserver
    sleep 15
    ```
+  * start trainers:
+    ```bash
+    PADDLE_TRAINING_ROLE=TRAINER PADDLE_PSERVER_PORT=7164 PADDLE_PSERVER_IPS=127.0.0.1 PADDLE_TRAINERS=1 PADDLE_CURRENT_IP=127.0.0.1 PADDLE_TRAINER_ID=0 python fluid_benchmark.py --model mnist --device GPU --update_method pserver
+    ```
+* Run distributed training using NCCL2
+  ```bash
+  PADDLE_PSERVER_PORT=7164 PADDLE_TRAINER_IPS=192.168.0.2,192.168.0.3 PADDLE_CURRENT_IP=127.0.0.1 PADDLE_TRAINER_ID=0 python fluid_benchmark.py --model mnist --device GPU --update_method nccl2
+  ```
+
+## Prepare the RecordIO file to Achieve Better Performance
+
+Running the following command will generate RecordIO files like "mnist.recordio" under the path
+and batch_size you choose. You can use batch_size=1 so that a later reader can change the batch
+size at any time using `fluid.batch`.
+
+```bash
+python -c 'from recordio_converter import *; prepare_mnist("data", 1)'
+```
+
+## Run Distributed Benchmark on Kubernetes Cluster
+
+You may need to build a Docker image before submitting a cluster job onto Kubernetes; otherwise
+you will have to start all of those processes manually on each node, which is not recommended.
+
+To build the Docker image, you need to choose a paddle "whl" package to run with. You may either
+download one from
+http://www.paddlepaddle.org/docs/develop/documentation/zh/build_and_install/pip_install_en.html or
+build one yourself. Once you've got the "whl" package, put it under the current directory and run:
+
+```bash
+docker build -t [your docker image name]:[your docker image tag] .
+```
+
+Then push the image to a Docker registry that your Kubernetes cluster can reach.
+
+We provide a script `kube_gen_job.py` to generate the Kubernetes yaml files used to submit
+distributed benchmark jobs to your cluster. To generate a job yaml, just run:
+
+```bash
+python kube_gen_job.py --jobname myjob --pscpu 4 --cpu 8 --gpu 8 --psmemory 20 --memory 40 --pservers 4 --trainers 4 --entry "python fluid_benchmark.py --model mnist --gpus 8 --device GPU --update_method pserver " --disttype pserver
+```
+
+Then the yaml files are generated under the directory `myjob`, and you can run:
+
+```bash
+kubectl create -f myjob/
+```
+
+The job should then start.
+
+
+## Notes for Running Fluid Distributed Training with NCCL2 and RDMA
+
+Before running NCCL2 distributed jobs, please check whether your node has multiple network
+interfaces; if so, try adding the environment variable `export NCCL_SOCKET_IFNAME=eth0`
+(adjusted to your actual network device) so that NCCL picks the right interface.
+
+To run high-performance distributed training, you must prepare your hardware environment to
+support RDMA-enabled network communication; please check out [this](https://github.com/PaddlePaddle/Paddle/blob/develop/doc/fluid/howto/cluster/nccl2_rdma_training.md)
+note for details.
diff --git a/benchmark/fluid/args.py b/benchmark/fluid/args.py
new file mode 100644
index 0000000000..a79f25ccc6
--- /dev/null
+++ b/benchmark/fluid/args.py
@@ -0,0 +1,134 @@
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import argparse
+
+__all__ = ['parse_args', ]
+
+BENCHMARK_MODELS = [
+    "machine_translation", "resnet", "vgg", "mnist", "stacked_dynamic_lstm"
+]
+
+
+def parse_args():
+    parser = argparse.ArgumentParser('Fluid model benchmarks.')
+    parser.add_argument(
+        '--model',
+        type=str,
+        choices=BENCHMARK_MODELS,
+        default='resnet',
+        help='The model to run benchmark with.')
+    parser.add_argument(
+        '--batch_size', type=int, default=32, help='The minibatch size.')
+    # args related to learning rate
+    parser.add_argument(
+        '--learning_rate', type=float, default=0.001, help='The learning rate.')
+    # TODO(wuyi): add "--use_fake_data" option back.
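+    # The two flags below bound the measurement window: `skip_batch_num`
+    # warm-up batches are excluded from timing, and at most `iterations`
+    # minibatches are run per pass.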
+    parser.add_argument(
+        '--skip_batch_num',
+        type=int,
+        default=5,
+        help='The number of initial minibatches to skip before timing, for a '
+        'fairer performance test.')
+    parser.add_argument(
+        '--iterations', type=int, default=80, help='The number of minibatches.')
+    parser.add_argument(
+        '--pass_num', type=int, default=100, help='The number of passes.')
+    parser.add_argument(
+        '--data_format',
+        type=str,
+        default='NCHW',
+        choices=['NCHW', 'NHWC'],
+        help='The data format; currently only NCHW is supported.')
+    parser.add_argument(
+        '--device',
+        type=str,
+        default='GPU',
+        choices=['CPU', 'GPU'],
+        help='The device type.')
+    parser.add_argument(
+        '--gpus',
+        type=int,
+        default=1,
+        help='If gpus > 1, use ParallelExecutor to run, else use Executor.')
+    # this option is available only for vgg and resnet.
+    parser.add_argument(
+        '--cpus',
+        type=int,
+        default=1,
+        help='If cpus > 1, use ParallelDo to run, else use Executor.')
+    parser.add_argument(
+        '--data_set',
+        type=str,
+        default='flowers',
+        choices=['cifar10', 'flowers'],
+        help='Optional dataset for benchmark.')
+    parser.add_argument(
+        '--infer_only', action='store_true', help='If set, run forward only.')
+    parser.add_argument(
+        '--use_cprof', action='store_true', help='If set, use cProfile.')
+    parser.add_argument(
+        '--use_nvprof',
+        action='store_true',
+        help='If set, use nvprof for CUDA.')
+    parser.add_argument(
+        '--no_test',
+        action='store_true',
+        help='If set, do not test the test set during training.')
+    parser.add_argument(
+        '--memory_optimize',
+        action='store_true',
+        help='If set, optimize runtime memory before start.')
+    parser.add_argument(
+        '--use_fake_data',
+        action='store_true',
+        help='If set, omit the actual read-data operators.')
+    parser.add_argument(
+        '--profile', action='store_true', help='If set, profile a few steps.')
+    parser.add_argument(
+        '--update_method',
+        type=str,
+        default='local',
+        choices=['local', 'pserver', 'nccl2'],
+        help='Choose the parameter update method: local, pserver or nccl2.')
+    parser.add_argument(
+        '--no_split_var',
+        action='store_true',
+        default=False,
+        help='Whether to split variables into blocks when update_method is pserver.')
+    parser.add_argument(
+        '--async_mode',
+        action='store_true',
+        default=False,
+        help='Whether to start the pserver in async mode to support ASGD.')
+    parser.add_argument(
+        '--use_reader_op',
+        action='store_true',
+        help='Whether to use the reader op; the data path must be specified '
+        'if this is set.')
+    parser.add_argument(
+        '--data_path',
+        type=str,
+        default="",
+        help='Directory that contains all the training recordio files.')
+    parser.add_argument(
+        '--use_inference_transpiler',
+        action='store_true',
+        help='If set, use the inference transpiler to optimize the program.')
+    parser.add_argument(
+        '--no_random',
+        action='store_true',
+        help='If set, keep the random seed fixed and do not shuffle the data.')
+    args = parser.parse_args()
+    return args
diff --git a/benchmark/fluid/fluid_benchmark.py b/benchmark/fluid/fluid_benchmark.py
new file mode 100644
index 0000000000..6b22f8f520
--- /dev/null
+++ b/benchmark/fluid/fluid_benchmark.py
@@ -0,0 +1,370 @@
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import argparse
+import cProfile
+import time
+import os
+
+import numpy as np
+
+import paddle.fluid as fluid
+import paddle.fluid.core as core
+import paddle.fluid.profiler as profiler
+import paddle.fluid.transpiler.distribute_transpiler as distribute_transpiler
+
+from args import *
+
+
+def append_nccl2_prepare(trainer_id):
+    if trainer_id >= 0:
+        # append gen_nccl_id at the end of the startup program
+        trainer_id = int(os.getenv("PADDLE_TRAINER_ID"))
+        port = os.getenv("PADDLE_PSERVER_PORT")
+        worker_ips = os.getenv("PADDLE_TRAINER_IPS")
+        worker_endpoints = []
+        for ip in worker_ips.split(","):
+            worker_endpoints.append(':'.join([ip, port]))
+        num_trainers = len(worker_endpoints)
+        current_endpoint = os.getenv("PADDLE_CURRENT_IP") + ":" + port
+        worker_endpoints.remove(current_endpoint)
+
+        nccl_id_var = fluid.default_startup_program().global_block().create_var(
+            name="NCCLID",
+            persistable=True,
+            type=fluid.core.VarDesc.VarType.RAW)
+        fluid.default_startup_program().global_block().append_op(
+            type="gen_nccl_id",
+            inputs={},
+            outputs={"NCCLID": nccl_id_var},
+            attrs={
+                "endpoint": current_endpoint,
+                "endpoint_list": worker_endpoints,
+                "trainer_id": trainer_id
+            })
+        return nccl_id_var, num_trainers, trainer_id
+    else:
+        raise Exception("must set a non-negative PADDLE_TRAINER_ID env "
+                        "variable for nccl-based dist train.")
+
+
+def dist_transpile(trainer_id, args):
+    if trainer_id < 0:
+        return None, None
+
+    # the port of all pservers, needed by both trainer and pserver
+    port = os.getenv("PADDLE_PSERVER_PORT", "6174")
+    # comma separated ips of all pservers, needed by trainer and
+    # pserver
+    pserver_ips = os.getenv("PADDLE_PSERVER_IPS", "")
+    eplist = []
+    for ip in pserver_ips.split(","):
+        eplist.append(':'.join([ip, port]))
+    pserver_endpoints = ",".join(eplist)
+    # total number of workers/trainers in the job, needed by
+    # trainer and pserver
+    trainers = int(os.getenv("PADDLE_TRAINERS"))
+    # the IP of the local machine, needed by pserver only
+    current_endpoint = os.getenv("PADDLE_CURRENT_IP", "") + ":" + port
+    # the role, should be either PSERVER or TRAINER
+    training_role = os.getenv("PADDLE_TRAINING_ROLE")
+
+    t = distribute_transpiler.DistributeTranspiler()
+    t.transpile(
+        trainer_id,
+        pservers=pserver_endpoints,
+        trainers=trainers,
+        sync_mode=not args.async_mode)
+    if training_role == "PSERVER":
+        pserver_program = t.get_pserver_program(current_endpoint)
+        pserver_startup_program = t.get_startup_program(current_endpoint,
+                                                        pserver_program)
+        return pserver_program, pserver_startup_program
+    elif training_role == "TRAINER":
+        train_program = t.get_trainer_program()
+        return train_program, fluid.default_startup_program()
+    else:
+        raise ValueError(
+            'PADDLE_TRAINING_ROLE environment variable must be either TRAINER or PSERVER'
+        )
+
+
+def test(exe, inference_program, test_reader, feeder, batch_acc):
+    accuracy_evaluator = fluid.metrics.Accuracy()
+    for batch_id, data in enumerate(test_reader()):
+        acc = exe.run(inference_program,
+                      feed=feeder.feed(data),
+                      fetch_list=[batch_acc])
+        accuracy_evaluator.update(value=np.array(acc), weight=len(data))
+
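+    # the evaluator returns the batch-size-weighted mean accuracy over all
+    # test batches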
+    return accuracy_evaluator.eval()
+
+
+# TODO(wuyi): replace train, train_parallel, test functions with new trainer
+# API once it is ready.
+def train(avg_loss, infer_prog, optimizer, train_reader, test_reader, batch_acc,
+          args, train_prog, startup_prog):
+    if os.getenv("PADDLE_TRAINING_ROLE") == "PSERVER":
+        place = core.CPUPlace()
+        exe = fluid.Executor(place)
+        exe.run(startup_prog)
+        exe.run(train_prog)
+        return
+
+    if args.use_fake_data:
+        raise Exception(
+            "fake data is not supported in the single GPU test for now.")
+
+    place = core.CPUPlace() if args.device == 'CPU' else core.CUDAPlace(0)
+    exe = fluid.Executor(place)
+    exe.run(startup_prog)
+
+    # set up a data feeder unless data comes in through the reader op
+    if not args.use_reader_op:
+        feed_var_list = [
+            var for var in train_prog.global_block().vars.itervalues()
+            if var.is_data
+        ]
+        feeder = fluid.DataFeeder(feed_var_list, place)
+
+    iters, num_samples, start_time = 0, 0, time.time()
+    for pass_id in range(args.pass_num):
+        train_losses = []
+        if not args.use_reader_op:
+            reader_generator = train_reader()
+        batch_id = 0
+        data = None
+        while True:
+            if not args.use_reader_op:
+                data = next(reader_generator, None)
+                if data is None:
+                    break
+            if iters == args.iterations:
+                break
+            if iters == args.skip_batch_num:
+                start_time = time.time()
+                num_samples = 0
+
+            if args.use_reader_op:
+                try:
+                    loss = exe.run(train_prog, fetch_list=[avg_loss])
+                except fluid.core.EnforceNotMet as ex:
+                    break
+            else:
+                loss = exe.run(train_prog,
+                               feed=feeder.feed(data),
+                               fetch_list=[avg_loss])
+            iters += 1
+            batch_id += 1
+            # FIXME(wuyi): for use_reader_op, every batch in a pass (except
+            # possibly in the final pass) is assumed to hold exactly
+            # args.batch_size samples.
+            if args.use_reader_op:
+                num_samples += args.batch_size * args.gpus
+            else:
+                num_samples += len(data)
+            train_losses.append(loss)
+            print("Pass: %d, Iter: %d, Loss: %f\n" %
+                  (pass_id, iters, np.mean(train_losses)))
+        print_train_time(start_time, time.time(), num_samples)
+        print("Pass: %d, Loss: %f" % (pass_id, np.mean(train_losses))),
+        # evaluation
+        if not args.no_test and batch_acc and not args.use_reader_op:
+            if args.use_inference_transpiler:
+                # use the inference transpiler to speed up evaluation
+                t = fluid.InferenceTranspiler()
+                t.transpile(infer_prog, place)
+
+            pass_test_acc = test(exe, infer_prog, test_reader, feeder,
+                                 batch_acc)
+            print(", Test Accuracy: %f" % pass_test_acc)
+        print("\n")
+        # TODO(wuyi): add warmup passes to get better perf data.
+        exit(0)
+
+
+# TODO(wuyi): replace train, train_parallel, test functions with new trainer
+# API once it is ready.
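+# train_parallel mirrors train() above but drives fluid.ParallelExecutor, so
+# a single process can use multiple GPUs and, with nccl2, multiple trainers.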
+def train_parallel(avg_loss, infer_prog, optimizer, train_reader, test_reader,
+                   batch_acc, args, train_prog, startup_prog, nccl_id_var,
+                   num_trainers, trainer_id):
+    place = core.CPUPlace() if args.device == 'CPU' else core.CUDAPlace(0)
+    if not args.use_reader_op:
+        feed_var_list = [
+            var for var in train_prog.global_block().vars.itervalues()
+            if var.is_data
+        ]
+        feeder = fluid.DataFeeder(feed_var_list, place)
+
+    # generate fake data via fill_constant ops in the startup program:
+    if args.use_fake_data:
+        for var in feed_var_list:
+            v = startup_prog.global_block()._clone_variable(var)
+            var.persistable = True
+            v.persistable = True
+
+            real_shape = list(var.shape)
+            real_shape[0] = args.batch_size / args.gpus
+            startup_prog.global_block().append_op(
+                outputs={"Out": v},
+                type="fill_constant",
+                attrs={"shape": real_shape,
+                       "value": 1.0,
+                       "dtype": var.dtype})
+
+    if nccl_id_var and trainer_id == 0:
+        # FIXME(wuyi): wait for the other trainers to start listening
+        time.sleep(30)
+
+    startup_exe = fluid.Executor(place)
+    startup_exe.run(startup_prog)
+    strategy = fluid.ExecutionStrategy()
+    strategy.num_threads = 1
+    strategy.allow_op_delay = False
+    exe = fluid.ParallelExecutor(
+        True,
+        avg_loss.name,
+        exec_strategy=strategy,
+        num_trainers=num_trainers,
+        trainer_id=trainer_id)
+
+    for pass_id in range(args.pass_num):
+        num_samples = 0
+        iters = 0
+        start_time = time.time()
+        if not args.use_reader_op:
+            reader_generator = train_reader()
+        batch_id = 0
+        data = None
+        while True:
+            if not args.use_reader_op:
+                data = next(reader_generator, None)
+                if data is None:
+                    break
+            if iters == args.iterations:
+                break
+            if args.profile and pass_id == 0 and batch_id == 5:
+                profiler.start_profiler("All")
+            elif args.profile and pass_id == 0 and batch_id == 10:
+                profiler.stop_profiler("total", "/tmp/profile_%d" % trainer_id)
+
+            if iters == args.skip_batch_num:
+                start_time = time.time()
+                num_samples = 0
+            if args.use_fake_data or args.use_reader_op:
+                try:
+                    loss, = exe.run([avg_loss.name])
+                except fluid.core.EnforceNotMet as ex:
+                    break
+            else:
+                loss, = exe.run([avg_loss.name], feed=feeder.feed(data))
+            if args.use_reader_op:
+                num_samples += args.batch_size * args.gpus
+            else:
+                num_samples += len(data)
+            iters += 1
+            if batch_id % 1 == 0:
+                print("Pass %d, batch %d, loss %s" %
                      (pass_id, batch_id, np.array(loss)))
+            batch_id += 1
+
+        print_train_time(start_time, time.time(), num_samples)
+        if not args.no_test and batch_acc and not args.use_reader_op:
+            # RecordIO readers are not implemented for the test set, so
+            # skip testing when args.use_reader_op is set
+            test_acc = test(startup_exe, infer_prog, test_reader, feeder,
+                            batch_acc)
+            print("Pass: %d, Test Accuracy: %f\n" % (pass_id, test_acc))
+
+
+def print_arguments(args):
+    vars(args)['use_nvprof'] = (vars(args)['use_nvprof'] and
+                                vars(args)['device'] == 'GPU')
+    print('----------- Configuration Arguments -----------')
+    for arg, value in sorted(vars(args).iteritems()):
+        print('%s: %s' % (arg, value))
+    print('------------------------------------------------')
+
+
+def print_train_time(start_time, end_time, num_samples):
+    train_elapsed = end_time - start_time
+    examples_per_sec = num_samples / train_elapsed
+    print('\nTotal examples: %d, total time: %.5f, %.5f examples/sec\n' %
+          (num_samples, train_elapsed, examples_per_sec))
+
+
+def print_paddle_envs():
+    print('----------- Configuration envs -----------')
+    for k in os.environ:
+        if "PADDLE_" in k:
+            print("ENV %s:%s" % (k, os.environ[k]))
+    print('------------------------------------------------')
+
+
+def main():
+    args = parse_args()
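+    # echo the parsed flags and the PADDLE_* environment first, so cluster
+    # logs always record the effective configuration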
+    print_arguments(args)
+    print_paddle_envs()
+    if args.no_random:
+        fluid.default_startup_program().random_seed = 1
+
+    # the unique trainer id, starting from 0, needed by trainer
+    # only
+    nccl_id_var, num_trainers, trainer_id = (
+        None, 1, int(os.getenv("PADDLE_TRAINER_ID", "0")))
+
+    if args.use_cprof:
+        pr = cProfile.Profile()
+        pr.enable()
+    model_def = __import__("models.%s" % args.model, fromlist=["models"])
+    train_args = list(model_def.get_model(args))
+    train_args.append(args)
+    # Run optimizer.minimize(avg_loss)
+    train_args[2].minimize(train_args[0])
+    if args.memory_optimize:
+        fluid.memory_optimize(fluid.default_main_program())
+
+    if args.update_method == "pserver":
+        train_prog, startup_prog = dist_transpile(trainer_id, args)
+        if not train_prog:
+            raise Exception(
+                "Must configure the correct environment variables to run dist train.")
+        train_args.extend([train_prog, startup_prog])
+        if args.gpus > 1 and os.getenv("PADDLE_TRAINING_ROLE") == "TRAINER":
+            train_args.extend([nccl_id_var, num_trainers, trainer_id])
+            train_parallel(*train_args)
+        train(*train_args)
+        exit(0)
+
+    # for other update methods, use default programs
+    train_args.append(fluid.default_main_program())
+    train_args.append(fluid.default_startup_program())
+
+    if args.update_method == "nccl2":
+        nccl_id_var, num_trainers, trainer_id = append_nccl2_prepare(trainer_id)
+    if args.gpus == 1:
+        # NOTE: ParallelExecutor uses the profiler internally
+        if args.use_nvprof and args.device == 'GPU':
+            with profiler.cuda_profiler("cuda_profiler.txt", 'csv') as nvprof:
+                train(*train_args)
+        else:
+            train(*train_args)
+    else:
+        if args.device == "CPU":
+            raise Exception("Only support GPU perf with parallel exe")
+        train_args.extend([nccl_id_var, num_trainers, trainer_id])
+        train_parallel(*train_args)
+
+
+if __name__ == "__main__":
+    main()
diff --git a/benchmark/fluid/kube_gen_job.py b/benchmark/fluid/kube_gen_job.py
new file mode 100644
index 0000000000..dfe8b5cdd5
--- /dev/null
+++ b/benchmark/fluid/kube_gen_job.py
@@ -0,0 +1,197 @@
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
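+# kube_gen_job.py renders the dict templates in kube_templates/ into the
+# pserver/trainer yaml files that `kubectl create -f <jobname>/` can submit.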
+
+import yaml
+import copy
+import argparse
+import random
+import os
+from kube_templates import pserver, trainer, envs
+
+
+def parse_args():
+    parser = argparse.ArgumentParser(description='Generate dist job yamls.')
+
+    parser.add_argument(
+        '--jobname', default="paddlejob", help='unique job name')
+    parser.add_argument(
+        '--cpu', default=1, type=int, help='CPU cores per trainer node')
+    parser.add_argument(
+        '--pscpu', default=1, type=int, help='CPU cores per pserver node')
+    parser.add_argument(
+        '--gpu', default=0, type=int, help='num of GPUs per node')
+    parser.add_argument(
+        '--image',
+        default="bootstrapper:5000/fluid_benchmark:gpu",
+        help='Docker image to run the job with')
+    parser.add_argument(
+        '--pservers', default=1, type=int, help='num of pservers')
+    parser.add_argument(
+        '--trainers', default=1, type=int, help='num of trainers')
+    parser.add_argument('--memory', default=1, type=int, help='trainer memory')
+    parser.add_argument(
+        '--psmemory', default=1, type=int, help='pserver memory')
+    parser.add_argument(
+        '--port', default=30236, type=int, help='pserver port')
+    parser.add_argument(
+        '--entry', default="python train.py", help='command to run')
+    parser.add_argument(
+        '--fluid', default=1, type=int, help='whether this is a fluid job')
+    parser.add_argument(
+        '--rdma', action='store_true', help='whether to mount rdma libs')
+    parser.add_argument(
+        '--disttype',
+        default="pserver",
+        type=str,
+        choices=['pserver', 'nccl2', 'local'],
+        help='pserver or nccl2 or local')
+
+    args = parser.parse_args()
+    return args
+
+
+def gen_job():
+    ps = pserver
+    tn = trainer
+    args = parse_args()
+
+    ps_container = ps["spec"]["template"]["spec"]["containers"][0]
+    tn_container = tn["spec"]["template"]["spec"]["containers"][0]
+
+    if args.fluid == 1:
+        ps_container["command"] = \
+            ["paddle_k8s", "start_fluid"]
+        tn_container["command"] = \
+            ["paddle_k8s", "start_fluid"]
+    ps["metadata"]["name"] = args.jobname + "-pserver"
+    ps["spec"]["template"]["metadata"]["labels"][
+        "paddle-job-pserver"] = args.jobname
+    tn["metadata"]["name"] = args.jobname + "-trainer"
+    tn["spec"]["template"]["metadata"]["labels"]["paddle-job"] = args.jobname
+
+    ps_container["image"] = args.image
+    tn_container["image"] = args.image
+
+    ps_container["resources"]["requests"]["cpu"] = str(args.pscpu)
+    ps_container["resources"]["requests"]["memory"] = str(args.psmemory) + "Gi"
+    ps_container["resources"]["limits"]["cpu"] = str(args.pscpu)
+    ps_container["resources"]["limits"]["memory"] = str(args.psmemory) + "Gi"
+
+    tn_container["resources"]["requests"]["cpu"] = str(args.cpu)
+    tn_container["resources"]["requests"]["memory"] = str(args.memory) + "Gi"
+    tn_container["resources"]["limits"]["cpu"] = str(args.cpu)
+    tn_container["resources"]["limits"]["memory"] = str(args.memory) + "Gi"
+    if args.gpu > 0:
+        tn_container["resources"]["requests"][
+            "alpha.kubernetes.io/nvidia-gpu"] = str(args.gpu)
+        tn_container["resources"]["limits"][
+            "alpha.kubernetes.io/nvidia-gpu"] = str(args.gpu)
+
+    ps["spec"]["replicas"] = int(args.pservers)
+    tn["spec"]["parallelism"] = int(args.trainers)
+    tn["spec"]["completions"] = int(args.trainers)
+    ps_container["ports"][0]["name"] = "jobport-" + str(args.port)
+    ps_container["ports"][0]["containerPort"] = args.port
+    spreadport = random.randint(40000, 60000)
+    tn_container["ports"][0]["name"] = "spr-" + str(spreadport)
+    tn_container["ports"][0]["containerPort"] = spreadport
+
+    envs.append({"name": "PADDLE_JOB_NAME", "value": args.jobname})
+    envs.append({"name":
"PADDLE_TRAINERS", "value": str(args.trainers)}) + envs.append({"name": "PADDLE_PSERVERS", "value": str(args.pservers)}) + envs.append({"name": "ENTRY", "value": args.entry}) + envs.append({"name": "PADDLE_PSERVER_PORT", "value": str(args.port)}) + # NOTE: these directories below are cluster specific, please modify + # this settings before you run on your own cluster. + envs.append({ + "name": "LD_LIBRARY_PATH", + "value": + "/usr/local/lib:/usr/local/nvidia/lib64:/usr/local/rdma/lib64:/usr/lib64/mlnx_ofed/valgrind" + }) + + volumes = [{ + "name": "nvidia-driver", + "hostPath": { + "path": "/usr/local/nvidia/lib64" + } + }] + volumeMounts = [{ + "mountPath": "/usr/local/nvidia/lib64", + "name": "nvidia-driver" + }] + + if args.rdma: + volumes.extend([{ + "name": "ibetc", + "hostPath": { + "path": "/etc/libibverbs.d" + } + }, { + "name": "iblibs", + "hostPath": { + "path": "/usr/local/rdma" + } + }, { + "name": "valgrind", + "hostPath": { + "path": "/usr/lib64/mlnx_ofed/valgrind" + } + }]) + volumeMounts.extend([{ + "mountPath": "/etc/libibverbs.d", + "name": "ibetc" + }, { + "mountPath": "/usr/local/rdma", + "name": "iblibs" + }, { + "mountPath": "/usr/lib64/mlnx_ofed/valgrind", + "name": "valgrind" + }]) + # append shm for NCCL2 + volumes.append({"name": "dshm", "emptyDir": {"medium": "Memory"}}) + volumeMounts.append({"mountPath": "/dev/shm", "name": "dshm"}) + + tn["spec"]["template"]["spec"]["volumes"] = volumes + tn_container["volumeMounts"] = volumeMounts + + ps_container["env"] = copy.deepcopy(envs) + ps_container["env"].append({ + "name": "PADDLE_TRAINING_ROLE", + "value": "PSERVER" + }) + tn_container["env"] = envs + if args.disttype == "pserver": + tn_container["env"].append({ + "name": "PADDLE_TRAINING_ROLE", + "value": "TRAINER" + }) + elif args.disttype == "nccl2" or args.disttype == "local": + # NCCL2 have no training role, set to plain WORKER + tn_container["env"].append({ + "name": "PADDLE_TRAINING_ROLE", + "value": "WORKER" + }) + + os.mkdir(args.jobname) + if args.disttype == "pserver": + with open("%s/pserver.yaml" % args.jobname, "w") as fn: + yaml.dump(ps, fn) + + with open("%s/trainer.yaml" % args.jobname, "w") as fn: + yaml.dump(tn, fn) + + +if __name__ == "__main__": + gen_job() diff --git a/benchmark/fluid/kube_templates/__init__.py b/benchmark/fluid/kube_templates/__init__.py new file mode 100644 index 0000000000..2d09d940a5 --- /dev/null +++ b/benchmark/fluid/kube_templates/__init__.py @@ -0,0 +1,66 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from pserver import pserver +from trainer import trainer + +__all__ = ["pserver", "trainer", "envs"] + +envs = [ + # envs that don't need to change + { + "name": "GLOG_v", + "value": "0" + }, + { + "name": "GLOG_logtostderr", + "value": "1" + }, + { + "name": "TOPOLOGY", + "value": "" + }, + { + "name": "TRAINER_PACKAGE", + "value": "/workspace" + }, + { + "name": "PADDLE_INIT_NICS", + "value": "eth2" + }, + { + "name": "NAMESPACE", + "valueFrom": { + "fieldRef": { + "fieldPath": "metadata.namespace" + } + } + }, + { + "name": "POD_IP", + "valueFrom": { + "fieldRef": { + "fieldPath": "status.podIP" + } + } + }, + { + "name": "PADDLE_CURRENT_IP", + "valueFrom": { + "fieldRef": { + "fieldPath": "status.podIP" + } + } + } +] diff --git a/benchmark/fluid/kube_templates/pserver.py b/benchmark/fluid/kube_templates/pserver.py new file mode 100644 index 0000000000..b54982c806 --- /dev/null +++ b/benchmark/fluid/kube_templates/pserver.py @@ -0,0 +1,58 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +pserver = { + "apiVersion": "extensions/v1beta1", + "kind": "ReplicaSet", + "metadata": { + "name": "jobname-pserver" + }, + "spec": { + "replicas": 1, + "template": { + "metadata": { + "labels": { + "paddle-job-pserver": "jobname" + } + }, + "spec": { + "hostNetwork": True, + "imagePullSecrets": [{ + "name": "job-registry-secret" + }], + "containers": [{ + "name": "pserver", + "image": "", + "imagePullPolicy": "Always", + "ports": [{ + "name": "jobport-1", + "containerPort": 1 + }], + "env": [], + "command": ["paddle_k8s", "start_pserver"], + "resources": { + "requests": { + "memory": "10Gi", + "cpu": "4" + }, + "limits": { + "memory": "10Gi", + "cpu": "4" + } + } + }] + } + } + } +} diff --git a/benchmark/fluid/kube_templates/trainer.py b/benchmark/fluid/kube_templates/trainer.py new file mode 100644 index 0000000000..b915d31e37 --- /dev/null +++ b/benchmark/fluid/kube_templates/trainer.py @@ -0,0 +1,70 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
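+# Kubernetes Job template for trainers; kube_gen_job.py fills in the job name,
+# image, ports, resources and env before writing trainer.yaml.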
+ +trainer = { + "apiVersion": "batch/v1", + "kind": "Job", + "metadata": { + "name": "jobname-pserver" + }, + "spec": { + "parallelism": 4, + "completions": 4, + "template": { + "metadata": { + "labels": { + "paddle-job": "jobname" + } + }, + "spec": { + "hostNetwork": True, + "imagePullSecrets": [{ + "name": "job-registry-secret" + }], + "restartPolicy": "Never", + "containers": [{ + "name": "trainer", + "image": "", + "imagePullPolicy": "Always", + # to let container set rlimit + "securityContext": { + "privileged": True + # TODO(wuyi): use below specific cap instead of privileged, + # using privileged will cause all GPU device are visible + # in the container. + # "capabilities": { + # "add": ["SYS_RESOURCE"] + # } + }, + "ports": [{ + "name": "jobport-1", + "containerPort": 1 + }], + "env": [], + "command": ["paddle_k8s", "start_trainer", "v2"], + "resources": { + "requests": { + "memory": "10Gi", + "cpu": "4", + }, + "limits": { + "memory": "10Gi", + "cpu": "4", + } + } + }] + } + } + } +} diff --git a/benchmark/fluid/mnist.py b/benchmark/fluid/mnist.py deleted file mode 100644 index 1e2185dfac..0000000000 --- a/benchmark/fluid/mnist.py +++ /dev/null @@ -1,224 +0,0 @@ -# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import numpy as np -import argparse -import time - -import paddle -import paddle.fluid as fluid -import paddle.fluid.profiler as profiler - -SEED = 1 -DTYPE = "float32" - -# random seed must set before configuring the network. 
-# fluid.default_startup_program().random_seed = SEED - - -def parse_args(): - parser = argparse.ArgumentParser("mnist model benchmark.") - parser.add_argument( - '--batch_size', type=int, default=128, help='The minibatch size.') - parser.add_argument( - '--skip_batch_num', - type=int, - default=5, - help='The first num of minibatch num to skip, for better performance test' - ) - parser.add_argument( - '--iterations', type=int, default=35, help='The number of minibatches.') - parser.add_argument( - '--pass_num', type=int, default=5, help='The number of passes.') - parser.add_argument( - '--device', - type=str, - default='GPU', - choices=['CPU', 'GPU'], - help='The device type.') - parser.add_argument( - '--infer_only', action='store_true', help='If set, run forward only.') - parser.add_argument( - '--use_cprof', action='store_true', help='If set, use cProfile.') - parser.add_argument( - '--use_nvprof', - action='store_true', - help='If set, use nvprof for CUDA.') - parser.add_argument( - '--with_test', - action='store_true', - help='If set, test the testset during training.') - args = parser.parse_args() - return args - - -def cnn_model(data): - conv_pool_1 = fluid.nets.simple_img_conv_pool( - input=data, - filter_size=5, - num_filters=20, - pool_size=2, - pool_stride=2, - act="relu") - conv_pool_2 = fluid.nets.simple_img_conv_pool( - input=conv_pool_1, - filter_size=5, - num_filters=50, - pool_size=2, - pool_stride=2, - act="relu") - - # TODO(dzhwinter) : refine the initializer and random seed settting - SIZE = 10 - input_shape = conv_pool_2.shape - param_shape = [reduce(lambda a, b: a * b, input_shape[1:], 1)] + [SIZE] - scale = (2.0 / (param_shape[0]**2 * SIZE))**0.5 - - predict = fluid.layers.fc( - input=conv_pool_2, - size=SIZE, - act="softmax", - param_attr=fluid.param_attr.ParamAttr( - initializer=fluid.initializer.NormalInitializer( - loc=0.0, scale=scale))) - return predict - - -def eval_test(exe, batch_acc, batch_size_tensor, inference_program): - test_reader = paddle.batch( - paddle.dataset.mnist.test(), batch_size=args.batch_size) - test_pass_acc = fluid.average.WeightedAverage() - for batch_id, data in enumerate(test_reader()): - img_data = np.array(map(lambda x: x[0].reshape([1, 28, 28]), - data)).astype(DTYPE) - y_data = np.array(map(lambda x: x[1], data)).astype("int64") - y_data = y_data.reshape([len(y_data), 1]) - - acc, weight = exe.run(inference_program, - feed={"pixel": img_data, - "label": y_data}, - fetch_list=[batch_acc, batch_size_tensor]) - test_pass_acc.add(value=acc, weight=weight) - pass_acc = test_pass_acc.eval() - return pass_acc - - -def run_benchmark(model, args): - if args.use_cprof: - pr = cProfile.Profile() - pr.enable() - start_time = time.time() - # Input data - images = fluid.layers.data(name='pixel', shape=[1, 28, 28], dtype=DTYPE) - label = fluid.layers.data(name='label', shape=[1], dtype='int64') - - # Train program - predict = model(images) - cost = fluid.layers.cross_entropy(input=predict, label=label) - avg_cost = fluid.layers.mean(x=cost) - - # Evaluator - batch_size_tensor = fluid.layers.create_tensor(dtype='int64') - batch_acc = fluid.layers.accuracy( - input=predict, label=label, total=batch_size_tensor) - - # inference program - inference_program = fluid.default_main_program().clone() - - # Optimization - opt = fluid.optimizer.AdamOptimizer( - learning_rate=0.001, beta1=0.9, beta2=0.999) - opt.minimize(avg_cost) - - fluid.memory_optimize(fluid.default_main_program()) - - # Initialize executor - place = fluid.CPUPlace() if args.device == 
'CPU' else fluid.CUDAPlace(0) - exe = fluid.Executor(place) - - # Parameter initialization - exe.run(fluid.default_startup_program()) - - # Reader - train_reader = paddle.batch( - paddle.dataset.mnist.train(), batch_size=args.batch_size) - - accuracy = fluid.metrics.Accuracy() - iters, num_samples, start_time = 0, 0, time.time() - for pass_id in range(args.pass_num): - accuracy.reset() - train_accs = [] - train_losses = [] - for batch_id, data in enumerate(train_reader()): - if iters == args.skip_batch_num: - start_time = time.time() - num_samples = 0 - if iters == args.iterations: - break - img_data = np.array( - map(lambda x: x[0].reshape([1, 28, 28]), data)).astype(DTYPE) - y_data = np.array(map(lambda x: x[1], data)).astype("int64") - y_data = y_data.reshape([len(y_data), 1]) - - outs = exe.run( - fluid.default_main_program(), - feed={"pixel": img_data, - "label": y_data}, - fetch_list=[avg_cost, batch_acc, batch_size_tensor] - ) # The accuracy is the accumulation of batches, but not the current batch. - accuracy.update(value=outs[1], weight=outs[2]) - iters += 1 - num_samples += len(y_data) - loss = np.array(outs[0]) - acc = np.array(outs[1]) - train_losses.append(loss) - train_accs.append(acc) - print("Pass: %d, Iter: %d, Loss: %f, Accuracy: %f" % - (pass_id, iters, loss, acc)) - - print("Pass: %d, Loss: %f, Train Accuray: %f\n" % - (pass_id, np.mean(train_losses), np.mean(train_accs))) - train_elapsed = time.time() - start_time - examples_per_sec = num_samples / train_elapsed - - print('\nTotal examples: %d, total time: %.5f, %.5f examples/sed\n' % - (num_samples, train_elapsed, examples_per_sec)) - # evaluation - if args.with_test: - test_avg_acc = eval_test(exe, batch_acc, batch_size_tensor, - inference_program) - exit(0) - - -def print_arguments(args): - vars(args)['use_nvprof'] = (vars(args)['use_nvprof'] and - vars(args)['device'] == 'GPU') - print('----------- mnist Configuration Arguments -----------') - for arg, value in sorted(vars(args).iteritems()): - print('%s: %s' % (arg, value)) - print('------------------------------------------------') - - -if __name__ == '__main__': - args = parse_args() - print_arguments(args) - if args.use_nvprof and args.device == 'GPU': - with profiler.cuda_profiler("cuda_profiler.txt", 'csv') as nvprof: - run_benchmark(cnn_model, args) - else: - run_benchmark(cnn_model, args) diff --git a/benchmark/fluid/models/__init__.py b/benchmark/fluid/models/__init__.py new file mode 100644 index 0000000000..1c3fcac8dd --- /dev/null +++ b/benchmark/fluid/models/__init__.py @@ -0,0 +1,17 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
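+# Each name below is a module under models/ that fluid_benchmark.py loads via
+# __import__("models.%s" % args.model) and asks for get_model(args).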
+ +__all__ = [ + "machine_translation", "resnet", "vgg", "mnist", "stacked_dynamic_lstm" +] diff --git a/benchmark/fluid/machine_translation.py b/benchmark/fluid/models/machine_translation.py similarity index 56% rename from benchmark/fluid/machine_translation.py rename to benchmark/fluid/models/machine_translation.py index adde5f21ac..17f6b03826 100644 --- a/benchmark/fluid/machine_translation.py +++ b/benchmark/fluid/models/machine_translation.py @@ -27,74 +27,6 @@ import paddle.fluid.core as core import paddle.fluid.framework as framework from paddle.fluid.executor import Executor -parser = argparse.ArgumentParser(description=__doc__) -parser.add_argument( - "--embedding_dim", - type=int, - default=512, - help="The dimension of embedding table. (default: %(default)d)") -parser.add_argument( - "--encoder_size", - type=int, - default=512, - help="The size of encoder bi-rnn unit. (default: %(default)d)") -parser.add_argument( - "--decoder_size", - type=int, - default=512, - help="The size of decoder rnn unit. (default: %(default)d)") -parser.add_argument( - "--batch_size", - type=int, - default=16, - help="The sequence number of a mini-batch data. (default: %(default)d)") -parser.add_argument( - '--skip_batch_num', - type=int, - default=5, - help='The first num of minibatch num to skip, for better performance test') -parser.add_argument( - '--iterations', type=int, default=80, help='The number of minibatches.') -parser.add_argument( - "--dict_size", - type=int, - default=30000, - help="The dictionary capacity. Dictionaries of source sequence and " - "target dictionary have same capacity. (default: %(default)d)") -parser.add_argument( - "--pass_num", - type=int, - default=2, - help="The pass number to train. (default: %(default)d)") -parser.add_argument( - "--learning_rate", - type=float, - default=0.0002, - help="Learning rate used to train the model. (default: %(default)f)") -parser.add_argument( - "--infer_only", action='store_true', help="If set, run forward only.") -parser.add_argument( - "--beam_size", - type=int, - default=3, - help="The width for beam searching. (default: %(default)d)") -parser.add_argument( - '--device', - type=str, - default='GPU', - choices=['CPU', 'GPU'], - help="The device type.") -parser.add_argument( - "--max_length", - type=int, - default=250, - help="The maximum length of sequence when doing generation. 
" - "(default: %(default)d)") -parser.add_argument( - '--with_test', - action='store_true', - help='If set, test the testset during training.') - def lstm_step(x_t, hidden_t_prev, cell_t_prev, size): def linear(inputs): @@ -241,21 +173,6 @@ def seq_to_seq_net(embedding_dim, encoder_size, decoder_size, source_dict_dim, return avg_cost, feeding_list -def to_lodtensor(data, place): - seq_lens = [len(seq) for seq in data] - cur_len = 0 - lod = [cur_len] - for l in seq_lens: - cur_len += l - lod.append(cur_len) - flattened_data = np.concatenate(data, axis=0).astype("int64") - flattened_data = flattened_data.reshape([len(flattened_data), 1]) - lod_t = core.LoDTensor() - lod_t.set(flattened_data, place) - lod_t.set_lod([lod]) - return lod_t, lod[-1] - - def lodtensor_to_ndarray(lod_tensor): dims = lod_tensor.get_dims() ndarray = np.zeros(shape=dims).astype('float32') @@ -264,116 +181,39 @@ def lodtensor_to_ndarray(lod_tensor): return ndarray -def train(): +def get_model(args): + if args.use_reader_op: + raise Exception("machine_translation do not support reader op for now.") + embedding_dim = 512 + encoder_size = 512 + decoder_size = 512 + dict_size = 30000 + beam_size = 3 + max_length = 250 avg_cost, feeding_list = seq_to_seq_net( - args.embedding_dim, - args.encoder_size, - args.decoder_size, - args.dict_size, - args.dict_size, + embedding_dim, + encoder_size, + decoder_size, + dict_size, + dict_size, False, - beam_size=args.beam_size, - max_length=args.max_length) + beam_size=beam_size, + max_length=max_length) # clone from default main program inference_program = fluid.default_main_program().clone() optimizer = fluid.optimizer.Adam(learning_rate=args.learning_rate) - optimizer.minimize(avg_cost) - - fluid.memory_optimize(fluid.default_main_program()) train_batch_generator = paddle.batch( paddle.reader.shuffle( - paddle.dataset.wmt14.train(args.dict_size), buf_size=1000), - batch_size=args.batch_size) + paddle.dataset.wmt14.train(dict_size), buf_size=1000), + batch_size=args.batch_size * args.gpus) test_batch_generator = paddle.batch( paddle.reader.shuffle( - paddle.dataset.wmt14.test(args.dict_size), buf_size=1000), + paddle.dataset.wmt14.test(dict_size), buf_size=1000), batch_size=args.batch_size) - place = core.CPUPlace() if args.device == 'CPU' else core.CUDAPlace(0) - exe = Executor(place) - exe.run(framework.default_startup_program()) - - def do_validation(): - total_loss = 0.0 - count = 0 - for batch_id, data in enumerate(test_batch_generator()): - src_seq = to_lodtensor(map(lambda x: x[0], data), place)[0] - trg_seq = to_lodtensor(map(lambda x: x[1], data), place)[0] - lbl_seq = to_lodtensor(map(lambda x: x[2], data), place)[0] - - fetch_outs = exe.run(inference_program, - feed={ - feeding_list[0]: src_seq, - feeding_list[1]: trg_seq, - feeding_list[2]: lbl_seq - }, - fetch_list=[avg_cost], - return_numpy=False) - - total_loss += lodtensor_to_ndarray(fetch_outs[0])[0] - count += 1 - - return total_loss / count - - iters, num_samples, start_time = 0, 0, time.time() - for pass_id in xrange(args.pass_num): - train_accs = [] - train_losses = [] - for batch_id, data in enumerate(train_batch_generator()): - if iters == args.skip_batch_num: - start_time = time.time() - num_samples = 0 - if iters == args.iterations: - break - src_seq, word_num = to_lodtensor(map(lambda x: x[0], data), place) - num_samples += word_num - trg_seq, word_num = to_lodtensor(map(lambda x: x[1], data), place) - num_samples += word_num - lbl_seq, _ = to_lodtensor(map(lambda x: x[2], data), place) - - fetch_outs = 
exe.run(framework.default_main_program(), - feed={ - feeding_list[0]: src_seq, - feeding_list[1]: trg_seq, - feeding_list[2]: lbl_seq - }, - fetch_list=[avg_cost]) - - iters += 1 - loss = np.array(fetch_outs[0]) - print( - "Pass = %d, Iter = %d, Loss = %f" % (pass_id, iters, loss) - ) # The accuracy is the accumulation of batches, but not the current batch. - - train_elapsed = time.time() - start_time - examples_per_sec = num_samples / train_elapsed - print('\nTotal examples: %d, total time: %.5f, %.5f examples/sed\n' % - (num_samples, train_elapsed, examples_per_sec)) - # evaluation - if args.with_test: - test_loss = do_validation() - exit(0) - - -def infer(): - pass - - -def print_arguments(args): - print('----------- seq2seq Configuration Arguments -----------') - for arg, value in sorted(vars(args).iteritems()): - print('%s: %s' % (arg, value)) - print('------------------------------------------------') - - -if __name__ == '__main__': - args = parser.parse_args() - print_arguments(args) - if args.infer_only: - infer() - else: - train() + return avg_cost, inference_program, optimizer, train_batch_generator, \ + test_batch_generator, None diff --git a/benchmark/fluid/models/mnist.py b/benchmark/fluid/models/mnist.py new file mode 100644 index 0000000000..8e740dc689 --- /dev/null +++ b/benchmark/fluid/models/mnist.py @@ -0,0 +1,125 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import numpy as np +import argparse +import time +import cProfile +import os + +import paddle +import paddle.fluid as fluid +import paddle.fluid.profiler as profiler + +SEED = 1 +DTYPE = "float32" + +# random seed must set before configuring the network. 
+# fluid.default_startup_program().random_seed = SEED + + +def cnn_model(data): + conv_pool_1 = fluid.nets.simple_img_conv_pool( + input=data, + filter_size=5, + num_filters=20, + pool_size=2, + pool_stride=2, + act="relu") + conv_pool_2 = fluid.nets.simple_img_conv_pool( + input=conv_pool_1, + filter_size=5, + num_filters=50, + pool_size=2, + pool_stride=2, + act="relu") + + # TODO(dzhwinter) : refine the initializer and random seed settting + SIZE = 10 + input_shape = conv_pool_2.shape + param_shape = [reduce(lambda a, b: a * b, input_shape[1:], 1)] + [SIZE] + scale = (2.0 / (param_shape[0]**2 * SIZE))**0.5 + + predict = fluid.layers.fc( + input=conv_pool_2, + size=SIZE, + act="softmax", + param_attr=fluid.param_attr.ParamAttr( + initializer=fluid.initializer.NormalInitializer( + loc=0.0, scale=scale))) + return predict + + +def get_model(args): + if args.use_reader_op: + filelist = [ + os.path.join(args.data_path, f) for f in os.listdir(args.data_path) + ] + data_file = fluid.layers.open_files( + filenames=filelist, + shapes=[[-1, 1, 28, 28], (-1, 1)], + lod_levels=[0, 0], + dtypes=["float32", "int64"], + thread_num=args.gpus, + pass_num=args.pass_num) + data_file = fluid.layers.double_buffer( + fluid.layers.batch( + data_file, batch_size=args.batch_size)) + images, label = fluid.layers.read_file(data_file) + else: + images = fluid.layers.data(name='pixel', shape=[1, 28, 28], dtype=DTYPE) + label = fluid.layers.data(name='label', shape=[1], dtype='int64') + + if args.device == 'CPU' and args.cpus > 1: + places = fluid.layers.get_places(args.cpus) + pd = fluid.layers.ParallelDo(places) + with pd.do(): + predict = cnn_model(pd.read_input(images)) + label = pd.read_input(label) + cost = fluid.layers.cross_entropy(input=predict, label=label) + avg_cost = fluid.layers.mean(x=cost) + batch_acc = fluid.layers.accuracy(input=predict, label=label) + + pd.write_output(avg_cost) + pd.write_output(batch_acc) + + avg_cost, batch_acc = pd() + avg_cost = fluid.layers.mean(avg_cost) + batch_acc = fluid.layers.mean(batch_acc) + else: + # Train program + predict = cnn_model(images) + cost = fluid.layers.cross_entropy(input=predict, label=label) + avg_cost = fluid.layers.mean(x=cost) + + # Evaluator + batch_acc = fluid.layers.accuracy(input=predict, label=label) + + # inference program + inference_program = fluid.default_main_program().clone() + + # Optimization + opt = fluid.optimizer.AdamOptimizer( + learning_rate=0.001, beta1=0.9, beta2=0.999) + + # Reader + train_reader = paddle.batch( + paddle.dataset.mnist.train(), batch_size=args.batch_size * args.gpus) + test_reader = paddle.batch( + paddle.dataset.mnist.test(), batch_size=args.batch_size) + return avg_cost, inference_program, opt, train_reader, test_reader, batch_acc diff --git a/benchmark/fluid/models/resnet.py b/benchmark/fluid/models/resnet.py new file mode 100644 index 0000000000..d44a9c07d3 --- /dev/null +++ b/benchmark/fluid/models/resnet.py @@ -0,0 +1,208 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import functools +import numpy as np +import time +import os + +import cProfile, pstats, StringIO + +import paddle +import paddle.fluid as fluid +import paddle.fluid.core as core +import paddle.fluid.profiler as profiler +from recordio_converter import imagenet_train, imagenet_test + + +def conv_bn_layer(input, ch_out, filter_size, stride, padding, act='relu'): + conv1 = fluid.layers.conv2d( + input=input, + filter_size=filter_size, + num_filters=ch_out, + stride=stride, + padding=padding, + act=None, + bias_attr=False) + return fluid.layers.batch_norm(input=conv1, act=act) + + +def shortcut(input, ch_out, stride): + ch_in = input.shape[1] # if args.data_format == 'NCHW' else input.shape[-1] + if ch_in != ch_out: + return conv_bn_layer(input, ch_out, 1, stride, 0, None) + else: + return input + + +def basicblock(input, ch_out, stride): + short = shortcut(input, ch_out, stride) + conv1 = conv_bn_layer(input, ch_out, 3, stride, 1) + conv2 = conv_bn_layer(conv1, ch_out, 3, 1, 1, act=None) + return fluid.layers.elementwise_add(x=short, y=conv2, act='relu') + + +def bottleneck(input, ch_out, stride): + short = shortcut(input, ch_out * 4, stride) + conv1 = conv_bn_layer(input, ch_out, 1, stride, 0) + conv2 = conv_bn_layer(conv1, ch_out, 3, 1, 1) + conv3 = conv_bn_layer(conv2, ch_out * 4, 1, 1, 0, act=None) + return fluid.layers.elementwise_add(x=short, y=conv3, act='relu') + + +def layer_warp(block_func, input, ch_out, count, stride): + res_out = block_func(input, ch_out, stride) + for i in range(1, count): + res_out = block_func(res_out, ch_out, 1) + return res_out + + +def resnet_imagenet(input, class_dim, depth=50, data_format='NCHW'): + + cfg = { + 18: ([2, 2, 2, 1], basicblock), + 34: ([3, 4, 6, 3], basicblock), + 50: ([3, 4, 6, 3], bottleneck), + 101: ([3, 4, 23, 3], bottleneck), + 152: ([3, 8, 36, 3], bottleneck) + } + stages, block_func = cfg[depth] + conv1 = conv_bn_layer(input, ch_out=64, filter_size=7, stride=2, padding=3) + pool1 = fluid.layers.pool2d( + input=conv1, pool_type='avg', pool_size=3, pool_stride=2) + res1 = layer_warp(block_func, pool1, 64, stages[0], 1) + res2 = layer_warp(block_func, res1, 128, stages[1], 2) + res3 = layer_warp(block_func, res2, 256, stages[2], 2) + res4 = layer_warp(block_func, res3, 512, stages[3], 2) + pool2 = fluid.layers.pool2d( + input=res4, + pool_size=7, + pool_type='avg', + pool_stride=1, + global_pooling=True) + out = fluid.layers.fc(input=pool2, size=class_dim, act='softmax') + return out + + +def resnet_cifar10(input, class_dim, depth=32, data_format='NCHW'): + assert (depth - 2) % 6 == 0 + + n = (depth - 2) // 6 + + conv1 = conv_bn_layer( + input=input, ch_out=16, filter_size=3, stride=1, padding=1) + res1 = layer_warp(basicblock, conv1, 16, n, 1) + res2 = layer_warp(basicblock, res1, 32, n, 2) + res3 = layer_warp(basicblock, res2, 64, n, 2) + pool = fluid.layers.pool2d( + input=res3, pool_size=8, pool_type='avg', pool_stride=1) + out = fluid.layers.fc(input=pool, size=class_dim, act='softmax') + return out + + +def get_model(args): + model = resnet_cifar10 + if args.data_set == "cifar10": + class_dim = 10 + if args.data_format == 'NCHW': + dshape = [3, 32, 32] + else: + dshape = [32, 32, 3] + model = resnet_cifar10 + train_reader = paddle.dataset.cifar.train10() + test_reader = paddle.dataset.cifar.test10() + elif 
args.data_set == "flowers": + class_dim = 102 + if args.data_format == 'NCHW': + dshape = [3, 224, 224] + else: + dshape = [224, 224, 3] + model = resnet_imagenet + train_reader = paddle.dataset.flowers.train() + test_reader = paddle.dataset.flowers.test() + elif args.data_set == "imagenet": + class_dim = 1000 + if args.data_format == 'NCHW': + dshape = [3, 224, 224] + else: + dshape = [224, 224, 3] + model = resnet_imagenet + if not args.data_path: + raise Exception( + "Must specify --data_path when training with imagenet") + train_reader = imagenet_train(args.data_path) + test_reader = imagenet_test(args.data_path) + + if args.use_reader_op: + filelist = [ + os.path.join(args.data_path, f) for f in os.listdir(args.data_path) + ] + data_file = fluid.layers.open_files( + filenames=filelist, + shapes=[[-1] + dshape, (-1, 1)], + lod_levels=[0, 0], + dtypes=["float32", "int64"], + thread_num=args.gpus, + pass_num=args.pass_num) + data_file = fluid.layers.double_buffer( + fluid.layers.batch( + data_file, batch_size=args.batch_size)) + input, label = fluid.layers.read_file(data_file) + else: + input = fluid.layers.data(name='data', shape=dshape, dtype='float32') + label = fluid.layers.data(name='label', shape=[1], dtype='int64') + + if args.device == 'CPU' and args.cpus > 1: + places = fluid.layers.get_places(args.cpus) + pd = fluid.layers.ParallelDo(places) + with pd.do(): + predict = model(pd.read_input(input), class_dim) + label = pd.read_input(label) + cost = fluid.layers.cross_entropy(input=predict, label=label) + avg_cost = fluid.layers.mean(x=cost) + batch_acc = fluid.layers.accuracy(input=predict, label=label) + + pd.write_output(avg_cost) + pd.write_output(batch_acc) + + avg_cost, batch_acc = pd() + avg_cost = fluid.layers.mean(avg_cost) + batch_acc = fluid.layers.mean(batch_acc) + else: + predict = model(input, class_dim) + cost = fluid.layers.cross_entropy(input=predict, label=label) + avg_cost = fluid.layers.mean(x=cost) + batch_acc = fluid.layers.accuracy(input=predict, label=label) + + inference_program = fluid.default_main_program().clone() + with fluid.program_guard(inference_program): + inference_program = fluid.io.get_inference_program( + target_vars=[batch_acc]) + + optimizer = fluid.optimizer.Momentum(learning_rate=0.01, momentum=0.9) + + batched_train_reader = paddle.batch( + train_reader if args.no_random else paddle.reader.shuffle( + train_reader, buf_size=5120), + batch_size=args.batch_size * args.gpus, + drop_last=True) + batched_test_reader = paddle.batch( + test_reader, batch_size=args.batch_size, drop_last=True) + + return avg_cost, inference_program, optimizer, batched_train_reader,\ + batched_test_reader, batch_acc diff --git a/benchmark/fluid/models/stacked_dynamic_lstm.py b/benchmark/fluid/models/stacked_dynamic_lstm.py new file mode 100644 index 0000000000..3231542a17 --- /dev/null +++ b/benchmark/fluid/models/stacked_dynamic_lstm.py @@ -0,0 +1,127 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
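A hedged sketch of how the cloned-and-pruned inference_program returned by the models above can drive evaluation; the exe and feeder setup mirrors the training sketch earlier and is assumed, not taken from this patch:

def evaluate(exe, inference_program, test_reader, feeder, batch_acc):
    accs = []
    for data in test_reader():
        # the pruned program runs only the forward pass needed for batch_acc
        acc = exe.run(inference_program,
                      feed=feeder.feed(data),
                      fetch_list=[batch_acc])[0]
        accs.append(float(acc))
    return sum(accs) / len(accs)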
+ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import argparse +import cPickle +import os +import random +import time + +import numpy +import paddle +import paddle.dataset.imdb as imdb +import paddle.fluid as fluid +import paddle.batch as batch +import paddle.fluid.profiler as profiler + +word_dict = imdb.word_dict() + + +def crop_sentence(reader, crop_size): + unk_value = word_dict['<unk>'] + + def __impl__(): + for item in reader(): + if len([x for x in item[0] if x != unk_value]) < crop_size: + yield item + + return __impl__ + + +def get_model(args): + if args.use_reader_op: + raise Exception( + "stacked_dynamic_lstm does not support reader op for now.") + lstm_size = 512 + emb_dim = 512 + crop_size = 1500 + + data = fluid.layers.data( + name="words", shape=[1], lod_level=1, dtype='int64') + sentence = fluid.layers.embedding( + input=data, size=[len(word_dict), emb_dim]) + + sentence = fluid.layers.fc(input=sentence, size=lstm_size, act='tanh') + + rnn = fluid.layers.DynamicRNN() + with rnn.block(): + word = rnn.step_input(sentence) + prev_hidden = rnn.memory(value=0.0, shape=[lstm_size]) + prev_cell = rnn.memory(value=0.0, shape=[lstm_size]) + + def gate_common( + ipt, + hidden, + size, ): + gate0 = fluid.layers.fc(input=ipt, size=size, bias_attr=True) + gate1 = fluid.layers.fc(input=hidden, size=size, bias_attr=False) + gate = fluid.layers.sums(input=[gate0, gate1]) + return gate + + forget_gate = fluid.layers.sigmoid( + x=gate_common(word, prev_hidden, lstm_size)) + input_gate = fluid.layers.sigmoid( + x=gate_common(word, prev_hidden, lstm_size)) + output_gate = fluid.layers.sigmoid( + x=gate_common(word, prev_hidden, lstm_size)) + cell_gate = fluid.layers.tanh( + x=gate_common(word, prev_hidden, lstm_size)) + + cell = fluid.layers.sums(input=[ + fluid.layers.elementwise_mul( + x=forget_gate, y=prev_cell), fluid.layers.elementwise_mul( + x=input_gate, y=cell_gate) + ]) + + hidden = fluid.layers.elementwise_mul( + x=output_gate, y=fluid.layers.tanh(x=cell)) + + rnn.update_memory(prev_cell, cell) + rnn.update_memory(prev_hidden, hidden) + rnn.output(hidden) + + last = fluid.layers.sequence_pool(rnn(), 'last') + logit = fluid.layers.fc(input=last, size=2, act='softmax') + loss = fluid.layers.cross_entropy( + input=logit, + label=fluid.layers.data( + name='label', shape=[1], dtype='int64')) + loss = fluid.layers.mean(x=loss) + + # add acc + batch_size_tensor = fluid.layers.create_tensor(dtype='int64') + batch_acc = fluid.layers.accuracy(input=logit, label=fluid.layers.data(name='label', \ + shape=[1], dtype='int64'), total=batch_size_tensor) + + inference_program = fluid.default_main_program().clone() + with fluid.program_guard(inference_program): + inference_program = fluid.io.get_inference_program( + target_vars=[batch_acc, batch_size_tensor]) + + adam = fluid.optimizer.Adam() + + train_reader = batch( + paddle.reader.shuffle( + crop_sentence(imdb.train(word_dict), crop_size), buf_size=25000), + batch_size=args.batch_size * args.gpus) + test_reader = batch( + paddle.reader.shuffle( + crop_sentence(imdb.test(word_dict), crop_size), buf_size=25000), + batch_size=args.batch_size) + + return loss, inference_program, adam, train_reader, test_reader, batch_acc diff --git a/benchmark/fluid/models/vgg.py b/benchmark/fluid/models/vgg.py new file mode 100644 index 0000000000..932601302d --- /dev/null +++ b/benchmark/fluid/models/vgg.py @@ -0,0 +1,121 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
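The hand-rolled gates in the DynamicRNN block above implement the standard LSTM recurrence. Writing f, i, o, and \tilde{c} for forget_gate, input_gate, output_gate, and cell_gate (gate_common puts the bias on the input projection only, so each gate carries a single bias term):

f_t = \sigma(W_f x_t + U_f h_{t-1} + b_f)
i_t = \sigma(W_i x_t + U_i h_{t-1} + b_i)
o_t = \sigma(W_o x_t + U_o h_{t-1} + b_o)
\tilde{c}_t = \tanh(W_c x_t + U_c h_{t-1} + b_c)
c_t = f_t \odot c_{t-1} + i_t \odot \tilde{c}_t
h_t = o_t \odot \tanh(c_t)

The fluid.layers.sums calls correspond to the affine sums inside the gates and to c_t, and elementwise_mul is the Hadamard product \odot.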
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""VGG16 benchmark in Fluid""" +from __future__ import print_function + +import sys +import time +import numpy as np +import paddle +import paddle.fluid as fluid +import paddle.fluid.core as core +import argparse +import functools +import os + + +def vgg16_bn_drop(input): + def conv_block(input, num_filter, groups, dropouts): + return fluid.nets.img_conv_group( + input=input, + pool_size=2, + pool_stride=2, + conv_num_filter=[num_filter] * groups, + conv_filter_size=3, + conv_act='relu', + conv_with_batchnorm=True, + conv_batchnorm_drop_rate=dropouts, + pool_type='max') + + conv1 = conv_block(input, 64, 2, [0.3, 0]) + conv2 = conv_block(conv1, 128, 2, [0.4, 0]) + conv3 = conv_block(conv2, 256, 3, [0.4, 0.4, 0]) + conv4 = conv_block(conv3, 512, 3, [0.4, 0.4, 0]) + conv5 = conv_block(conv4, 512, 3, [0.4, 0.4, 0]) + + drop = fluid.layers.dropout(x=conv5, dropout_prob=0.5) + fc1 = fluid.layers.fc(input=drop, size=512, act=None) + bn = fluid.layers.batch_norm(input=fc1, act='relu') + drop2 = fluid.layers.dropout(x=bn, dropout_prob=0.5) + fc2 = fluid.layers.fc(input=drop2, size=512, act=None) + return fc2 + + +def get_model(args): + if args.data_set == "cifar10": + classdim = 10 + if args.data_format == 'NCHW': + data_shape = [3, 32, 32] + else: + data_shape = [32, 32, 3] + else: + classdim = 102 + if args.data_format == 'NCHW': + data_shape = [3, 224, 224] + else: + data_shape = [224, 224, 3] + + if args.use_reader_op: + filelist = [ + os.path.join(args.data_path, f) for f in os.listdir(args.data_path) + ] + data_file = fluid.layers.open_files( + filenames=filelist, + shapes=[[-1] + data_shape, (-1, 1)], + lod_levels=[0, 0], + dtypes=["float32", "int64"], + thread_num=args.gpus, + pass_num=args.pass_num) + data_file = fluid.layers.double_buffer( + fluid.layers.batch( + data_file, batch_size=args.batch_size)) + images, label = fluid.layers.read_file(data_file) + else: + images = fluid.layers.data( + name='data', shape=data_shape, dtype='float32') + label = fluid.layers.data(name='label', shape=[1], dtype='int64') + + # Train program + net = vgg16_bn_drop(images) + predict = fluid.layers.fc(input=net, size=classdim, act='softmax') + cost = fluid.layers.cross_entropy(input=predict, label=label) + avg_cost = fluid.layers.mean(x=cost) + + # Evaluator + batch_size_tensor = fluid.layers.create_tensor(dtype='int64') + batch_acc = fluid.layers.accuracy( + input=predict, label=label, total=batch_size_tensor) + + # inference program + inference_program = fluid.default_main_program().clone() + with fluid.program_guard(inference_program): + inference_program = fluid.io.get_inference_program( + target_vars=[batch_acc, batch_size_tensor]) + + # Optimization + optimizer = fluid.optimizer.Adam(learning_rate=args.learning_rate) + + # data reader + train_reader = paddle.batch( + paddle.reader.shuffle( + paddle.dataset.cifar.train10() + if args.data_set == 'cifar10' else paddle.dataset.flowers.train(), + buf_size=5120), + batch_size=args.batch_size * args.gpus) + 
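For reference, a rough expansion of what one conv_block call above composes through fluid.nets.img_conv_group; the conv/batch-norm/dropout ordering is paraphrased from the img_conv_group defaults and not verified against this revision:

import paddle.fluid as fluid

def conv_block_expanded(input):
    # conv_block(input, 64, 2, [0.3, 0]): two 3x3 convs, each batch-normed,
    # dropout 0.3 after the first, none after the second, then 2x2 max pool
    tmp = fluid.layers.conv2d(input=input, num_filters=64, filter_size=3,
                              padding=1, act=None)
    tmp = fluid.layers.batch_norm(input=tmp, act='relu')
    tmp = fluid.layers.dropout(x=tmp, dropout_prob=0.3)
    tmp = fluid.layers.conv2d(input=tmp, num_filters=64, filter_size=3,
                              padding=1, act=None)
    tmp = fluid.layers.batch_norm(input=tmp, act='relu')
    return fluid.layers.pool2d(input=tmp, pool_size=2, pool_type='max',
                               pool_stride=2)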
test_reader = paddle.batch( + paddle.dataset.cifar.test10() + if args.data_set == 'cifar10' else paddle.dataset.flowers.test(), + batch_size=args.batch_size) + + return avg_cost, inference_program, optimizer, train_reader, test_reader, batch_acc diff --git a/benchmark/fluid/recordio_converter.py b/benchmark/fluid/recordio_converter.py new file mode 100644 index 0000000000..f2dc39109b --- /dev/null +++ b/benchmark/fluid/recordio_converter.py @@ -0,0 +1,164 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import random +import paddle +import paddle.fluid as fluid +import paddle.fluid.core as core +from paddle.dataset import mnist, cifar, flowers, image + + +def convert_2_recordio(py_reader, outfilepath, batch_size, shape_data, + shape_label): + num_batches = 0 + with fluid.program_guard(fluid.Program(), fluid.Program()): + reader = paddle.batch(py_reader(), batch_size=batch_size) + feeder = fluid.DataFeeder( + feed_list=[ # order is image and label + fluid.layers.data( + name='image', shape=shape_data), + fluid.layers.data( + name='label', shape=shape_label, dtype='int64'), + ], + place=fluid.CPUPlace()) + num_batches = fluid.recordio_writer.convert_reader_to_recordio_file( + outfilepath, reader, feeder) + return num_batches + + +def prepare_mnist(outpath, batch_size): + outfilepath = os.path.join(outpath, "mnist.recordio") + convert_2_recordio(mnist.train, outfilepath, batch_size, [784], [1]) + + +def prepare_cifar10(outpath, batch_size): + outfilepath = os.path.join(outpath, "cifar.recordio") + convert_2_recordio(cifar.train10, outfilepath, batch_size, [3, 32, 32], [1]) + + +def prepare_flowers(outpath, batch_size): + outfilepath = os.path.join(outpath, "flowers.recordio") + convert_2_recordio(flowers.train, outfilepath, batch_size, [3, 224, 224], + [1]) + + +def default_mapper(sample): + img, label = sample + img = image.simple_transform( + img, 256, 224, True, mean=[103.94, 116.78, 123.68]) + return img.flatten().astype('float32'), label + + +def imagenet_train(data_dir): + contents = os.listdir(data_dir) + if set(contents) != set( + ["train", "train.txt", "val", "val_set", "val.txt", "unzip.sh"]): + raise Exception("Imagenet data contents error!") + img2label = dict() + imgfilelist = [] + with open(os.path.join(data_dir, "train.txt")) as fn: + while 1: + l = fn.readline() + if not l: + break + img, lbl = l[:-1].split(" ") + img2label[img] = int(lbl) + imgfilelist.append(img) + # shuffle all, this is slow + random.shuffle(imgfilelist) + + def train_reader(): + for idx, imgfile in enumerate(imgfilelist): + data = image.load_image( + os.path.join(data_dir, "train", imgfile.lower())) + label = [img2label[imgfile], ] + yield [data, label] + + return paddle.reader.map_readers(default_mapper, train_reader) + + +def imagenet_test(data_dir): + contents = os.listdir(data_dir) + if set(contents) != set( + ["train", "train.txt", "val", "val_set", "val.txt", "unzip.sh"]): + raise Exception("Imagenet data contents error!") + 
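A hedged round-trip sketch for the converters above: write mnist to RecordIO, then consume it with the same open_files pipeline the model files in this patch use; the /tmp path and batch size are illustrative:

import paddle.fluid as fluid

prepare_mnist('/tmp/recordio', batch_size=32)  # writes /tmp/recordio/mnist.recordio
data_file = fluid.layers.open_files(
    filenames=['/tmp/recordio/mnist.recordio'],
    shapes=[[-1, 784], [-1, 1]],  # matches the [784]/[1] shapes passed to convert_2_recordio
    lod_levels=[0, 0],
    dtypes=['float32', 'int64'])
images, label = fluid.layers.read_file(
    fluid.layers.batch(data_file, batch_size=32))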
img2label = dict() + imgfilelist = [] + with open(os.path.join(data_dir, "val.txt")) as fn: + while 1: + l = fn.readline() + if not l: + break + img, lbl = l[:-1].split(" ") + img2label[img] = int(lbl) + imgfilelist.append(img) + + def test_reader(): + for idx, imgfile in enumerate(imgfilelist): + base_path = os.path.join(data_dir, "val", imgfile.split(".")[0]) + image_path = ".".join([base_path, "jpeg"]) + data = image.load_image(image_path) + label = [img2label[imgfile], ] + yield [data, label] + + return paddle.reader.map_readers(default_mapper, test_reader) + + +# FIXME(wuyi): delete this when https://github.com/PaddlePaddle/Paddle/pull/11066 is merged +def convert_reader_to_recordio_files( + filename, + batch_per_file, + reader_creator, + feeder, + compressor=core.RecordIOWriter.Compressor.Snappy, + max_num_records=1000, + feed_order=None): + if feed_order is None: + feed_order = feeder.feed_names + f_name, f_ext = os.path.splitext(filename) + assert (f_ext == ".recordio") + + lines = [] + f_idx = 0 + counter = 0 + for idx, batch in enumerate(reader_creator()): + lines.append(batch) + if idx >= batch_per_file and idx % batch_per_file == 0: + filename = "%s-%05d%s" % (f_name, f_idx, f_ext) + with fluid.recordio_writer.create_recordio_writer( + filename, compressor, max_num_records) as writer: + for l in lines: + res = feeder.feed(l) + for each in feed_order: + writer.append_tensor(res[each]) + writer.complete_append_tensor() + counter += 1 + lines = [] + f_idx += 1 + print("written file: ", filename) + return counter + + +def prepare_imagenet(inpath, outpath, batch_size): + r = paddle.batch(imagenet_train(inpath), batch_size=batch_size) + feeder = fluid.DataFeeder( + feed_list=[ + fluid.layers.data( + name="image", shape=[3, 224, 224]), fluid.layers.data( + name="label", shape=[1], dtype='int64') + ], + place=fluid.CPUPlace()) + outpath = os.path.join(outpath, "imagenet.recordio") + convert_reader_to_recordio_files(outpath, 10000, r, feeder) diff --git a/benchmark/fluid/resnet.py b/benchmark/fluid/resnet.py deleted file mode 100644 index 831fa2c019..0000000000 --- a/benchmark/fluid/resnet.py +++ /dev/null @@ -1,313 +0,0 @@ -# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
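convert_reader_to_recordio_files above shards its output with the "%s-%05d%s" pattern, writing one numbered file per batch_per_file batches; a quick illustration of the names prepare_imagenet produces:

import os

f_name, f_ext = os.path.splitext('imagenet.recordio')
shards = ['%s-%05d%s' % (f_name, i, f_ext) for i in range(3)]
# -> ['imagenet-00000.recordio', 'imagenet-00001.recordio', 'imagenet-00002.recordio']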
- -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import argparse -import functools -import numpy as np -import time - -import cProfile, pstats, StringIO - -import paddle -import paddle.fluid as fluid -import paddle.fluid.core as core -import paddle.fluid.profiler as profiler - - -def parse_args(): - parser = argparse.ArgumentParser('Convolution model benchmark.') - parser.add_argument( - '--model', - type=str, - choices=['resnet_imagenet', 'resnet_cifar10'], - default='resnet_imagenet', - help='The model architecture.') - parser.add_argument( - '--batch_size', type=int, default=32, help='The minibatch size.') - parser.add_argument( - '--use_fake_data', - action='store_true', - help='use real data or fake data') - parser.add_argument( - '--skip_batch_num', - type=int, - default=5, - help='The first num of minibatch num to skip, for better performance test' - ) - parser.add_argument( - '--iterations', type=int, default=80, help='The number of minibatches.') - parser.add_argument( - '--pass_num', type=int, default=100, help='The number of passes.') - parser.add_argument( - '--data_format', - type=str, - default='NCHW', - choices=['NCHW', 'NHWC'], - help='The data data_format, now only support NCHW.') - parser.add_argument( - '--device', - type=str, - default='GPU', - choices=['CPU', 'GPU'], - help='The device type.') - parser.add_argument( - '--data_set', - type=str, - default='flowers', - choices=['cifar10', 'flowers'], - help='Optional dataset for benchmark.') - parser.add_argument( - '--infer_only', action='store_true', help='If set, run forward only.') - parser.add_argument( - '--use_cprof', action='store_true', help='If set, use cProfile.') - parser.add_argument( - '--use_nvprof', - action='store_true', - help='If set, use nvprof for CUDA.') - parser.add_argument( - '--with_test', - action='store_true', - help='If set, test the testset during training.') - args = parser.parse_args() - return args - - -def conv_bn_layer(input, ch_out, filter_size, stride, padding, act='relu'): - conv1 = fluid.layers.conv2d( - input=input, - filter_size=filter_size, - num_filters=ch_out, - stride=stride, - padding=padding, - act=None, - bias_attr=False) - return fluid.layers.batch_norm(input=conv1, act=act) - - -def shortcut(input, ch_out, stride): - ch_in = input.shape[1] if args.data_format == 'NCHW' else input.shape[-1] - if ch_in != ch_out: - return conv_bn_layer(input, ch_out, 1, stride, 0, None) - else: - return input - - -def basicblock(input, ch_out, stride): - short = shortcut(input, ch_out, stride) - conv1 = conv_bn_layer(input, ch_out, 3, stride, 1) - conv2 = conv_bn_layer(conv1, ch_out, 3, 1, 1, act=None) - return fluid.layers.elementwise_add(x=short, y=conv2, act='relu') - - -def bottleneck(input, ch_out, stride): - short = shortcut(input, ch_out * 4, stride) - conv1 = conv_bn_layer(input, ch_out, 1, stride, 0) - conv2 = conv_bn_layer(conv1, ch_out, 3, 1, 1) - conv3 = conv_bn_layer(conv2, ch_out * 4, 1, 1, 0, act=None) - return fluid.layers.elementwise_add(x=short, y=conv3, act='relu') - - -def layer_warp(block_func, input, ch_out, count, stride): - res_out = block_func(input, ch_out, stride) - for i in range(1, count): - res_out = block_func(res_out, ch_out, 1) - return res_out - - -def resnet_imagenet(input, class_dim, depth=50, data_format='NCHW'): - - cfg = { - 18: ([2, 2, 2, 1], basicblock), - 34: ([3, 4, 6, 3], basicblock), - 50: ([3, 4, 6, 3], bottleneck), - 101: ([3, 4, 23, 3], bottleneck), - 152: ([3, 8, 36, 3], 
bottleneck) - } - stages, block_func = cfg[depth] - conv1 = conv_bn_layer(input, ch_out=64, filter_size=7, stride=2, padding=3) - pool1 = fluid.layers.pool2d( - input=conv1, pool_type='avg', pool_size=3, pool_stride=2) - res1 = layer_warp(block_func, pool1, 64, stages[0], 1) - res2 = layer_warp(block_func, res1, 128, stages[1], 2) - res3 = layer_warp(block_func, res2, 256, stages[2], 2) - res4 = layer_warp(block_func, res3, 512, stages[3], 2) - pool2 = fluid.layers.pool2d( - input=res4, - pool_size=7, - pool_type='avg', - pool_stride=1, - global_pooling=True) - out = fluid.layers.fc(input=pool2, size=class_dim, act='softmax') - return out - - -def resnet_cifar10(input, class_dim, depth=32, data_format='NCHW'): - assert (depth - 2) % 6 == 0 - - n = (depth - 2) // 6 - - conv1 = conv_bn_layer( - input=input, ch_out=16, filter_size=3, stride=1, padding=1) - res1 = layer_warp(basicblock, conv1, 16, n, 1) - res2 = layer_warp(basicblock, res1, 32, n, 2) - res3 = layer_warp(basicblock, res2, 64, n, 2) - pool = fluid.layers.pool2d( - input=res3, pool_size=8, pool_type='avg', pool_stride=1) - out = fluid.layers.fc(input=pool, size=class_dim, act='softmax') - return out - - -def run_benchmark(model, args): - if args.use_cprof: - pr = cProfile.Profile() - pr.enable() - - if args.data_set == "cifar10": - class_dim = 10 - if args.data_format == 'NCHW': - dshape = [3, 32, 32] - else: - dshape = [32, 32, 3] - else: - class_dim = 102 - if args.data_format == 'NCHW': - dshape = [3, 224, 224] - else: - dshape = [224, 224, 3] - - input = fluid.layers.data(name='data', shape=dshape, dtype='float32') - label = fluid.layers.data(name='label', shape=[1], dtype='int64') - predict = model(input, class_dim) - cost = fluid.layers.cross_entropy(input=predict, label=label) - avg_cost = fluid.layers.mean(x=cost) - - batch_size_tensor = fluid.layers.create_tensor(dtype='int64') - batch_acc = fluid.layers.accuracy( - input=predict, label=label, total=batch_size_tensor) - - inference_program = fluid.default_main_program().clone() - with fluid.program_guard(inference_program): - inference_program = fluid.io.get_inference_program( - target_vars=[batch_acc, batch_size_tensor]) - - optimizer = fluid.optimizer.Momentum(learning_rate=0.01, momentum=0.9) - opts = optimizer.minimize(avg_cost) - - fluid.memory_optimize(fluid.default_main_program()) - - train_reader = paddle.batch( - paddle.reader.shuffle( - paddle.dataset.cifar.train10() - if args.data_set == 'cifar10' else paddle.dataset.flowers.train(), - buf_size=5120), - batch_size=args.batch_size) - test_reader = paddle.batch( - paddle.dataset.cifar.test10() - if args.data_set == 'cifar10' else paddle.dataset.flowers.test(), - batch_size=args.batch_size) - - def test(exe): - test_accuracy = fluid.average.WeightedAverage() - for batch_id, data in enumerate(test_reader()): - img_data = np.array(map(lambda x: x[0].reshape(dshape), - data)).astype("float32") - y_data = np.array(map(lambda x: x[1], data)).astype("int64") - y_data = y_data.reshape([-1, 1]) - - acc, weight = exe.run(inference_program, - feed={"data": img_data, - "label": y_data}, - fetch_list=[batch_acc, batch_size_tensor]) - test_accuracy.add(value=acc, weight=weight) - - return test_accuracy.eval() - - place = core.CPUPlace() if args.device == 'CPU' else core.CUDAPlace(0) - exe = fluid.Executor(place) - exe.run(fluid.default_startup_program()) - accuracy = fluid.average.WeightedAverage() - if args.use_fake_data: - data = train_reader().next() - image = np.array(map(lambda x: x[0].reshape(dshape), data)).astype( - 
'float32') - label = np.array(map(lambda x: x[1], data)).astype('int64') - label = label.reshape([-1, 1]) - - iters, num_samples, start_time = 0, 0, time.time() - for pass_id in range(args.pass_num): - accuracy.reset() - train_accs = [] - train_losses = [] - for batch_id, data in enumerate(train_reader()): - if iters == args.skip_batch_num: - start_time = time.time() - num_samples = 0 - if iters == args.iterations: - break - if not args.use_fake_data: - image = np.array(map(lambda x: x[0].reshape(dshape), - data)).astype('float32') - label = np.array(map(lambda x: x[1], data)).astype('int64') - label = label.reshape([-1, 1]) - loss, acc, weight = exe.run( - fluid.default_main_program(), - feed={'data': image, - 'label': label}, - fetch_list=[avg_cost, batch_acc, batch_size_tensor]) - iters += 1 - num_samples += len(label) - accuracy.add(value=acc, weight=weight) - train_losses.append(loss) - train_accs.append(acc) - print("Pass: %d, Iter: %d, Loss: %f, Accuracy: %f" % - (pass_id, iters, loss, acc)) - print("Pass: %d, Loss: %f, Train Accuray: %f\n" % - (pass_id, np.mean(train_losses), np.mean(train_accs))) - train_elapsed = time.time() - start_time - examples_per_sec = num_samples / train_elapsed - print('\nTotal examples: %d, total time: %.5f, %.5f examples/sed\n' % - (num_samples, train_elapsed, examples_per_sec)) - # evaluation - if args.with_test: - pass_test_acc = test(exe) - exit(0) - - -def print_arguments(args): - vars(args)['use_nvprof'] = (vars(args)['use_nvprof'] and - vars(args)['device'] == 'GPU') - print('----------- resnet Configuration Arguments -----------') - for arg, value in sorted(vars(args).iteritems()): - print('%s: %s' % (arg, value)) - print('------------------------------------------------') - - -if __name__ == '__main__': - model_map = { - 'resnet_imagenet': resnet_imagenet, - 'resnet_cifar10': resnet_cifar10 - } - args = parse_args() - print_arguments(args) - if args.data_format == 'NHWC': - raise ValueError('Only support NCHW data_format now.') - if args.use_nvprof and args.device == 'GPU': - with profiler.cuda_profiler("cuda_profiler.txt", 'csv') as nvprof: - run_benchmark(model_map[args.model], args) - else: - run_benchmark(model_map[args.model], args) diff --git a/benchmark/fluid/run.sh b/benchmark/fluid/run.sh index f6dfd20bf2..5d9b2db871 100644 --- a/benchmark/fluid/run.sh +++ b/benchmark/fluid/run.sh @@ -2,6 +2,7 @@ # This script benchmarking the PaddlePaddle Fluid on # single thread single GPU. 
+mkdir -p logs #export FLAGS_fraction_of_gpu_memory_to_use=0.0 export CUDNN_PATH=/paddle/cudnn_v5 @@ -35,71 +36,74 @@ nohup stdbuf -oL nvidia-smi \ --format=csv \ --filename=mem.log \ -l 1 & + # mnist # mnist gpu mnist 128 -FLAGS_benchmark=true stdbuf -oL python fluid/mnist.py \ +FLAGS_benchmark=true stdbuf -oL python fluid_benchmark.py \ + --model=mnist \ --device=GPU \ --batch_size=128 \ --skip_batch_num=5 \ --iterations=500 \ - 2>&1 | tee -a mnist_gpu_128.log + 2>&1 | tee -a logs/mnist_gpu_128.log # vgg16 # gpu cifar10 128 -FLAGS_benchmark=true stdbuf -oL python fluid/vgg16.py \ +FLAGS_benchmark=true stdbuf -oL python fluid_benchmark.py \ + --model=vgg16 \ --device=GPU \ --batch_size=128 \ --skip_batch_num=5 \ --iterations=30 \ - 2>&1 | tee -a vgg16_gpu_128.log + 2>&1 | tee -a logs/vgg16_gpu_128.log # flowers gpu 128 -FLAGS_benchmark=true stdbuf -oL python fluid/vgg16.py \ +FLAGS_benchmark=true stdbuf -oL python fluid_benchmark.py \ + --model=vgg16 \ --device=GPU \ --batch_size=32 \ --data_set=flowers \ --skip_batch_num=5 \ --iterations=30 \ - 2>&1 | tee -a vgg16_gpu_flowers_32.log + 2>&1 | tee -a logs/vgg16_gpu_flowers_32.log # resnet50 # resnet50 gpu cifar10 128 -FLAGS_benchmark=true stdbuf -oL python fluid/resnet50.py \ +FLAGS_benchmark=true stdbuf -oL python fluid_benchmark.py \ + --model=resnet \ --device=GPU \ --batch_size=128 \ --data_set=cifar10 \ - --model=resnet_cifar10 \ --skip_batch_num=5 \ --iterations=30 \ - 2>&1 | tee -a resnet50_gpu_128.log + 2>&1 | tee -a logs/resnet50_gpu_128.log # resnet50 gpu flowers 64 -FLAGS_benchmark=true stdbuf -oL python fluid/resnet50.py \ +FLAGS_benchmark=true stdbuf -oL python fluid_benchmark.py \ + --model=resnet \ --device=GPU \ --batch_size=64 \ --data_set=flowers \ - --model=resnet_imagenet \ --skip_batch_num=5 \ --iterations=30 \ - 2>&1 | tee -a resnet50_gpu_flowers_64.log + 2>&1 | tee -a logs/resnet50_gpu_flowers_64.log # lstm # lstm gpu imdb 32 # tensorflow only support batch=32 -FLAGS_benchmark=true stdbuf -oL python fluid/stacked_dynamic_lstm.py \ +FLAGS_benchmark=true stdbuf -oL python fluid_benchmark.py \ + --model=stacked_dynamic_lstm \ --device=GPU \ --batch_size=32 \ --skip_batch_num=5 \ --iterations=30 \ - --hidden_dim=512 \ - --emb_dim=512 \ - --crop_size=1500 \ - 2>&1 | tee -a lstm_gpu_32.log + 2>&1 | tee -a logs/lstm_gpu_32.log # seq2seq # seq2seq gpu wmb 128 -FLAGS_benchmark=true stdbuf -oL python fluid/machine_translation.py \ +FLAGS_benchmark=true stdbuf -oL python fluid_benchmark.py \ + --model=machine_translation \ --device=GPU \ --batch_size=128 \ --skip_batch_num=5 \ --iterations=30 \ - 2>&1 | tee -a lstm_gpu_128.log + 2>&1 | tee -a logs/lstm_gpu_128.log diff --git a/benchmark/fluid/run_fluid_benchmark.sh b/benchmark/fluid/run_fluid_benchmark.sh new file mode 100644 index 0000000000..4309a3126c --- /dev/null +++ b/benchmark/fluid/run_fluid_benchmark.sh @@ -0,0 +1,9 @@ +#!/bin/bash + +PADDLE_TRAINING_ROLE=PSERVER PADDLE_PSERVER_PORT=7164 PADDLE_PSERVER_IPS=127.0.0.1 PADDLE_TRAINERS=2 PADDLE_CURRENT_IP=127.0.0.1 PADDLE_TRAINER_ID=0 python fluid_benchmark.py --model resnet --device CPU --update_method pserver --iterations=10000 & + +sleep 15 + +CUDA_VISIBLE_DEVICES=0,1 PADDLE_TRAINING_ROLE=TRAINER PADDLE_PSERVER_PORT=7164 PADDLE_PSERVER_IPS=127.0.0.1 PADDLE_TRAINERS=2 PADDLE_CURRENT_IP=127.0.0.1 PADDLE_TRAINER_ID=0 python fluid_benchmark.py --model resnet --device GPU --update_method pserver --iterations=10000 --gpus 2 & + +CUDA_VISIBLE_DEVICES=2,3 PADDLE_TRAINING_ROLE=TRAINER PADDLE_PSERVER_PORT=7164 
PADDLE_PSERVER_IPS=127.0.0.1 PADDLE_TRAINERS=2 PADDLE_CURRENT_IP=127.0.0.1 PADDLE_TRAINER_ID=1 python fluid_benchmark.py --model resnet --device GPU --update_method pserver --iterations=10000 --gpus 2 & diff --git a/benchmark/fluid/stacked_dynamic_lstm.py b/benchmark/fluid/stacked_dynamic_lstm.py deleted file mode 100644 index 73bcc47b4d..0000000000 --- a/benchmark/fluid/stacked_dynamic_lstm.py +++ /dev/null @@ -1,236 +0,0 @@ -# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import argparse -import cPickle -import os -import random -import time - -import numpy -import paddle -import paddle.dataset.imdb as imdb -import paddle.fluid as fluid -import paddle.batch as batch -import paddle.fluid.profiler as profiler - - -def parse_args(): - parser = argparse.ArgumentParser("Understand Sentiment by Dynamic RNN.") - parser.add_argument( - '--batch_size', - type=int, - default=32, - help='The sequence number of a batch data. (default: %(default)d)') - parser.add_argument( - '--skip_batch_num', - type=int, - default=5, - help='The first num of minibatch num to skip, for better performance test' - ) - parser.add_argument( - '--iterations', type=int, default=80, help='The number of minibatches.') - parser.add_argument( - '--emb_dim', - type=int, - default=512, - help='Dimension of embedding table. (default: %(default)d)') - parser.add_argument( - '--hidden_dim', - type=int, - default=512, - help='Hidden size of lstm unit. (default: %(default)d)') - parser.add_argument( - '--pass_num', - type=int, - default=100, - help='Epoch number to train. (default: %(default)d)') - parser.add_argument( - '--device', - type=str, - default='CPU', - choices=['CPU', 'GPU'], - help='The device type.') - parser.add_argument( - '--crop_size', - type=int, - default=int(os.environ.get('CROP_SIZE', '1500')), - help='The max sentence length of input. 
Since this model use plain RNN,' - ' Gradient could be explored if sentence is too long') - parser.add_argument( - '--with_test', - action='store_true', - help='If set, test the testset during training.') - args = parser.parse_args() - return args - - -word_dict = imdb.word_dict() - - -def crop_sentence(reader, crop_size): - unk_value = word_dict['<unk>'] - - def __impl__(): - for item in reader(): - if len([x for x in item[0] if x != unk_value]) < crop_size: - yield item - - return __impl__ - - -def main(): - args = parse_args() - lstm_size = args.hidden_dim - - data = fluid.layers.data( - name="words", shape=[1], lod_level=1, dtype='int64') - sentence = fluid.layers.embedding( - input=data, size=[len(word_dict), args.emb_dim]) - - sentence = fluid.layers.fc(input=sentence, size=lstm_size, act='tanh') - - rnn = fluid.layers.DynamicRNN() - with rnn.block(): - word = rnn.step_input(sentence) - prev_hidden = rnn.memory(value=0.0, shape=[lstm_size]) - prev_cell = rnn.memory(value=0.0, shape=[lstm_size]) - - def gate_common( - ipt, - hidden, - size, ): - gate0 = fluid.layers.fc(input=ipt, size=size, bias_attr=True) - gate1 = fluid.layers.fc(input=hidden, size=size, bias_attr=False) - gate = fluid.layers.sums(input=[gate0, gate1]) - return gate - - forget_gate = fluid.layers.sigmoid( - x=gate_common(word, prev_hidden, lstm_size)) - input_gate = fluid.layers.sigmoid( - x=gate_common(word, prev_hidden, lstm_size)) - output_gate = fluid.layers.sigmoid( - x=gate_common(word, prev_hidden, lstm_size)) - cell_gate = fluid.layers.tanh( - x=gate_common(word, prev_hidden, lstm_size)) - - cell = fluid.layers.sums(input=[ - fluid.layers.elementwise_mul( - x=forget_gate, y=prev_cell), fluid.layers.elementwise_mul( - x=input_gate, y=cell_gate) - ]) - - hidden = fluid.layers.elementwise_mul( - x=output_gate, y=fluid.layers.tanh(x=cell)) - - rnn.update_memory(prev_cell, cell) - rnn.update_memory(prev_hidden, hidden) - rnn.output(hidden) - - last = fluid.layers.sequence_pool(rnn(), 'last') - logit = fluid.layers.fc(input=last, size=2, act='softmax') - loss = fluid.layers.cross_entropy( - input=logit, - label=fluid.layers.data( - name='label', shape=[1], dtype='int64')) - loss = fluid.layers.mean(x=loss) - - # add acc - batch_size_tensor = fluid.layers.create_tensor(dtype='int64') - batch_acc = fluid.layers.accuracy(input=logit, label=fluid.layers.data(name='label', \ - shape=[1], dtype='int64'), total=batch_size_tensor) - - inference_program = fluid.default_main_program().clone() - with fluid.program_guard(inference_program): - inference_program = fluid.io.get_inference_program( - target_vars=[batch_acc, batch_size_tensor]) - - adam = fluid.optimizer.Adam() - adam.minimize(loss) - - fluid.memory_optimize(fluid.default_main_program()) - - place = fluid.CPUPlace() if args.device == 'CPU' else fluid.CUDAPlace(0) - exe = fluid.Executor(place) - exe.run(fluid.default_startup_program()) - - train_reader = batch( - paddle.reader.shuffle( - crop_sentence(imdb.train(word_dict), args.crop_size), - buf_size=25000), - batch_size=args.batch_size) - - iters, num_samples, start_time = 0, 0, time.time() - for pass_id in range(args.pass_num): - train_accs = [] - train_losses = [] - for batch_id, data in enumerate(train_reader()): - if iters == args.skip_batch_num: - start_time = time.time() - num_samples = 0 - if iters == args.iterations: - break - tensor_words = to_lodtensor([x[0] for x in data], place) - label = numpy.array([x[1] for x in data]).astype("int64") - label = label.reshape((-1, 1)) - loss_np, acc, weight = exe.run(
fluid.default_main_program(), - feed={"words": tensor_words, - "label": label}, - fetch_list=[loss, batch_acc, batch_size_tensor]) - iters += 1 - for x in data: - num_samples += len(x[0]) - print( - "Pass = %d, Iter = %d, Loss = %f, Accuracy = %f" % - (pass_id, iters, loss_np, acc) - ) # The accuracy is the accumulation of batches, but not the current batch. - - train_elapsed = time.time() - start_time - examples_per_sec = num_samples / train_elapsed - print('\nTotal examples: %d, total time: %.5f, %.5f examples/sed\n' % - (num_samples, train_elapsed, examples_per_sec)) - exit(0) - - -def to_lodtensor(data, place): - seq_lens = [len(seq) for seq in data] - cur_len = 0 - lod = [cur_len] - for l in seq_lens: - cur_len += l - lod.append(cur_len) - flattened_data = numpy.concatenate(data, axis=0).astype("int64") - flattened_data = flattened_data.reshape([len(flattened_data), 1]) - res = fluid.LoDTensor() - res.set(flattened_data, place) - res.set_lod([lod]) - return res - - -def print_arguments(args): - print('----------- lstm Configuration Arguments -----------') - for arg, value in sorted(vars(args).iteritems()): - print('%s: %s' % (arg, value)) - print('------------------------------------------------') - - -if __name__ == '__main__': - args = parse_args() - print_arguments(args) - main() diff --git a/benchmark/fluid/vgg.py b/benchmark/fluid/vgg.py deleted file mode 100644 index 53e34e0cbd..0000000000 --- a/benchmark/fluid/vgg.py +++ /dev/null @@ -1,224 +0,0 @@ -# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -"""VGG16 benchmark in Fluid""" -from __future__ import print_function - -import sys -import time -import numpy as np -import paddle -import paddle.fluid as fluid -import paddle.fluid.core as core -import argparse -import functools - -parser = argparse.ArgumentParser(description=__doc__) -parser.add_argument( - '--batch_size', type=int, default=128, help="Batch size for training.") -parser.add_argument( - '--skip_batch_num', - type=int, - default=5, - help='The first num of minibatch num to skip, for better performance test') -parser.add_argument( - '--iterations', type=int, default=80, help='The number of minibatches.') -parser.add_argument( - '--learning_rate', - type=float, - default=1e-3, - help="Learning rate for training.") -parser.add_argument('--pass_num', type=int, default=50, help="No. 
of passes.") -parser.add_argument( - '--device', - type=str, - default='GPU', - choices=['CPU', 'GPU'], - help="The device type.") -parser.add_argument( - '--data_format', - type=str, - default='NCHW', - choices=['NCHW', 'NHWC'], - help='The data order, now only support NCHW.') -parser.add_argument( - '--data_set', - type=str, - default='cifar10', - choices=['cifar10', 'flowers'], - help='Optional dataset for benchmark.') -parser.add_argument( - '--with_test', - action='store_true', - help='If set, test the testset during training.') -args = parser.parse_args() - - -def vgg16_bn_drop(input): - def conv_block(input, num_filter, groups, dropouts): - return fluid.nets.img_conv_group( - input=input, - pool_size=2, - pool_stride=2, - conv_num_filter=[num_filter] * groups, - conv_filter_size=3, - conv_act='relu', - conv_with_batchnorm=True, - conv_batchnorm_drop_rate=dropouts, - pool_type='max') - - conv1 = conv_block(input, 64, 2, [0.3, 0]) - conv2 = conv_block(conv1, 128, 2, [0.4, 0]) - conv3 = conv_block(conv2, 256, 3, [0.4, 0.4, 0]) - conv4 = conv_block(conv3, 512, 3, [0.4, 0.4, 0]) - conv5 = conv_block(conv4, 512, 3, [0.4, 0.4, 0]) - - drop = fluid.layers.dropout(x=conv5, dropout_prob=0.5) - fc1 = fluid.layers.fc(input=drop, size=512, act=None) - bn = fluid.layers.batch_norm(input=fc1, act='relu') - drop2 = fluid.layers.dropout(x=bn, dropout_prob=0.5) - fc2 = fluid.layers.fc(input=drop2, size=512, act=None) - return fc2 - - -def main(): - if args.data_set == "cifar10": - classdim = 10 - if args.data_format == 'NCHW': - data_shape = [3, 32, 32] - else: - data_shape = [32, 32, 3] - else: - classdim = 102 - if args.data_format == 'NCHW': - data_shape = [3, 224, 224] - else: - data_shape = [224, 224, 3] - - # Input data - images = fluid.layers.data(name='pixel', shape=data_shape, dtype='float32') - label = fluid.layers.data(name='label', shape=[1], dtype='int64') - - # Train program - net = vgg16_bn_drop(images) - predict = fluid.layers.fc(input=net, size=classdim, act='softmax') - cost = fluid.layers.cross_entropy(input=predict, label=label) - avg_cost = fluid.layers.mean(x=cost) - - # Evaluator - batch_size_tensor = fluid.layers.create_tensor(dtype='int64') - batch_acc = fluid.layers.accuracy( - input=predict, label=label, total=batch_size_tensor) - - # inference program - inference_program = fluid.default_main_program().clone() - with fluid.program_guard(inference_program): - inference_program = fluid.io.get_inference_program( - target_vars=[batch_acc, batch_size_tensor]) - - # Optimization - optimizer = fluid.optimizer.Adam(learning_rate=args.learning_rate) - opts = optimizer.minimize(avg_cost) - - fluid.memory_optimize(fluid.default_main_program()) - - # Initialize executor - place = core.CPUPlace() if args.device == 'CPU' else core.CUDAPlace(0) - exe = fluid.Executor(place) - - # Parameter initialization - exe.run(fluid.default_startup_program()) - - # data reader - train_reader = paddle.batch( - paddle.reader.shuffle( - paddle.dataset.cifar.train10() - if args.data_set == 'cifar10' else paddle.dataset.flowers.train(), - buf_size=5120), - batch_size=args.batch_size) - test_reader = paddle.batch( - paddle.dataset.cifar.test10() - if args.data_set == 'cifar10' else paddle.dataset.flowers.test(), - batch_size=args.batch_size) - - # test - def test(exe): - test_accuracy = fluid.average.WeightedAverage() - for batch_id, data in enumerate(test_reader()): - img_data = np.array(map(lambda x: x[0].reshape(data_shape), - data)).astype("float32") - y_data = np.array(map(lambda x: x[1], 
data)).astype("int64") - y_data = y_data.reshape([-1, 1]) - - acc, weight = exe.run(inference_program, - feed={"pixel": img_data, - "label": y_data}, - fetch_list=[batch_acc, batch_size_tensor]) - test_accuracy.add(value=acc, weight=weight) - return test_accuracy.eval() - - iters, num_samples, start_time = 0, 0, time.time() - accuracy = fluid.average.WeightedAverage() - for pass_id in range(args.pass_num): - accuracy.reset() - train_accs = [] - train_losses = [] - for batch_id, data in enumerate(train_reader()): - if iters == args.skip_batch_num: - start_time = time.time() - num_samples = 0 - if iters == args.iterations: - break - img_data = np.array(map(lambda x: x[0].reshape(data_shape), - data)).astype("float32") - y_data = np.array(map(lambda x: x[1], data)).astype("int64") - y_data = y_data.reshape([-1, 1]) - - loss, acc, weight = exe.run( - fluid.default_main_program(), - feed={"pixel": img_data, - "label": y_data}, - fetch_list=[avg_cost, batch_acc, batch_size_tensor]) - accuracy.add(value=acc, weight=weight) - iters += 1 - num_samples += len(y_data) - print( - "Pass = %d, Iter = %d, Loss = %f, Accuracy = %f" % - (pass_id, iters, loss, acc) - ) # The accuracy is the accumulation of batches, but not the current batch. - - # pass_train_acc = accuracy.eval() - train_losses.append(loss) - train_accs.append(acc) - print("Pass: %d, Loss: %f, Train Accuray: %f\n" % - (pass_id, np.mean(train_losses), np.mean(train_accs))) - train_elapsed = time.time() - start_time - examples_per_sec = num_samples / train_elapsed - print('\nTotal examples: %d, total time: %.5f, %.5f examples/sed\n' % - (num_samples, train_elapsed, examples_per_sec)) - # evaluation - if args.with_test: - pass_test_acc = test(exe) - exit(0) - - -def print_arguments(): - print('----------- vgg Configuration Arguments -----------') - for arg, value in sorted(vars(args).iteritems()): - print('%s: %s' % (arg, value)) - print('------------------------------------------------') - - -if __name__ == "__main__": - print_arguments() - main() diff --git a/benchmark/paddle/image/run.sh b/benchmark/paddle/image/run.sh index 717ed487ba..5b58a8d773 100755 --- a/benchmark/paddle/image/run.sh +++ b/benchmark/paddle/image/run.sh @@ -1,3 +1,5 @@ +#!/bin/bash + set -e function train() { diff --git a/benchmark/paddle/image/run_mkl_infer.sh b/benchmark/paddle/image/run_mkl_infer.sh index 62c9bf6efd..0fad5e04cc 100755 --- a/benchmark/paddle/image/run_mkl_infer.sh +++ b/benchmark/paddle/image/run_mkl_infer.sh @@ -1,3 +1,5 @@ +#!/bin/bash + set -e function clock_to_seconds() { diff --git a/benchmark/paddle/image/run_mkl_train.sh b/benchmark/paddle/image/run_mkl_train.sh index 03d2d378fb..1583bf134a 100755 --- a/benchmark/paddle/image/run_mkl_train.sh +++ b/benchmark/paddle/image/run_mkl_train.sh @@ -1,3 +1,5 @@ +#!/bin/bash + set -e function train() { diff --git a/benchmark/paddle/image/run_openblas_infer.sh b/benchmark/paddle/image/run_openblas_infer.sh index a9a7b8a667..987381cabc 100755 --- a/benchmark/paddle/image/run_openblas_infer.sh +++ b/benchmark/paddle/image/run_openblas_infer.sh @@ -1,3 +1,5 @@ +#!/bin/bash + set -e function clock_to_seconds() { diff --git a/benchmark/paddle/image/run_openblas_train.sh b/benchmark/paddle/image/run_openblas_train.sh index 935cff6f2c..cc64e1d09d 100755 --- a/benchmark/paddle/image/run_openblas_train.sh +++ b/benchmark/paddle/image/run_openblas_train.sh @@ -1,3 +1,5 @@ +#!/bin/bash + set -e function train() { diff --git a/benchmark/paddle/rnn/run.sh b/benchmark/paddle/rnn/run.sh index e9dfeb2e52..f99a562b3f 
100755 --- a/benchmark/paddle/rnn/run.sh +++ b/benchmark/paddle/rnn/run.sh @@ -1,3 +1,5 @@ +#!/bin/bash + set -e function train() { diff --git a/benchmark/tensorflow/image/run.sh b/benchmark/tensorflow/image/run.sh index eade36beb9..cf894fe3f2 100755 --- a/benchmark/tensorflow/image/run.sh +++ b/benchmark/tensorflow/image/run.sh @@ -1,3 +1,5 @@ +#!/bin/bash + set -e function test() { diff --git a/benchmark/tensorflow/image/run_multi.sh b/benchmark/tensorflow/image/run_multi.sh index 69faa43317..bf1435bc55 100755 --- a/benchmark/tensorflow/image/run_multi.sh +++ b/benchmark/tensorflow/image/run_multi.sh @@ -1,3 +1,5 @@ +#!/bin/bash + set -e function test() { diff --git a/benchmark/tensorflow/rnn/run.sh b/benchmark/tensorflow/rnn/run.sh index bb4c69cb95..db10eefdea 100755 --- a/benchmark/tensorflow/rnn/run.sh +++ b/benchmark/tensorflow/rnn/run.sh @@ -1,3 +1,5 @@ +#!/bin/bash + set -e function test() { diff --git a/benchmark/tensorflow/rnn/run_multi.sh b/benchmark/tensorflow/rnn/run_multi.sh index c2d7dd597e..ec62fc26b5 100755 --- a/benchmark/tensorflow/rnn/run_multi.sh +++ b/benchmark/tensorflow/rnn/run_multi.sh @@ -1,3 +1,5 @@ +#!/bin/bash + set -e function test() { diff --git a/cmake/cblas.cmake b/cmake/cblas.cmake index e3b9d94215..6ed51c6484 100644 --- a/cmake/cblas.cmake +++ b/cmake/cblas.cmake @@ -83,18 +83,20 @@ else() set(REFERENCE_CBLAS_LIB_SEARCH_PATHS ${REFERENCE_CBLAS_ROOT}/lib) endif() -find_path(REFERENCE_CBLAS_INCLUDE_DIR NAMES cblas.h PATHS +if(WITH_SYSTEM_BLAS) + find_path(REFERENCE_CBLAS_INCLUDE_DIR NAMES cblas.h PATHS ${REFERENCE_CBLAS_INCLUDE_SEARCH_PATHS}) -find_library(REFERENCE_CBLAS_LIBRARY NAMES cblas PATHS + find_library(REFERENCE_CBLAS_LIBRARY NAMES cblas PATHS ${REFERENCE_CBLAS_LIB_SEARCH_PATHS}) -if(REFERENCE_CBLAS_INCLUDE_DIR AND REFERENCE_CBLAS_LIBRARY) - set(CBLAS_FOUND ON) - set(CBLAS_PROVIDER REFERENCE) - set(CBLAS_INC_DIR ${REFERENCE_CBLAS_INCLUDE_DIR}) - set(CBLAS_LIBRARIES ${REFERENCE_CBLAS_LIBRARY}) - add_definitions(-DPADDLE_USE_REFERENCE_CBLAS) - message(STATUS "Found reference-cblas (include: ${CBLAS_INC_DIR}, library: ${CBLAS_LIBRARIES})") + if(REFERENCE_CBLAS_INCLUDE_DIR AND REFERENCE_CBLAS_LIBRARY) + set(CBLAS_FOUND ON) + set(CBLAS_PROVIDER REFERENCE) + set(CBLAS_INC_DIR ${REFERENCE_CBLAS_INCLUDE_DIR}) + set(CBLAS_LIBRARIES ${REFERENCE_CBLAS_LIBRARY}) + add_definitions(-DPADDLE_USE_REFERENCE_CBLAS) + message(STATUS "Found reference-cblas (include: ${CBLAS_INC_DIR}, library: ${CBLAS_LIBRARIES})") + endif() endif() if(IOS_USE_VECLIB_FOR_BLAS AND VECLIB_FOUND) diff --git a/cmake/configure.cmake b/cmake/configure.cmake index e490397cc0..c35096e09b 100644 --- a/cmake/configure.cmake +++ b/cmake/configure.cmake @@ -41,6 +41,10 @@ if(USE_EIGEN_FOR_BLAS) add_definitions(-DPADDLE_USE_EIGEN_FOR_BLAS) endif(USE_EIGEN_FOR_BLAS) +if(EIGEN_USE_THREADS) + add_definitions(-DEIGEN_USE_THREADS) +endif(EIGEN_USE_THREADS) + if(NOT WITH_PROFILER) add_definitions(-DPADDLE_DISABLE_PROFILER) endif(NOT WITH_PROFILER) @@ -88,8 +92,19 @@ if(WITH_GPU) if(${CUDNN_MAJOR_VERSION} VERSION_LESS 7) message(FATAL_ERROR "TensorRT needs CUDNN >= 7.0 to compile") endif() + if(${TENSORRT_MAJOR_VERSION} VERSION_LESS 4) + message(FATAL_ERROR "Paddle needs TensorRT >= 4.0 to compile") + endif() include_directories(${TENSORRT_INCLUDE_DIR}) endif() + if(WITH_ANAKIN) + if(${CUDA_VERSION_MAJOR} VERSION_LESS 8) + message(FATAL_ERROR "Anakin needs CUDA >= 8.0 to compile") + endif() + if(${CUDNN_MAJOR_VERSION} VERSION_LESS 7) + message(FATAL_ERROR "Anakin needs CUDNN >= 7.0 to compile") + endif() 
+ endif() elseif(WITH_AMD_GPU) add_definitions(-DPADDLE_WITH_HIP) set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -D__HIP_PLATFORM_HCC__") @@ -111,6 +126,10 @@ endif() set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${SIMD_FLAG}") set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${SIMD_FLAG}") +if(WITH_DISTRIBUTE) + add_definitions(-DPADDLE_WITH_DISTRIBUTE) +endif() + if(WITH_GOLANG) # we need to symlink Paddle directory into GOPATH. If we # don't do it and we have code that depends on Paddle, go @@ -159,3 +178,11 @@ if(WITH_GOLANG) endif() endif(WITH_GOLANG) + +if(WITH_GRPC) + add_definitions(-DPADDLE_WITH_GRPC) +endif(WITH_GRPC) + +if(WITH_BRPC_RDMA) + add_definitions(-DPADDLE_WITH_BRPC_RDMA) +endif(WITH_BRPC_RDMA) diff --git a/cmake/cpplint.cmake b/cmake/cpplint.cmake deleted file mode 100644 index 4823dc3e91..0000000000 --- a/cmake/cpplint.cmake +++ /dev/null @@ -1,62 +0,0 @@ -# util to check C++ file style -# * it basically use google cpplint.py. -# * It provide "add_style_check_target" for cmake. -# Usage see add_style_check_target's document -# -# TODO(yuyang18): Add python style check. - -set(STYLE_FILTER) - -# diable unwanted filters - -# paddle do not indent public/potected/private in class -set(STYLE_FILTER "${STYLE_FILTER}-whitespace/indent,") -# paddle use mutable reference. BUT IT IS NOT RECOMMANDED -set(STYLE_FILTER "${STYLE_FILTER}-runtime/references,") -# paddle use relative path for include. -set(STYLE_FILTER "${STYLE_FILTER}-build/include,") -# paddle use , , etc. -set(STYLE_FILTER "${STYLE_FILTER}-build/c++11,") -# paddle use c style casting. BUT IT IS NOT RECOMMANDED -set(STYLE_FILTER "${STYLE_FILTER}-readability/casting") - - -# IGNORE SOME FILES -set(IGNORE_PATTERN - .*ImportanceSampler.* - .*cblas\\.h.* - .*\\.pb\\.txt - .*MultiDataProvider.* - .*pb.* - .*pybind.h) - -# add_style_check_target -# -# attach check code style step for target. -# -# first argument: target name to attach -# rest arguments: source list to check code style. -# -# NOTE: If WITH_STYLE_CHECK is OFF, then this macro just do nothing. 
-macro(add_style_check_target TARGET_NAME) - if(WITH_STYLE_CHECK) - set(SOURCES_LIST ${ARGN}) - list(REMOVE_DUPLICATES SOURCES_LIST) - foreach(filename ${SOURCES_LIST}) - foreach(pattern ${IGNORE_PATTERN}) - if(filename MATCHES ${pattern}) - list(REMOVE_ITEM SOURCES_LIST ${filename}) - endif() - endforeach() - endforeach() - - if(SOURCES_LIST) - add_custom_command(TARGET ${TARGET_NAME} POST_BUILD - COMMAND "${PYTHON_EXECUTABLE}" "${PADDLE_SOURCE_DIR}/paddle/scripts/cpplint.py" - "--filter=${STYLE_FILTER}" - ${SOURCES_LIST} - COMMENT "cpplint: Checking source code style" - WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}) - endif() - endif() -endmacro() diff --git a/cmake/cudnn.cmake b/cmake/cudnn.cmake index 2c84061ff5..9eebea816c 100644 --- a/cmake/cudnn.cmake +++ b/cmake/cudnn.cmake @@ -21,6 +21,7 @@ list(APPEND CUDNN_CHECK_LIBRARY_DIRS ${CUDNN_ROOT}/lib64 ${CUDNN_ROOT}/lib ${CUDNN_ROOT}/lib/${TARGET_ARCH}-linux-gnu + ${CUDNN_ROOT}/local/cuda-${CUDA_VERSION}/targets/${TARGET_ARCH}-linux/lib/ $ENV{CUDNN_ROOT} $ENV{CUDNN_ROOT}/lib64 $ENV{CUDNN_ROOT}/lib diff --git a/cmake/external/anakin.cmake b/cmake/external/anakin.cmake new file mode 100644 index 0000000000..403873a510 --- /dev/null +++ b/cmake/external/anakin.cmake @@ -0,0 +1,64 @@ +if (NOT WITH_ANAKIN) + return() +endif() + +INCLUDE(ExternalProject) +set(ANAKIN_SOURCE_DIR ${THIRD_PARTY_PATH}/anakin) +# the anakin install dir is only default one now +set(ANAKIN_INSTALL_DIR ${THIRD_PARTY_PATH}/anakin/src/extern_anakin/output) +set(ANAKIN_INCLUDE ${ANAKIN_INSTALL_DIR}) +set(ANAKIN_LIBRARY ${ANAKIN_INSTALL_DIR}) +set(ANAKIN_SHARED_LIB ${ANAKIN_LIBRARY}/libanakin.so) +set(ANAKIN_SABER_LIB ${ANAKIN_LIBRARY}/libanakin_saber_common.so) + +# TODO(luotao): ANAKIN_MODLE_URL will move to demo ci later. +set(ANAKIN_MODLE_URL "http://paddle-inference-dist.bj.bcebos.com/mobilenet_v2.anakin.bin") +execute_process(COMMAND bash -c "mkdir -p ${ANAKIN_SOURCE_DIR}") +execute_process(COMMAND bash -c "cd ${ANAKIN_SOURCE_DIR}; wget -q --no-check-certificate ${ANAKIN_MODLE_URL}") + +include_directories(${ANAKIN_INCLUDE}) +include_directories(${ANAKIN_INCLUDE}/saber/) + +set(ANAKIN_COMPILE_EXTRA_FLAGS + -Wno-error=unused-but-set-variable -Wno-unused-but-set-variable + -Wno-error=unused-variable -Wno-unused-variable + -Wno-error=format-extra-args -Wno-format-extra-args + -Wno-error=comment -Wno-comment + -Wno-error=format -Wno-format + -Wno-error=switch -Wno-switch + -Wno-error=return-type -Wno-return-type + -Wno-error=non-virtual-dtor -Wno-non-virtual-dtor + -Wno-sign-compare + -Wno-reorder + -Wno-error=cpp) + +ExternalProject_Add( + extern_anakin + ${EXTERNAL_PROJECT_LOG_ARGS} + # TODO(luotao): use PaddlePaddle/Anakin later + GIT_REPOSITORY "https://github.com/luotao1/Anakin" + GIT_TAG "3957ae9263eaa0b1986758dac60a88852afb09be" + PREFIX ${ANAKIN_SOURCE_DIR} + UPDATE_COMMAND "" + CMAKE_ARGS -DUSE_GPU_PLACE=YES + -DUSE_X86_PLACE=YES + -DBUILD_WITH_UNIT_TEST=NO + -DPROTOBUF_ROOT=${THIRD_PARTY_PATH}/install/protobuf + -DMKLML_ROOT=${THIRD_PARTY_PATH}/install/mklml + -DCUDNN_ROOT=${CUDNN_ROOT} + ${EXTERNAL_OPTIONAL_ARGS} + CMAKE_CACHE_ARGS -DCMAKE_INSTALL_PREFIX:PATH=${ANAKIN_INSTALL_DIR} +) + +message(STATUS "Anakin for inference is enabled") +message(STATUS "Anakin is set INCLUDE:${ANAKIN_INCLUDE} LIBRARY:${ANAKIN_LIBRARY}") + +add_library(anakin_shared SHARED IMPORTED GLOBAL) +set_property(TARGET anakin_shared PROPERTY IMPORTED_LOCATION ${ANAKIN_SHARED_LIB}) +add_dependencies(anakin_shared extern_anakin protobuf mklml) + +add_library(anakin_saber SHARED IMPORTED 
GLOBAL) +set_property(TARGET anakin_saber PROPERTY IMPORTED_LOCATION ${ANAKIN_SABER_LIB}) +add_dependencies(anakin_saber extern_anakin protobuf mklml) + +list(APPEND external_project_dependencies anakin_shared anakin_saber) diff --git a/cmake/external/boost.cmake b/cmake/external/boost.cmake index 10662fc967..73713d93d5 100644 --- a/cmake/external/boost.cmake +++ b/cmake/external/boost.cmake @@ -23,8 +23,12 @@ set(BOOST_PROJECT "extern_boost") # checked that the devtools package of CentOS 6 installs boost 1.41.0. # So we use 1.41.0 here. set(BOOST_VER "1.41.0") -set(BOOST_TAR "boost_1_41_0") -set(BOOST_URL "http://paddlepaddledeps.bj.bcebos.com/${BOOST_TAR}.tar.gz") +if((NOT DEFINED BOOST_TAR) OR (NOT DEFINED BOOST_URL)) + message(STATUS "use pre defined download url") + set(BOOST_TAR "boost_1_41_0" CACHE STRING "" FORCE) + set(BOOST_URL "http://paddlepaddledeps.cdn.bcebos.com/${BOOST_TAR}.tar.gz" CACHE STRING "" FORCE) +endif() +MESSAGE(STATUS "BOOST_TAR: ${BOOST_TAR}, BOOST_URL: ${BOOST_URL}") set(BOOST_SOURCES_DIR ${THIRD_PARTY_PATH}/boost) set(BOOST_DOWNLOAD_DIR "${BOOST_SOURCES_DIR}/src/${BOOST_PROJECT}") set(BOOST_INCLUDE_DIR "${BOOST_DOWNLOAD_DIR}/${BOOST_TAR}" CACHE PATH "boost include directory." FORCE) diff --git a/cmake/external/brpc.cmake b/cmake/external/brpc.cmake new file mode 100644 index 0000000000..30b227b645 --- /dev/null +++ b/cmake/external/brpc.cmake @@ -0,0 +1,69 @@ +# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +INCLUDE(ExternalProject) + +find_library(SSL_LIBRARY NAMES ssl) +ADD_LIBRARY(ssl SHARED IMPORTED GLOBAL) +SET_PROPERTY(TARGET ssl PROPERTY IMPORTED_LOCATION ${SSL_LIBRARY}) + +find_library(CRYPTO_LIBRARY NAMES crypto) +ADD_LIBRARY(crypto SHARED IMPORTED GLOBAL) +SET_PROPERTY(TARGET crypto PROPERTY IMPORTED_LOCATION ${CRYPTO_LIBRARY}) + + +SET(BRPC_SOURCES_DIR ${THIRD_PARTY_PATH}/brpc) +SET(BRPC_INSTALL_DIR ${THIRD_PARTY_PATH}/install/brpc) +SET(BRPC_INCLUDE_DIR "${BRPC_INSTALL_DIR}/include" CACHE PATH "brpc include directory." FORCE) +SET(BRPC_LIBRARIES "${BRPC_INSTALL_DIR}/lib/libbrpc.a" CACHE FILEPATH "brpc library." 
+ +INCLUDE_DIRECTORIES(${BRPC_INCLUDE_DIR}) + +# Reference https://stackoverflow.com/questions/45414507/pass-a-list-of-prefix-paths-to-externalproject-add-in-cmake-args +set(prefix_path "${THIRD_PARTY_PATH}/install/gflags|${THIRD_PARTY_PATH}/install/leveldb|${THIRD_PARTY_PATH}/install/snappy|${THIRD_PARTY_PATH}/install/gtest|${THIRD_PARTY_PATH}/install/protobuf|${THIRD_PARTY_PATH}/install/zlib") + +# If a minimal .a is needed, you can set WITH_DEBUG_SYMBOLS=OFF +ExternalProject_Add( + extern_brpc + ${EXTERNAL_PROJECT_LOG_ARGS} + GIT_REPOSITORY "https://github.com/gongweibao/brpc" + GIT_TAG "7dc04defad1fd4173aae170c3fcbde131b65155a" + PREFIX ${BRPC_SOURCES_DIR} + UPDATE_COMMAND "" + CMAKE_ARGS -DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER} + -DCMAKE_C_COMPILER=${CMAKE_C_COMPILER} + -DCMAKE_CXX_FLAGS=${CMAKE_CXX_FLAGS} + -DCMAKE_C_FLAGS=${CMAKE_C_FLAGS} + -DCMAKE_INSTALL_PREFIX=${BRPC_INSTALL_DIR} + -DCMAKE_INSTALL_LIBDIR=${BRPC_INSTALL_DIR}/lib + -DCMAKE_POSITION_INDEPENDENT_CODE=ON + -DCMAKE_BUILD_TYPE=${THIRD_PARTY_BUILD_TYPE} + -DCMAKE_PREFIX_PATH=${prefix_path} + -DBRPC_WITH_GLOG=ON + -DIOBUF_WITH_HUGE_BLOCK=ON + -DBRPC_WITH_RDMA=${WITH_BRPC_RDMA} + ${EXTERNAL_OPTIONAL_ARGS} + LIST_SEPARATOR | + CMAKE_CACHE_ARGS -DCMAKE_INSTALL_PREFIX:PATH=${BRPC_INSTALL_DIR} + -DCMAKE_INSTALL_LIBDIR:PATH=${BRPC_INSTALL_DIR}/lib + -DCMAKE_POSITION_INDEPENDENT_CODE:BOOL=ON + -DCMAKE_BUILD_TYPE:STRING=${THIRD_PARTY_BUILD_TYPE} +) +ADD_DEPENDENCIES(extern_brpc protobuf ssl crypto leveldb gflags glog gtest snappy) +ADD_LIBRARY(brpc STATIC IMPORTED GLOBAL) +SET_PROPERTY(TARGET brpc PROPERTY IMPORTED_LOCATION ${BRPC_LIBRARIES}) +ADD_DEPENDENCIES(brpc extern_brpc) + + +LIST(APPEND external_project_dependencies brpc) diff --git a/cmake/external/cub.cmake b/cmake/external/cub.cmake new file mode 100644 index 0000000000..c94849cf4b --- /dev/null +++ b/cmake/external/cub.cmake @@ -0,0 +1,35 @@ +if(NOT WITH_GPU) + return() +endif() + +include(ExternalProject) + +set(CUB_SOURCE_DIR ${THIRD_PARTY_PATH}/cub) +set(CUB_INCLUDE_DIR ${CUB_SOURCE_DIR}/src/extern_cub) + +include_directories(${CUB_INCLUDE_DIR}) + +ExternalProject_Add( + extern_cub + ${EXTERNAL_PROJECT_LOG_ARGS} + GIT_REPOSITORY "https://github.com/NVlabs/cub.git" + GIT_TAG "v1.8.0" + PREFIX ${CUB_SOURCE_DIR} + UPDATE_COMMAND "" + CONFIGURE_COMMAND "" + BUILD_COMMAND "" + INSTALL_COMMAND "" + TEST_COMMAND "" +) + +if(${CMAKE_VERSION} VERSION_LESS "3.3.0") + set(dummyfile ${CMAKE_CURRENT_BINARY_DIR}/cub_dummy.c) + file(WRITE ${dummyfile} "const char *dummy = \"${dummyfile}\";") + add_library(cub STATIC ${dummyfile}) +else() + add_library(cub INTERFACE) +endif() + +add_dependencies(cub extern_cub) + +LIST(APPEND external_project_dependencies cub) diff --git a/cmake/external/eigen.cmake b/cmake/external/eigen.cmake index edc93c2773..e029300eee 100644 --- a/cmake/external/eigen.cmake +++ b/cmake/external/eigen.cmake @@ -21,11 +21,12 @@ else() ExternalProject_Add( extern_eigen3 ${EXTERNAL_PROJECT_LOG_ARGS} - GIT_REPOSITORY "https://github.com/RLovelett/eigen.git" + GIT_REPOSITORY "https://github.com/eigenteam/eigen-git-mirror" # eigen on cuda9.1 missing header of math_funtions.hpp # https://stackoverflow.com/questions/43113508/math-functions-hpp-not-found-when-using-cuda-with-eigen GIT_TAG 917060c364181f33a735dc023818d5a54f60e54c PREFIX ${EIGEN_SOURCE_DIR} + DOWNLOAD_NAME "eigen" UPDATE_COMMAND "" CONFIGURE_COMMAND "" BUILD_COMMAND "" diff --git a/cmake/external/grpc.cmake b/cmake/external/grpc.cmake index e90948782b..7fb67afbe1 100644 ---
a/cmake/external/grpc.cmake +++ b/cmake/external/grpc.cmake @@ -23,21 +23,34 @@ SET(GRPC_SOURCES_DIR ${THIRD_PARTY_PATH}/grpc) SET(GRPC_INSTALL_DIR ${THIRD_PARTY_PATH}/install/grpc) SET(GRPC_INCLUDE_DIR "${GRPC_INSTALL_DIR}/include/" CACHE PATH "grpc include directory." FORCE) SET(GRPC_CPP_PLUGIN "${GRPC_INSTALL_DIR}/bin/grpc_cpp_plugin" CACHE FILEPATH "GRPC_CPP_PLUGIN" FORCE) + +include(ProcessorCount) +ProcessorCount(NUM_OF_PROCESSOR) + IF(APPLE) - SET(BUILD_CMD make -n HAS_SYSTEM_PROTOBUF=false -s -j static grpc_cpp_plugin | sed "s/-Werror//g" | sh) + SET(BUILD_CMD make -n HAS_SYSTEM_PROTOBUF=false -s -j ${NUM_OF_PROCESSOR} static grpc_cpp_plugin | sed "s/-Werror//g" | sh) ELSE() - SET(BUILD_CMD make HAS_SYSTEM_PROTOBUF=false -s -j static grpc_cpp_plugin) + SET(BUILD_CMD make HAS_SYSTEM_PROTOBUF=false -s -j ${NUM_OF_PROCESSOR} static grpc_cpp_plugin) ENDIF() +# FIXME(wuyi): do not build zlib cares protobuf twice, find a way to build grpc with them ExternalProject_Add( extern_grpc DEPENDS protobuf zlib - GIT_REPOSITORY "https://github.com/grpc/grpc.git" - GIT_TAG "v1.10.x" + # NOTE(wuyi): + # this package is generated by the following steps: + # 1. git clone -b v1.8.x https://github.com/grpc/grpc.git + # 2. git submodule update --init + # 3. keep only zlib, cares, protobuf, boringssl under "third_party", + # checkout and clean other dirs under third_party + # 4. remove .git, and package the directory. + URL "http://paddlepaddledeps.bj.bcebos.com/grpc-v1.10.x.tar.gz" + URL_MD5 "1f268a2aff6759839dccd256adcc91cf" PREFIX ${GRPC_SOURCES_DIR} UPDATE_COMMAND "" CONFIGURE_COMMAND "" BUILD_IN_SOURCE 1 + PATCH_COMMAND cp ${PADDLE_SOURCE_DIR}/patches/grpc/grpc_library.h ${GRPC_SOURCES_DIR}/src/extern_grpc/include/grpcpp/impl/codegen/grpc_library.h && cp ${PADDLE_SOURCE_DIR}/patches/grpc/completion_queue.h ${GRPC_SOURCES_DIR}/src/extern_grpc/include/grpcpp/impl/codegen/completion_queue.h # NOTE(yuyang18): # Disable -Werror, otherwise the compile will fail in MacOS. # It seems that we cannot configure that by make command. @@ -46,7 +59,6 @@ ExternalProject_Add( INSTALL_COMMAND make prefix=${GRPC_INSTALL_DIR} install ) -# FIXME(typhoonzero): hack to get static lib path, try a better way like merge them. ADD_LIBRARY(grpc++_unsecure STATIC IMPORTED GLOBAL) SET_PROPERTY(TARGET grpc++_unsecure PROPERTY IMPORTED_LOCATION "${GRPC_INSTALL_DIR}/lib/libgrpc++_unsecure.a") diff --git a/cmake/external/leveldb.cmake b/cmake/external/leveldb.cmake new file mode 100644 index 0000000000..fb5091731d --- /dev/null +++ b/cmake/external/leveldb.cmake @@ -0,0 +1,44 @@ +# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +INCLUDE(ExternalProject) + +SET(LEVELDB_SOURCES_DIR ${THIRD_PARTY_PATH}/leveldb) +SET(LEVELDB_INSTALL_DIR ${THIRD_PARTY_PATH}/install/leveldb) +SET(LEVELDB_INCLUDE_DIR "${LEVELDB_INSTALL_DIR}/include" CACHE PATH "leveldb include directory." FORCE) +SET(LEVELDB_LIBRARIES "${LEVELDB_INSTALL_DIR}/lib/libleveldb.a" CACHE FILEPATH "leveldb library." FORCE)
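Both this grpc recipe and the leveldb recipe that follows pass ${NUM_OF_PROCESSOR} to make -j. The value comes from CMake's stock ProcessorCount module, which is documented to return 0 when the processor count cannot be determined, so a defensive variant (a sketch, not part of this patch) would be:

    include(ProcessorCount)
    ProcessorCount(NUM_OF_PROCESSOR)
    if(NUM_OF_PROCESSOR EQUAL 0)
      # Fall back to a serial build when detection fails.
      set(NUM_OF_PROCESSOR 1)
    endif()

Note that NUM_OF_PROCESSOR is defined here in grpc.cmake but reused by leveldb.cmake below, so the leveldb build parallelizes correctly only when grpc.cmake has been processed first.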
+INCLUDE_DIRECTORIES(${LEVELDB_INCLUDE_DIR}) + +ExternalProject_Add( + extern_leveldb + ${EXTERNAL_PROJECT_LOG_ARGS} + PREFIX ${LEVELDB_SOURCES_DIR} + URL "https://github.com/google/leveldb/archive/v1.18.tar.gz" + URL_MD5 "73770de34a2a5ab34498d2e05b2b7fa0" + CONFIGURE_COMMAND "" + BUILD_COMMAND CXXFLAGS=-fPIC make -j ${NUM_OF_PROCESSOR} libleveldb.a + INSTALL_COMMAND mkdir -p ${LEVELDB_INSTALL_DIR}/lib/ + && cp ${LEVELDB_SOURCES_DIR}/src/extern_leveldb/libleveldb.a ${LEVELDB_LIBRARIES} + && cp -r ${LEVELDB_SOURCES_DIR}/src/extern_leveldb/include ${LEVELDB_INSTALL_DIR}/ + BUILD_IN_SOURCE 1 +) + +ADD_DEPENDENCIES(extern_leveldb snappy) + +ADD_LIBRARY(leveldb STATIC IMPORTED GLOBAL) +SET_PROPERTY(TARGET leveldb PROPERTY IMPORTED_LOCATION ${LEVELDB_LIBRARIES}) +ADD_DEPENDENCIES(leveldb extern_leveldb) + +LIST(APPEND external_project_dependencies leveldb) + diff --git a/cmake/external/libxsmm.cmake b/cmake/external/libxsmm.cmake new file mode 100644 index 0000000000..530f7ebe28 --- /dev/null +++ b/cmake/external/libxsmm.cmake @@ -0,0 +1,57 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +OPTION(WITH_LIBXSMM "Compile with libxsmm" OFF) + +IF(NOT WITH_LIBXSMM) + return() +ENDIF() + +IF(WIN32 OR APPLE OR ANDROID OR IOS) + MESSAGE(WARNING "Windows, Mac or Mobile are not supported with libxsmm in Paddle yet.") + SET(WITH_LIBXSMM OFF CACHE STRING "Disable LIBXSMM" FORCE) + return() +ENDIF() + +INCLUDE (ExternalProject) + +SET(LIBXSMM_SOURCES_DIR ${THIRD_PARTY_PATH}/libxsmm) +SET(LIBXSMM_INSTALL_DIR ${THIRD_PARTY_PATH}/install/libxsmm) +SET(LIBXSMM_INCLUDE_DIR "${LIBXSMM_INSTALL_DIR}/include" CACHE PATH "LIBXSMM include directory." FORCE) +SET(LIBXSMM_LIBRARY_DIR "${LIBXSMM_INSTALL_DIR}/lib" CACHE PATH "LIBXSMM library directory."
FORCE) +SET(LIBXSMM_LIBS "${LIBXSMM_LIBRARY_DIR}/libxsmm.a" + "${LIBXSMM_LIBRARY_DIR}/libxsmmnoblas.a") + +ExternalProject_Add( + extern_libxsmm + GIT_REPOSITORY "https://github.com/hfp/libxsmm.git" + GIT_TAG "7cc03b5b342fdbc6b6d990b190671c5dbb8489a2" + PREFIX ${LIBXSMM_SOURCES_DIR} + UPDATE_COMMAND "" + CONFIGURE_COMMAND "" + BUILD_IN_SOURCE 1 + BUILD_COMMAND $(MAKE) --silent PREFIX=${LIBXSMM_INSTALL_DIR} CXX=g++ CC=gcc WARP=0 install + INSTALL_COMMAND "" +) +ADD_LIBRARY(libxsmm STATIC IMPORTED GLOBAL) +SET_PROPERTY(TARGET libxsmm PROPERTY IMPORTED_LOCATION "${LIBXSMM_LIBRARY_DIR}/libxsmm.a") +SET_PROPERTY(TARGET libxsmm APPEND PROPERTY INTERFACE_LINK_LIBRARIES "${LIBXSMM_LIBRARY_DIR}/libxsmmnoblas.a") # a second IMPORTED_LOCATION would silently overwrite the first + +MESSAGE(STATUS "Libxsmm library: ${LIBXSMM_LIBS}") +include_directories(${LIBXSMM_INCLUDE_DIR}) +ADD_DEFINITIONS(-DPADDLE_WITH_LIBXSMM) +ADD_DEPENDENCIES(libxsmm extern_libxsmm) +LIST(APPEND external_project_dependencies libxsmm) + diff --git a/cmake/external/mkldnn.cmake b/cmake/external/mkldnn.cmake index 5759e5c489..260985cc8a 100644 --- a/cmake/external/mkldnn.cmake +++ b/cmake/external/mkldnn.cmake @@ -24,7 +24,7 @@ SET(MKLDNN_INSTALL_DIR ${THIRD_PARTY_PATH}/install/mkldnn) SET(MKLDNN_INC_DIR "${MKLDNN_INSTALL_DIR}/include" CACHE PATH "mkldnn include directory." FORCE) IF(WIN32 OR APPLE) - MESSAGE(WARNING + MESSAGE(WARNING "Windows or Mac is not supported with MKLDNN in Paddle yet." "Force WITH_MKLDNN=OFF") SET(WITH_MKLDNN OFF CACHE STRING "Disable MKLDNN in Windows and MacOS" FORCE) @@ -45,22 +45,26 @@ IF(${CBLAS_PROVIDER} STREQUAL "MKLML") ELSE() MESSAGE(FATAL_ERROR "Should enable MKLML when build MKLDNN") ENDIF() - -SET(MKLDNN_CFLAG "${CMAKE_C_FLAGS} -Wno-error=strict-overflow") -SET(MKLDNN_CXXFLAG "${CMAKE_CXX_FLAGS} -Wno-error=strict-overflow") +SET(MKLDNN_FLAG "-Wno-error=strict-overflow -Wno-error=unused-result") +SET(MKLDNN_FLAG "${MKLDNN_FLAG} -Wno-unused-result -Wno-unused-value") +SET(MKLDNN_CFLAG "${CMAKE_C_FLAGS} ${MKLDNN_FLAG}") +SET(MKLDNN_CXXFLAG "${CMAKE_CXX_FLAGS} ${MKLDNN_FLAG}") ExternalProject_Add( ${MKLDNN_PROJECT} ${EXTERNAL_PROJECT_LOG_ARGS} DEPENDS ${MKLDNN_DEPENDS} GIT_REPOSITORY "https://github.com/01org/mkl-dnn.git" - GIT_TAG "v0.11" + GIT_TAG "a29d8487a63afca3d5b8c5bbdbb473cf8ccc6e51" PREFIX ${MKLDNN_SOURCES_DIR} UPDATE_COMMAND "" + CMAKE_ARGS -DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER} + CMAKE_ARGS -DCMAKE_C_COMPILER=${CMAKE_C_COMPILER} CMAKE_ARGS -DCMAKE_INSTALL_PREFIX=${MKLDNN_INSTALL_DIR} - CMAKE_ARGS -DCMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE} + CMAKE_ARGS -DCMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE} CMAKE_ARGS -DMKLROOT=${MKLML_ROOT} CMAKE_ARGS -DCMAKE_C_FLAGS=${MKLDNN_CFLAG} CMAKE_ARGS -DCMAKE_CXX_FLAGS=${MKLDNN_CXXFLAG} + CMAKE_ARGS -DWITH_TEST=OFF -DWITH_EXAMPLE=OFF CMAKE_CACHE_ARGS -DCMAKE_INSTALL_PREFIX:PATH=${MKLDNN_INSTALL_DIR} -DMKLROOT:PATH=${MKLML_ROOT} ) diff --git a/cmake/external/mklml.cmake b/cmake/external/mklml.cmake index 796bcf28a1..82c424fb79 100644 --- a/cmake/external/mklml.cmake +++ b/cmake/external/mklml.cmake @@ -27,8 +27,12 @@ ENDIF() INCLUDE(ExternalProject) SET(MKLML_PROJECT "extern_mklml") -SET(MKLML_VER "mklml_lnx_2018.0.1.20171007") -SET(MKLML_URL "http://paddlepaddledeps.bj.bcebos.com/${MKLML_VER}.tgz") +IF((NOT DEFINED MKLML_VER) OR (NOT DEFINED MKLML_URL)) + MESSAGE(STATUS "use pre-defined download url") + SET(MKLML_VER "mklml_lnx_2018.0.3.20180406" CACHE STRING "" FORCE) + SET(MKLML_URL "http://paddlepaddledeps.cdn.bcebos.com/${MKLML_VER}.tgz" CACHE STRING "" FORCE) +ENDIF() +MESSAGE(STATUS "MKLML_VER: ${MKLML_VER}, MKLML_URL: 
${MKLML_URL}") SET(MKLML_SOURCE_DIR "${THIRD_PARTY_PATH}/mklml") SET(MKLML_DOWNLOAD_DIR "${MKLML_SOURCE_DIR}/src/${MKLML_PROJECT}") SET(MKLML_DST_DIR "mklml") diff --git a/cmake/external/openblas.cmake b/cmake/external/openblas.cmake index 8af2765f58..56024edf5b 100644 --- a/cmake/external/openblas.cmake +++ b/cmake/external/openblas.cmake @@ -29,6 +29,8 @@ IF(NOT ${CBLAS_FOUND}) "${CBLAS_INSTALL_DIR}/lib/${CMAKE_STATIC_LIBRARY_PREFIX}openblas${CMAKE_STATIC_LIBRARY_SUFFIX}" CACHE FILEPATH "openblas library." FORCE) + ADD_DEFINITIONS(-DPADDLE_USE_OPENBLAS) + SET(OPENBLAS_CC "${CMAKE_C_COMPILER} -Wno-unused-but-set-variable -Wno-unused-variable") SET(OPENBLAS_COMMIT "v0.2.20") @@ -112,7 +114,17 @@ INCLUDE_DIRECTORIES(${CBLAS_INC_DIR}) SET(dummyfile ${CMAKE_CURRENT_BINARY_DIR}/cblas_dummy.c) FILE(WRITE ${dummyfile} "const char *dummy_cblas = \"${dummyfile}\";") ADD_LIBRARY(cblas STATIC ${dummyfile}) -TARGET_LINK_LIBRARIES(cblas ${CBLAS_LIBRARIES}) + +IF("${CBLAS_PROVIDER}" STREQUAL "MKLML") + TARGET_LINK_LIBRARIES(cblas dynload_mklml) +ELSE() + TARGET_LINK_LIBRARIES(cblas ${CBLAS_LIBRARIES}) +ENDIF("${CBLAS_PROVIDER}" STREQUAL "MKLML") + +IF(WITH_LIBXSMM) + TARGET_LINK_LIBRARIES(cblas ${LIBXSMM_LIBS}) + ADD_DEPENDENCIES(cblas extern_libxsmm) +ENDIF() IF(NOT ${CBLAS_FOUND}) ADD_DEPENDENCIES(cblas extern_openblas) diff --git a/cmake/external/protobuf.cmake b/cmake/external/protobuf.cmake index 0fde4373a4..2665996432 100644 --- a/cmake/external/protobuf.cmake +++ b/cmake/external/protobuf.cmake @@ -212,6 +212,7 @@ FUNCTION(build_protobuf TARGET_NAME BUILD_FOR_HOST) ${CMAKE_COMMAND} ${PROTOBUF_SOURCES_DIR}/src/${TARGET_NAME}/cmake ${OPTIONAL_ARGS} -Dprotobuf_BUILD_TESTS=OFF + -DCMAKE_SKIP_RPATH=ON -DCMAKE_POSITION_INDEPENDENT_CODE=ON -DCMAKE_BUILD_TYPE=${THIRD_PARTY_BUILD_TYPE} -DCMAKE_INSTALL_PREFIX=${PROTOBUF_INSTALL_DIR} diff --git a/cmake/external/python.cmake b/cmake/external/python.cmake index d7e5571bdb..f17b8d46dc 100644 --- a/cmake/external/python.cmake +++ b/cmake/external/python.cmake @@ -18,8 +18,9 @@ ENDIF() INCLUDE(python_module) -FIND_PACKAGE(PythonInterp 2.7) -FIND_PACKAGE(PythonLibs 2.7) +FIND_PACKAGE(PythonInterp ${PY_VERSION}) +FIND_PACKAGE(PythonLibs ${PY_VERSION}) + # Fixme: Maybe find a static library. Get SHARED/STATIC by FIND_PACKAGE. 
ADD_LIBRARY(python SHARED IMPORTED GLOBAL) SET_PROPERTY(TARGET python PROPERTY IMPORTED_LOCATION ${PYTHON_LIBRARIES}) diff --git a/cmake/external/snappy.cmake b/cmake/external/snappy.cmake index 80282329c6..af09ed4d5d 100644 --- a/cmake/external/snappy.cmake +++ b/cmake/external/snappy.cmake @@ -47,8 +47,6 @@ ExternalProject_Add( -DCMAKE_INSTALL_LIBDIR:PATH=${SNAPPY_INSTALL_DIR}/lib -DCMAKE_POSITION_INDEPENDENT_CODE:BOOL=ON -DCMAKE_BUILD_TYPE:STRING=${THIRD_PARTY_BUILD_TYPE} - BUILD_COMMAND make -j8 - INSTALL_COMMAND make install ) add_library(snappy STATIC IMPORTED GLOBAL) diff --git a/cmake/external/snappystream.cmake b/cmake/external/snappystream.cmake index 20a9643082..6df636d7fa 100644 --- a/cmake/external/snappystream.cmake +++ b/cmake/external/snappystream.cmake @@ -46,8 +46,6 @@ ExternalProject_Add( -DCMAKE_INSTALL_PREFIX:PATH=${SNAPPYSTREAM_INSTALL_DIR} -DCMAKE_INSTALL_LIBDIR:PATH=${SNAPPYSTREAM_INSTALL_DIR}/lib -DCMAKE_BUILD_TYPE:STRING=${THIRD_PARTY_BUILD_TYPE} - BUILD_COMMAND make -j8 - INSTALL_COMMAND make install DEPENDS snappy ) diff --git a/cmake/external/xbyak.cmake b/cmake/external/xbyak.cmake new file mode 100644 index 0000000000..384c2f9328 --- /dev/null +++ b/cmake/external/xbyak.cmake @@ -0,0 +1,58 @@ +# Copyright (c) 2017 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
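The snappy.cmake and snappystream.cmake hunks above drop the hand-written BUILD_COMMAND and INSTALL_COMMAND lines. When these options are omitted, ExternalProject_Add falls back to the default build and install steps for the generator in use, so parallelism follows the outer build instead of a hard-coded make -j8. A self-contained sketch of the pattern (the URL and paths are hypothetical):

    include(ExternalProject)
    ExternalProject_Add(
      extern_demo
      URL "http://example.com/demo-1.0.tar.gz"
      PREFIX ${CMAKE_BINARY_DIR}/third_party/demo
      CMAKE_ARGS -DCMAKE_INSTALL_PREFIX=${CMAKE_BINARY_DIR}/third_party/install/demo
      # No BUILD_COMMAND / INSTALL_COMMAND given: ExternalProject generates
      # generator-appropriate ones automatically.
    )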
+ +set(WITH_XBYAK ON) +if(WIN32 OR APPLE) + SET(WITH_XBYAK OFF CACHE STRING "Disable XBYAK in Windows and MacOS" FORCE) + return() +endif() + +include(ExternalProject) + +set(XBYAK_PROJECT extern_xbyak) +set(XBYAK_PREFIX_DIR ${THIRD_PARTY_PATH}/xbyak) +set(XBYAK_INSTALL_ROOT ${THIRD_PARTY_PATH}/install/xbyak) +set(XBYAK_INC_DIR ${XBYAK_INSTALL_ROOT}/include) + +include_directories(${XBYAK_INC_DIR}) +include_directories(${XBYAK_INC_DIR}/xbyak) + +add_definitions(-DPADDLE_WITH_XBYAK) + +# xbyak options +add_definitions(-DXBYAK64) +add_definitions(-DXBYAK_NO_OP_NAMES) + +ExternalProject_Add( + ${XBYAK_PROJECT} + ${EXTERNAL_PROJECT_LOG_ARGS} + DEPENDS "" + GIT_REPOSITORY "https://github.com/herumi/xbyak.git" + GIT_TAG "v5.661" # Jul 26th + PREFIX ${XBYAK_PREFIX_DIR} + UPDATE_COMMAND "" + CMAKE_ARGS -DCMAKE_INSTALL_PREFIX=${XBYAK_INSTALL_ROOT} + CMAKE_CACHE_ARGS -DCMAKE_INSTALL_PREFIX:PATH=${XBYAK_INSTALL_ROOT} +) + +if (${CMAKE_VERSION} VERSION_LESS "3.3.0") + set(dummyfile ${CMAKE_CURRENT_BINARY_DIR}/xbyak_dummy.c) + file(WRITE ${dummyfile} "const char *dummy_xbyak = \"${dummyfile}\";") + add_library(xbyak STATIC ${dummyfile}) +else() + add_library(xbyak INTERFACE) +endif() + +add_dependencies(xbyak ${XBYAK_PROJECT}) +list(APPEND external_project_dependencies xbyak) diff --git a/cmake/generic.cmake b/cmake/generic.cmake index 1d3e2ade6d..82c958073c 100644 --- a/cmake/generic.cmake +++ b/cmake/generic.cmake @@ -96,6 +96,20 @@ if(NOT APPLE AND NOT ANDROID) set(CMAKE_CXX_LINK_EXECUTABLE "${CMAKE_CXX_LINK_EXECUTABLE} -pthread -ldl -lrt") endif(NOT APPLE AND NOT ANDROID) +set_property(GLOBAL PROPERTY FLUID_MODULES "") +# find_fluid_modules collects all fluid modules; the list is used when +# building the paddle fluid static library and the inference libs +function(find_fluid_modules TARGET_NAME) + get_filename_component(__target_path ${TARGET_NAME} ABSOLUTE) + string(REGEX REPLACE "^${PADDLE_SOURCE_DIR}/" "" __target_path ${__target_path}) + string(FIND "${__target_path}" "fluid" pos) + if(pos GREATER 1) + get_property(fluid_modules GLOBAL PROPERTY FLUID_MODULES) + set(fluid_modules ${fluid_modules} ${TARGET_NAME}) + set_property(GLOBAL PROPERTY FLUID_MODULES "${fluid_modules}") + endif() +endfunction(find_fluid_modules) + function(merge_static_libs TARGET_NAME) set(libs ${ARGN}) list(REMOVE_DUPLICATES libs) @@ -195,6 +209,15 @@ function(cc_library TARGET_NAME) list(REMOVE_ITEM cc_library_DEPS warpctc) add_dependencies(${TARGET_NAME} warpctc) endif() + # Only depend on libmklml.so, do not link it directly + if("${cc_library_DEPS};" MATCHES "mklml;") + list(REMOVE_ITEM cc_library_DEPS mklml) + if(NOT "${TARGET_NAME}" MATCHES "dynload_mklml") + list(APPEND cc_library_DEPS dynload_mklml) + endif() + add_dependencies(${TARGET_NAME} mklml) + target_link_libraries(${TARGET_NAME} "-L${MKLML_LIB_DIR} -liomp5 -Wl,--as-needed") + endif() target_link_libraries(${TARGET_NAME} ${cc_library_DEPS}) add_dependencies(${TARGET_NAME} ${cc_library_DEPS}) endif() @@ -206,8 +229,6 @@ function(cc_library TARGET_NAME) list(APPEND cc_library_HEADERS ${CMAKE_CURRENT_SOURCE_DIR}/${source}.h) endif() endforeach() - add_style_check_target(${TARGET_NAME} ${cc_library_SRCS} ${cc_library_HEADERS}) - else(cc_library_SRCS) if(cc_library_DEPS) merge_static_libs(${TARGET_NAME} ${cc_library_DEPS}) @@ -231,16 +252,23 @@ endfunction(cc_binary) function(cc_test TARGET_NAME) if(WITH_TESTING) - set(options "") + set(options SERIAL) set(oneValueArgs "") set(multiValueArgs SRCS DEPS ARGS) cmake_parse_arguments(cc_test "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN})
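The SERIAL flag just added to cc_test's option list (and handled at the end of this hunk) marks a test to run alone (RUN_SERIAL) and with deterministic-execution flags set in its test environment. A usage sketch, with hypothetical target and source names:

    cc_test(concat_op_test SRCS concat_op_test.cc DEPS concat_op SERIAL)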
add_executable(${TARGET_NAME} ${cc_test_SRCS}) - target_link_libraries(${TARGET_NAME} ${cc_test_DEPS} paddle_gtest_main memory gtest gflags glog) - add_dependencies(${TARGET_NAME} ${cc_test_DEPS} paddle_gtest_main memory gtest gflags glog) + target_link_libraries(${TARGET_NAME} ${cc_test_DEPS} paddle_gtest_main lod_tensor memory gtest gflags glog) + add_dependencies(${TARGET_NAME} ${cc_test_DEPS} paddle_gtest_main lod_tensor memory gtest gflags glog) add_test(NAME ${TARGET_NAME} COMMAND ${TARGET_NAME} ${cc_test_ARGS} WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}) + if (${cc_test_SERIAL}) + set_property(TEST ${TARGET_NAME} PROPERTY RUN_SERIAL 1) + + set_property(TEST ${TARGET_NAME} PROPERTY ENVIRONMENT FLAGS_cpu_deterministic=true) + set_property(TEST ${TARGET_NAME} PROPERTY ENVIRONMENT FLAGS_init_allocated_mem=true) + set_property(TEST ${TARGET_NAME} PROPERTY ENVIRONMENT FLAGS_cudnn_deterministic=true) + endif() endif() endfunction(cc_test) @@ -268,7 +296,6 @@ function(nv_library TARGET_NAME) list(APPEND nv_library_HEADERS ${CMAKE_CURRENT_SOURCE_DIR}/${source}.h) endif() endforeach() - add_style_check_target(${TARGET_NAME} ${nv_library_SRCS} ${nv_library_HEADERS}) else(nv_library_SRCS) if (nv_library_DEPS) merge_static_libs(${TARGET_NAME} ${nv_library_DEPS}) @@ -295,14 +322,21 @@ endfunction(nv_binary) function(nv_test TARGET_NAME) if (WITH_GPU AND WITH_TESTING) - set(options "") + set(options SERIAL) set(oneValueArgs "") set(multiValueArgs SRCS DEPS) cmake_parse_arguments(nv_test "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN}) cuda_add_executable(${TARGET_NAME} ${nv_test_SRCS}) - target_link_libraries(${TARGET_NAME} ${nv_test_DEPS} paddle_gtest_main memory gtest gflags glog) - add_dependencies(${TARGET_NAME} ${nv_test_DEPS} paddle_gtest_main memory gtest gflags glog) + target_link_libraries(${TARGET_NAME} ${nv_test_DEPS} paddle_gtest_main lod_tensor memory gtest gflags glog) + add_dependencies(${TARGET_NAME} ${nv_test_DEPS} paddle_gtest_main lod_tensor memory gtest gflags glog) add_test(${TARGET_NAME} ${TARGET_NAME}) + if (nv_test_SERIAL) + set_property(TEST ${TARGET_NAME} PROPERTY RUN_SERIAL 1) + + set_property(TEST ${TARGET_NAME} PROPERTY ENVIRONMENT FLAGS_cpu_deterministic=true) + set_property(TEST ${TARGET_NAME} PROPERTY ENVIRONMENT FLAGS_init_allocated_mem=true) + set_property(TEST ${TARGET_NAME} PROPERTY ENVIRONMENT FLAGS_cudnn_deterministic=true) + endif() endif() endfunction(nv_test) @@ -338,7 +372,6 @@ function(hip_library TARGET_NAME) list(APPEND hip_library_HEADERS ${CMAKE_CURRENT_SOURCE_DIR}/${source}.h) endif() endforeach() - add_style_check_target(${TARGET_NAME} ${hip_library_SRCS} ${hip_library_HEADERS}) else(hip_library_SRCS) if (hip_library_DEPS) merge_static_libs(${TARGET_NAME} ${hip_library_DEPS}) @@ -550,7 +583,9 @@ function(py_test TARGET_NAME) set(multiValueArgs SRCS DEPS ARGS ENVS) cmake_parse_arguments(py_test "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN}) add_test(NAME ${TARGET_NAME} - COMMAND env PYTHONPATH=${PADDLE_BINARY_DIR}/python ${py_test_ENVS} + COMMAND env FLAGS_init_allocated_mem=true FLAGS_cudnn_deterministic=true + FLAGS_cpu_deterministic=true + PYTHONPATH=${PADDLE_BINARY_DIR}/python ${py_test_ENVS} ${PYTHON_EXECUTABLE} -u ${py_test_SRCS} ${py_test_ARGS} WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}) endif() @@ -608,3 +643,21 @@ function(grpc_library TARGET_NAME) COMPILE_FLAGS "-Wno-non-virtual-dtor -Wno-error=non-virtual-dtor -Wno-error=delete-non-virtual-dtor") cc_library("${TARGET_NAME}" SRCS "${grpc_library_SRCS}" DEPS 
"${TARGET_NAME}_grpc" "${TARGET_NAME}_proto" "${grpc_library_DEPS}") endfunction() + + +function(brpc_library TARGET_NAME) + set(oneValueArgs PROTO) + set(multiValueArgs SRCS DEPS) + set(options "") + cmake_parse_arguments(brpc_library "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN}) + + message(STATUS "generating brpc ${brpc_library_PROTO}") + + get_filename_component(ABS_PROTO ${brpc_library_PROTO} ABSOLUTE) + get_filename_component(PROTO_WE ${brpc_library_PROTO} NAME_WE) + get_filename_component(PROTO_PATH ${ABS_PROTO} PATH) + + protobuf_generate_cpp(brpc_proto_srcs brpc_proto_hdrs "${ABS_PROTO}") + cc_library("${TARGET_NAME}_proto" SRCS "${brpc_proto_srcs}") + cc_library("${TARGET_NAME}" SRCS "${brpc_library_SRCS}" DEPS "${TARGET_NAME}_proto" "${brpc_library_DEPS}") +endfunction() diff --git a/cmake/inference_lib.cmake b/cmake/inference_lib.cmake index cc75801982..834ab5a9e5 100644 --- a/cmake/inference_lib.cmake +++ b/cmake/inference_lib.cmake @@ -12,19 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -set_property(GLOBAL PROPERTY FLUID_MODULES "") -# find all fluid modules is used for paddle fluid static library -function(find_fluid_modules TARGET_NAME) - get_filename_component(__target_path ${TARGET_NAME} ABSOLUTE) - string(REGEX REPLACE "^${PADDLE_SOURCE_DIR}/" "" __target_path ${__target_path}) - string(FIND "${__target_path}" "fluid" pos) - if(pos GREATER 1) - get_property(fluid_modules GLOBAL PROPERTY FLUID_MODULES) - set(fluid_modules ${fluid_modules} ${TARGET_NAME}) - set_property(GLOBAL PROPERTY FLUID_MODULES "${fluid_modules}") - endif() -endfunction(find_fluid_modules) - # make package for paddle fluid shared and static library function(copy TARGET) set(options "") @@ -39,7 +26,7 @@ function(copy TARGET) message(FATAL_ERROR "${TARGET} source numbers are not equal to destination numbers") endif() math(EXPR len "${copy_lib_SRCS_len} - 1") - + add_custom_target(${TARGET} DEPENDS ${copy_lib_DEPS}) foreach(index RANGE ${len}) list(GET copy_lib_SRCS ${index} src) @@ -52,66 +39,91 @@ function(copy TARGET) endfunction() # third party -set(dst_dir "${CMAKE_INSTALL_PREFIX}/third_party/eigen3") +set(dst_dir "${FLUID_INSTALL_DIR}/third_party/eigen3") copy(eigen3_lib SRCS ${EIGEN_INCLUDE_DIR}/Eigen/Core ${EIGEN_INCLUDE_DIR}/Eigen/src ${EIGEN_INCLUDE_DIR}/unsupported/Eigen DSTS ${dst_dir}/Eigen ${dst_dir}/Eigen ${dst_dir}/unsupported + DEPS eigen3 ) -set(dst_dir "${CMAKE_INSTALL_PREFIX}/third_party/install/gflags") +set(dst_dir "${FLUID_INSTALL_DIR}/third_party/install/gflags") copy(gflags_lib SRCS ${GFLAGS_INCLUDE_DIR} ${GFLAGS_LIBRARIES} DSTS ${dst_dir} ${dst_dir}/lib + DEPS gflags ) -set(dst_dir "${CMAKE_INSTALL_PREFIX}/third_party/install/glog") +set(dst_dir "${FLUID_INSTALL_DIR}/third_party/install/glog") copy(glog_lib SRCS ${GLOG_INCLUDE_DIR} ${GLOG_LIBRARIES} DSTS ${dst_dir} ${dst_dir}/lib + DEPS glog +) + +set(dst_dir "${FLUID_INSTALL_DIR}/third_party/boost/") +copy(boost_lib + SRCS ${BOOST_INCLUDE_DIR}/boost + DSTS ${dst_dir} + DEPS boost ) if(NOT PROTOBUF_FOUND) - set(dst_dir "${CMAKE_INSTALL_PREFIX}/third_party/install/protobuf") + set(dst_dir "${FLUID_INSTALL_DIR}/third_party/install/protobuf") copy(protobuf_lib SRCS ${PROTOBUF_INCLUDE_DIR} ${PROTOBUF_LIBRARY} DSTS ${dst_dir} ${dst_dir}/lib + DEPS extern_protobuf ) endif() if(NOT CBLAS_FOUND) - set(dst_dir "${CMAKE_INSTALL_PREFIX}/third_party/install/openblas") + set(dst_dir "${FLUID_INSTALL_DIR}/third_party/install/openblas") copy(openblas_lib SRCS 
${CBLAS_INSTALL_DIR}/lib ${CBLAS_INSTALL_DIR}/include DSTS ${dst_dir} ${dst_dir} + DEPS extern_openblas ) elseif (WITH_MKLML) - set(dst_dir "${CMAKE_INSTALL_PREFIX}/third_party/install/mklml") + set(dst_dir "${FLUID_INSTALL_DIR}/third_party/install/mklml") copy(mklml_lib SRCS ${MKLML_LIB} ${MKLML_IOMP_LIB} ${MKLML_INC_DIR} DSTS ${dst_dir}/lib ${dst_dir}/lib ${dst_dir} + DEPS mklml ) endif() +if(WITH_MKLDNN) + set(dst_dir "${FLUID_INSTALL_DIR}/third_party/install/mkldnn") + copy(mkldnn_lib + SRCS ${MKLDNN_INC_DIR} ${MKLDNN_SHARED_LIB} + DSTS ${dst_dir} ${dst_dir}/lib + DEPS mkldnn + ) +endif() + if(NOT MOBILE_INFERENCE AND NOT RPI) - set(dst_dir "${CMAKE_INSTALL_PREFIX}/third_party/install/snappy") + set(dst_dir "${FLUID_INSTALL_DIR}/third_party/install/snappy") copy(snappy_lib SRCS ${SNAPPY_INCLUDE_DIR} ${SNAPPY_LIBRARIES} - DSTS ${dst_dir} ${dst_dir}/lib) + DSTS ${dst_dir} ${dst_dir}/lib + DEPS snappy) - set(dst_dir "${CMAKE_INSTALL_PREFIX}/third_party/install/snappystream") + set(dst_dir "${FLUID_INSTALL_DIR}/third_party/install/snappystream") copy(snappystream_lib SRCS ${SNAPPYSTREAM_INCLUDE_DIR} ${SNAPPYSTREAM_LIBRARIES} - DSTS ${dst_dir} ${dst_dir}/lib) + DSTS ${dst_dir} ${dst_dir}/lib + DEPS snappystream) - set(dst_dir "${CMAKE_INSTALL_PREFIX}/third_party/install/zlib") + set(dst_dir "${FLUID_INSTALL_DIR}/third_party/install/zlib") copy(zlib_lib SRCS ${ZLIB_INCLUDE_DIR} ${ZLIB_LIBRARIES} - DSTS ${dst_dir} ${dst_dir}/lib) + DSTS ${dst_dir} ${dst_dir}/lib + DEPS zlib) endif() # paddle fluid module set(src_dir "${PADDLE_SOURCE_DIR}/paddle/fluid") -set(dst_dir "${CMAKE_INSTALL_PREFIX}/paddle/fluid") +set(dst_dir "${FLUID_INSTALL_DIR}/paddle/fluid") set(module "framework") copy(framework_lib DEPS framework_py_proto SRCS ${src_dir}/${module}/*.h ${src_dir}/${module}/details/*.h ${PADDLE_BINARY_DIR}/paddle/fluid/framework/framework.pb.h @@ -124,10 +136,23 @@ copy(memory_lib DSTS ${dst_dir}/${module} ${dst_dir}/${module}/detail ) +set(inference_deps paddle_fluid_shared paddle_fluid) + +set(module "inference/api") +if (WITH_ANAKIN AND WITH_GPU) + copy(anakin_inference_lib DEPS paddle_inference_api inference_anakin_api + SRCS + ${PADDLE_BINARY_DIR}/paddle/fluid/inference/api/libinference_anakin_api* # compiled anakin api + ${ANAKIN_INSTALL_DIR} # anakin release + DSTS ${dst_dir}/inference/anakin ${dst_dir}/inference/anakin) + list(APPEND inference_deps anakin_inference_lib) +endif() + set(module "inference") -copy(inference_lib DEPS paddle_fluid_shared paddle_fluid +copy(inference_lib DEPS ${inference_deps} SRCS ${src_dir}/${module}/*.h ${PADDLE_BINARY_DIR}/paddle/fluid/inference/libpaddle_fluid.* - DSTS ${dst_dir}/${module} ${dst_dir}/${module} + ${src_dir}/${module}/api/paddle_inference_api.h ${src_dir}/${module}/api/demo_ci + DSTS ${dst_dir}/${module} ${dst_dir}/${module} ${dst_dir}/${module} ${dst_dir}/${module} ) set(module "platform") @@ -142,4 +167,31 @@ copy(string_lib DSTS ${dst_dir}/${module} ${dst_dir}/${module}/tinyformat ) +set(module "pybind") +copy(pybind_lib + SRCS ${CMAKE_CURRENT_BINARY_DIR}/paddle/fluid/${module}/pybind.h + DSTS ${dst_dir}/${module} +) + +# CMakeCache Info +copy(cmake_cache + SRCS ${CMAKE_CURRENT_BINARY_DIR}/CMakeCache.txt + DSTS ${FLUID_INSTALL_DIR}) + add_custom_target(inference_lib_dist DEPENDS ${inference_lib_dist_dep}) + +# paddle fluid version +execute_process( + COMMAND ${GIT_EXECUTABLE} log --pretty=format:%H -1 + WORKING_DIRECTORY ${PADDLE_SOURCE_DIR} + OUTPUT_VARIABLE PADDLE_GIT_COMMIT) +set(version_file ${FLUID_INSTALL_DIR}/version.txt) +file(WRITE 
${version_file} + "GIT COMMIT ID: ${PADDLE_GIT_COMMIT}\n" + "WITH_MKL: ${WITH_MKL}\n" + "WITH_GPU: ${WITH_GPU}\n") +if(WITH_GPU) + file(APPEND ${version_file} + "CUDA version: ${CUDA_VERSION}\n" + "CUDNN version: v${CUDNN_MAJOR_VERSION}\n") +endif() diff --git a/cmake/version.cmake b/cmake/version.cmake index cde650128a..ac10bdf067 100644 --- a/cmake/version.cmake +++ b/cmake/version.cmake @@ -1,23 +1,46 @@ # Get the latest git tag. set(PADDLE_VERSION $ENV{PADDLE_VERSION}) set(tmp_version "HEAD") +set(TAG_VERSION_REGEX "[0-9]+\\.[0-9]+\\.[0-9]+(\\.(a|b|rc)\\.[0-9]+)?") +set(COMMIT_VERSION_REGEX "[0-9a-f]+[0-9a-f]+[0-9a-f]+[0-9a-f]+[0-9a-f]+") while ("${PADDLE_VERSION}" STREQUAL "") + # Check current branch name execute_process( - COMMAND ${GIT_EXECUTABLE} describe --tags --abbrev=0 ${tmp_version} + COMMAND ${GIT_EXECUTABLE} rev-parse --abbrev-ref ${tmp_version} WORKING_DIRECTORY ${PADDLE_SOURCE_DIR} - OUTPUT_VARIABLE GIT_TAG_NAME - RESULT_VARIABLE GIT_RESULT + OUTPUT_VARIABLE GIT_BRANCH_NAME + RESULT_VARIABLE GIT_BRANCH_RESULT ERROR_QUIET OUTPUT_STRIP_TRAILING_WHITESPACE) - if (NOT ${GIT_RESULT}) - # Check the tag is a correct version - if (${GIT_TAG_NAME} MATCHES "v[0-9]+\\.[0-9]+\\.[0-9]+(\\.(a|b|rc)\\.[0-9]+)?") - string(REPLACE "v" "" PADDLE_VERSION ${GIT_TAG_NAME}) - else() # otherwise, get the previous git tag name. - set(tmp_version "${GIT_TAG_NAME}~1") + if (NOT ${GIT_BRANCH_RESULT}) + execute_process( + COMMAND ${GIT_EXECUTABLE} describe --tags --abbrev=0 --always ${tmp_version} + WORKING_DIRECTORY ${PADDLE_SOURCE_DIR} + OUTPUT_VARIABLE GIT_TAG_NAME + RESULT_VARIABLE GIT_RESULT + ERROR_QUIET OUTPUT_STRIP_TRAILING_WHITESPACE) + if (NOT ${GIT_RESULT}) + # Check if current branch is release branch + if (${GIT_BRANCH_NAME} MATCHES "release/${TAG_VERSION_REGEX}") + # Check the tag is a correct version + if (${GIT_TAG_NAME} MATCHES "${COMMIT_VERSION_REGEX}") + # if no tag was found, set PADDLE_VERSION to 0.0.0 to represent latest + set(PADDLE_VERSION "0.0.0") + elseif (${GIT_TAG_NAME} MATCHES "v${TAG_VERSION_REGEX}") + string(REPLACE "v" "" PADDLE_VERSION ${GIT_TAG_NAME}) + else() # otherwise, get the previous git tag name. 
+ set(tmp_version "${GIT_TAG_NAME}~1") + endif() + else() + # otherwise, we always set PADDLE_VERSION to 0.0.0 to represent latest + set(PADDLE_VERSION "0.0.0") + endif() + else() + set(PADDLE_VERSION "0.0.0") + message(WARNING "Cannot add paddle version from git tag") endif() else() set(PADDLE_VERSION "0.0.0") - message(WARNING "Cannot add paddle version from git tag") + message(WARNING "Cannot add paddle version because of an unexpected git branch result") endif() endwhile() diff --git a/doc/about/about_us.rst b/doc/about/about_us.rst new file mode 100644 index 0000000000..f67d8b8130 --- /dev/null +++ b/doc/about/about_us.rst @@ -0,0 +1,53 @@ +========= +About Us +========= + +What is PaddlePaddle +-------------------- + +- PaddlePaddle is a deep learning framework developed and open-sourced by Baidu; it lets developers and enterprises turn their AI ideas into reality safely and quickly + +- The project team brings together top deep learning scientists from around the world and is committed to giving developers and enterprises the best deep learning R&D experience + +- The framework is easy to learn, easy to use, secure, and efficient, making it the deep learning tool best suited to Chinese developers and enterprises + +Technical highlights of PaddlePaddle +------------------------- + +- A new generation of deep learning framework: PaddlePaddle is built on the idea of a "deep learning programming language"; without sacrificing performance, it greatly improves the framework's ability to express models and can describe any model that may potentially arise + +- Friendlier to large-scale computation: refined by many large-scale computing workloads inside Baidu, PaddlePaddle excels at distributed computing; its EDL technology can save large amounts of compute resources, and it also supports training large-scale sparse models + +- Visualized deep learning: with Visual DL, developers can conveniently observe overall training trends, data sample quality and intermediate results, parameter distributions and their evolution, and the structure of the model, making the programming process easier + +An education system built on PaddlePaddle +-------------------------------- + +- Deep learning courses: Baidu has worked with the top education and training institutions in the Chinese market to develop premium deep learning courses and textbooks that help developers master deep learning from scratch + +- Hands-on deep learning practice: for users focused on research and study, PaddlePaddle provides an online development environment that needs no installation, together with algorithm, compute, and data support + +- Offline training: rich, high-quality offline education activities, such as training for young faculty, hands-on boot camps, salons, and other forms of training and exchange + + +AI services built on PaddlePaddle +------------------------------ + +- EasyDL: helps enterprises with no algorithm background complete a deep learning task quickly; only a small amount of data is needed to obtain a high-quality model + +- AI Marketplace: provides a standardized trading mechanism for AI capabilities and products, helping enterprises quickly find what they need and run their AI business effectively + +- Deep learning competitions: PaddlePaddle brings together top deep learning developers; enterprises can publish their business problems and quickly find the best solution through competition + +If you have any questions about PaddlePaddle, you can reach us through the following channels +----------------------------------------------------------- + +- Learning/usage questions: give us feedback in the `PaddlePaddle open-source community `_ or the `PaddlePaddle Chinese community `_ + +- Suggestions for the development of the PaddlePaddle framework: send mail to Paddle-better@baidu.com + +We look forward to building a world-class deep learning framework with you and advancing AI technology together + + + +The PaddlePaddle Team diff --git a/doc/fluid/CMakeLists.txt b/doc/fluid/CMakeLists.txt index 8086507bb4..be92af3902 100644 --- a/doc/fluid/CMakeLists.txt +++ b/doc/fluid/CMakeLists.txt @@ -15,6 +15,9 @@ set(SPHINX_CACHE_DIR_EN "${CMAKE_CURRENT_BINARY_DIR}/en/_doctrees") # HTML output director set(SPHINX_HTML_DIR_EN "${CMAKE_CURRENT_BINARY_DIR}/en/html") +set(IMPORT_PADDLE_STRING "") +set(IMPORT_PADDLEV2_STRING "") + configure_file( "${CMAKE_CURRENT_SOURCE_DIR}/../templates/conf.py.en.in" "${BINARY_BUILD_DIR_EN}/conf.py" @@ -27,8 +30,6 @@ sphinx_add_target(paddle_fluid_docs ${CMAKE_CURRENT_SOURCE_DIR} ${SPHINX_HTML_DIR_EN}) -add_dependencies(paddle_fluid_docs gen_proto_py paddle_python) - # configured documentation tools and intermediate build results set(BINARY_BUILD_DIR_CN "${CMAKE_CURRENT_BINARY_DIR}/cn/_build") @@ -50,6 +51,4 @@ sphinx_add_target(paddle_fluid_docs_cn ${CMAKE_CURRENT_SOURCE_DIR} ${SPHINX_HTML_DIR_CN}) -add_dependencies(paddle_fluid_docs_cn gen_proto_py paddle_python) - add_subdirectory(api) diff --git a/doc/fluid/api/CMakeLists.txt b/doc/fluid/api/CMakeLists.txt index 48b396f078..435d6e10fb 100644 --- a/doc/fluid/api/CMakeLists.txt +++ b/doc/fluid/api/CMakeLists.txt @@ -7,6 +7,9 @@ set(SPHINX_CACHE_DIR_EN "${CMAKE_CURRENT_BINARY_DIR}/en/_doctrees") # HTML output director set(SPHINX_HTML_DIR_EN "${CMAKE_CURRENT_BINARY_DIR}/en/html") +set(IMPORT_PADDLE_STRING "import paddle") +set(IMPORT_PADDLEV2_STRING "import paddle.v2") + configure_file( "${CMAKE_CURRENT_SOURCE_DIR}/../../templates/conf.py.en.in" "${BINARY_BUILD_DIR_EN}/conf.py" diff --git a/doc/fluid/api/average.rst
b/doc/fluid/api/average.rst new file mode 100644 index 0000000000..496f5b2987 --- /dev/null +++ b/doc/fluid/api/average.rst @@ -0,0 +1,16 @@ +.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` + !DO NOT EDIT THIS FILE MANUALLY! + +============= +fluid.average +============= + +.. _api_fluid_average_WeightedAverage: + +WeightedAverage +--------------- + +.. autoclass:: paddle.fluid.average.WeightedAverage + :members: + :noindex: + diff --git a/doc/fluid/api/backward.rst b/doc/fluid/api/backward.rst new file mode 100644 index 0000000000..115e0d24b3 --- /dev/null +++ b/doc/fluid/api/backward.rst @@ -0,0 +1,23 @@ +.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` + !DO NOT EDIT THIS FILE MANUALLY! + +============== +fluid.backward +============== + +.. _api_fluid_backward_append_backward: + +append_backward +--------------- + +.. autofunction:: paddle.fluid.backward.append_backward + :noindex: + +.. _api_fluid_backward_calc_gradient: + +calc_gradient +------------- + +.. autofunction:: paddle.fluid.backward.calc_gradient + :noindex: + diff --git a/doc/fluid/api/clip.rst b/doc/fluid/api/clip.rst new file mode 100644 index 0000000000..aeefbb95a4 --- /dev/null +++ b/doc/fluid/api/clip.rst @@ -0,0 +1,43 @@ +.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` + !DO NOT EDIT THIS FILE MANUALLY! + +========== +fluid.clip +========== + +.. _api_fluid_clip_ErrorClipByValue: + +ErrorClipByValue +---------------- + +.. autoclass:: paddle.fluid.clip.ErrorClipByValue + :members: + :noindex: + +.. _api_fluid_clip_GradientClipByValue: + +GradientClipByValue +------------------- + +.. autoclass:: paddle.fluid.clip.GradientClipByValue + :members: + :noindex: + +.. _api_fluid_clip_GradientClipByNorm: + +GradientClipByNorm +------------------ + +.. autoclass:: paddle.fluid.clip.GradientClipByNorm + :members: + :noindex: + +.. _api_fluid_clip_GradientClipByGlobalNorm: + +GradientClipByGlobalNorm +------------------------ + +.. autoclass:: paddle.fluid.clip.GradientClipByGlobalNorm + :members: + :noindex: + diff --git a/doc/fluid/api/data.rst b/doc/fluid/api/data.rst deleted file mode 100644 index b56c7332cc..0000000000 --- a/doc/fluid/api/data.rst +++ /dev/null @@ -1,10 +0,0 @@ -================================== -Data Reader Interface and DataSets -================================== - -.. toctree:: - :maxdepth: 1 - - data/data_reader.rst - data/image.rst - data/dataset.rst diff --git a/doc/fluid/api/data_feeder.rst b/doc/fluid/api/data_feeder.rst index 3df5c0307f..11d2890f5b 100644 --- a/doc/fluid/api/data_feeder.rst +++ b/doc/fluid/api/data_feeder.rst @@ -1,9 +1,11 @@ .. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` !DO NOT EDIT THIS FILE MANUALLY! -=========== -data_feeder -=========== +================= +fluid.data_feeder +================= + +.. _api_fluid_data_feeder_DataFeeder: DataFeeder ---------- diff --git a/doc/fluid/api/evaluator.rst b/doc/fluid/api/evaluator.rst deleted file mode 100644 index f80b87c7d2..0000000000 --- a/doc/fluid/api/evaluator.rst +++ /dev/null @@ -1,28 +0,0 @@ -.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` - !DO NOT EDIT THIS FILE MANUALLY! - -========= -evaluator -========= - -ChunkEvaluator --------------- - -.. autoclass:: paddle.fluid.evaluator.ChunkEvaluator - :members: - :noindex: - -EditDistance --------------- - -.. autoclass:: paddle.fluid.evaluator.EditDistance - :members: - :noindex: - -DetectionMAP --------------- - -.. 
autoclass:: paddle.fluid.evaluator.DetectionMAP - :members: - :noindex: - diff --git a/doc/fluid/api/executor.rst b/doc/fluid/api/executor.rst index a9cdf264e4..f23ecc1f80 100644 --- a/doc/fluid/api/executor.rst +++ b/doc/fluid/api/executor.rst @@ -1,9 +1,11 @@ .. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` !DO NOT EDIT THIS FILE MANUALLY! -======== -executor -======== +============== +fluid.executor +============== + +.. _api_fluid_executor_Executor: Executor -------- @@ -12,21 +14,27 @@ Executor :members: :noindex: +.. _api_fluid_executor_global_scope: + global_scope ------------ .. autofunction:: paddle.fluid.executor.global_scope :noindex: +.. _api_fluid_executor_scope_guard: + scope_guard ----------- .. autofunction:: paddle.fluid.executor.scope_guard :noindex: -switch_scope ------------- +.. _api_fluid_executor__switch_scope: + +_switch_scope +------------- -.. autofunction:: paddle.fluid.executor.switch_scope +.. autofunction:: paddle.fluid.executor._switch_scope :noindex: diff --git a/doc/fluid/api/fluid.rst b/doc/fluid/api/fluid.rst new file mode 100644 index 0000000000..7eab58355c --- /dev/null +++ b/doc/fluid/api/fluid.rst @@ -0,0 +1,362 @@ +.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` + !DO NOT EDIT THIS FILE MANUALLY! + +===== +fluid +===== + +.. _api_fluid_Block: + +Block +----- + +.. autoclass:: paddle.fluid.Block + :members: + :noindex: + +.. _api_fluid_Variable: + +Variable +-------- + +.. autoclass:: paddle.fluid.Variable + :members: + :noindex: + +.. _api_fluid_Program: + +Program +------- + +.. autoclass:: paddle.fluid.Program + :members: + :noindex: + +.. _api_fluid_Operator: + +Operator +-------- + +.. autoclass:: paddle.fluid.Operator + :members: + :noindex: + +.. _api_fluid_default_startup_program: + +default_startup_program +----------------------- + +.. autofunction:: paddle.fluid.default_startup_program + :noindex: + +.. _api_fluid_default_main_program: + +default_main_program +-------------------- + +.. autofunction:: paddle.fluid.default_main_program + :noindex: + +.. _api_fluid_program_guard: + +program_guard +------------- + +.. autofunction:: paddle.fluid.program_guard + :noindex: + +.. _api_fluid_get_var: + +get_var +------- + +.. autofunction:: paddle.fluid.get_var + :noindex: + +.. _api_fluid_Executor: + +Executor +-------- + +.. autoclass:: paddle.fluid.Executor + :members: + :noindex: + +.. _api_fluid_global_scope: + +global_scope +------------ + +.. autofunction:: paddle.fluid.global_scope + :noindex: + +.. _api_fluid_scope_guard: + +scope_guard +----------- + +.. autofunction:: paddle.fluid.scope_guard + :noindex: + +.. _api_fluid__switch_scope: + +_switch_scope +------------- + +.. autofunction:: paddle.fluid._switch_scope + :noindex: + + +.. _api_fluid_make_channel: + +make_channel +------------ + +.. autofunction:: paddle.fluid.make_channel + :noindex: + +.. _api_fluid_channel_send: + +channel_send +------------ + +.. autofunction:: paddle.fluid.channel_send + :noindex: + +.. _api_fluid_channel_recv: + +channel_recv +------------ + +.. autofunction:: paddle.fluid.channel_recv + :noindex: + +.. _api_fluid_channel_close: + +channel_close +------------- + +.. autofunction:: paddle.fluid.channel_close + :noindex: + +.. _api_fluid_Select: + +Select +------ + +.. autoclass:: paddle.fluid.Select + :members: + :noindex: + +.. _api_fluid_Trainer: + +Trainer +------- + +.. autoclass:: paddle.fluid.Trainer + :members: + :noindex: + +.. _api_fluid_BeginEpochEvent: + +BeginEpochEvent +--------------- + +.. 
autoclass:: paddle.fluid.BeginEpochEvent + :members: + :noindex: + +.. _api_fluid_EndEpochEvent: + +EndEpochEvent +------------- + +.. autoclass:: paddle.fluid.EndEpochEvent + :members: + :noindex: + +.. _api_fluid_BeginStepEvent: + +BeginStepEvent +-------------- + +.. autoclass:: paddle.fluid.BeginStepEvent + :members: + :noindex: + +.. _api_fluid_EndStepEvent: + +EndStepEvent +------------ + +.. autoclass:: paddle.fluid.EndStepEvent + :members: + :noindex: + +.. _api_fluid_CheckpointConfig: + +CheckpointConfig +---------------- + +.. autoclass:: paddle.fluid.CheckpointConfig + :members: + :noindex: + +.. _api_fluid_Inferencer: + +Inferencer +---------- + +.. autoclass:: paddle.fluid.Inferencer + :members: + :noindex: + +.. _api_fluid_DistributeTranspiler: + +DistributeTranspiler +-------------------- + +.. autoclass:: paddle.fluid.DistributeTranspiler + :members: + :noindex: + +.. _api_fluid_memory_optimize: + +memory_optimize +--------------- + +.. autofunction:: paddle.fluid.memory_optimize + :noindex: + +.. _api_fluid_release_memory: + +release_memory +-------------- + +.. autofunction:: paddle.fluid.release_memory + :noindex: + +.. _api_fluid_ParallelExecutor: + +ParallelExecutor +---------------- + +.. autoclass:: paddle.fluid.ParallelExecutor + :members: + :noindex: + +.. _api_fluid_ExecutionStrategy: + +ExecutionStrategy +----------------- + +.. autoclass:: paddle.fluid.ExecutionStrategy + :members: + :noindex: + +.. _api_fluid_BuildStrategy: + +BuildStrategy +------------- + +.. autoclass:: paddle.fluid.BuildStrategy + :members: + :noindex: + +.. _api_fluid_create_lod_tensor: + +create_lod_tensor +----------------- + +.. autofunction:: paddle.fluid.create_lod_tensor + :noindex: + +.. _api_fluid_create_random_int_lodtensor: + +create_random_int_lodtensor +--------------------------- + +.. autofunction:: paddle.fluid.create_random_int_lodtensor + :noindex: + +.. _api_fluid_LoDTensor: + +LoDTensor +--------- + +.. autoclass:: paddle.fluid.LoDTensor + :members: + :noindex: + +.. _api_fluid_CPUPlace: + +CPUPlace +-------- + +.. autoclass:: paddle.fluid.CPUPlace + :members: + :noindex: + +.. _api_fluid_CUDAPlace: + +CUDAPlace +--------- + +.. autoclass:: paddle.fluid.CUDAPlace + :members: + :noindex: + +.. _api_fluid_CUDAPinnedPlace: + +CUDAPinnedPlace +--------------- + +.. autoclass:: paddle.fluid.CUDAPinnedPlace + :members: + :noindex: + +.. _api_fluid_Tensor: + +Tensor +------ + +.. autoclass:: paddle.fluid.Tensor + :members: + :noindex: + +.. _api_fluid_ParamAttr: + +ParamAttr +--------- + +.. autoclass:: paddle.fluid.ParamAttr + :members: + :noindex: + +.. _api_fluid_WeightNormParamAttr: + +WeightNormParamAttr +------------------- + +.. autoclass:: paddle.fluid.WeightNormParamAttr + :members: + :noindex: + +.. _api_fluid_DataFeeder: + +DataFeeder +---------- + +.. autoclass:: paddle.fluid.DataFeeder + :members: + :noindex: + +.. _api_fluid_Scope: + +Scope +----- + +.. 
autoclass:: paddle.fluid.Scope + :members: + :noindex: + diff --git a/doc/fluid/api/gen_doc.py b/doc/fluid/api/gen_doc.py index 89ab880301..02efce2bf8 100644 --- a/doc/fluid/api/gen_doc.py +++ b/doc/fluid/api/gen_doc.py @@ -29,19 +29,27 @@ def parse_arg(): class DocGenerator(object): - def __init__(self, module_name, stream=sys.stdout): + def __init__(self, module_name=None, stream=sys.stdout): + if module_name == "": + module_name = None self.stream = stream - self.module_name = module_name - if not hasattr(fluid, module_name): - raise ValueError("Cannot find fluid.{0}".format(module_name)) + if module_name is None: + self.module_name = "fluid" else: - self.module = getattr(fluid, module_name) + self.module_name = "fluid." + module_name + if module_name is None: + self.module = fluid + else: + if not hasattr(fluid, module_name): + raise ValueError("Cannot find fluid.{0}".format(module_name)) + else: + self.module = getattr(fluid, module_name) self.stream.write('''.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` !DO NOT EDIT THIS FILE MANUALLY! ''') - self._print_header_(module_name, dot='=', is_title=True) + self._print_header_(self.module_name, dot='=', is_title=True) def print_submodule(self, submodule_name): submodule = getattr(self.module, submodule_name) @@ -60,25 +68,29 @@ class DocGenerator(object): self._print_header_(name, dot='=', is_title=False) def print_item(self, name): - item = getattr(self.module, name) + item = getattr(self.module, name, None) + if item is None: + return if isinstance(item, types.TypeType): self.print_class(name) elif isinstance(item, types.FunctionType): self.print_method(name) else: - raise RuntimeError("Unsupported item {0}".format(name)) + pass def print_class(self, name): + self._print_ref_(name) self._print_header_(name, dot='-', is_title=False) - self.stream.write('''.. autoclass:: paddle.fluid.{0}.{1} + self.stream.write('''.. autoclass:: paddle.{0}.{1} :members: :noindex: '''.format(self.module_name, name)) def print_method(self, name): + self._print_ref_(name) self._print_header_(name, dot='-', is_title=False) - self.stream.write('''.. autofunction:: paddle.fluid.{0}.{1} + self.stream.write('''.. autofunction:: paddle.{0}.{1} :noindex: '''.format(self.module_name, name)) @@ -94,6 +106,10 @@ class DocGenerator(object): self.stream.write('\n') self.stream.write('\n') + def _print_ref_(self, name): + self.stream.write(".. 
_api_{0}_{1}:\n\n".format("_".join( + self.module_name.split(".")), name)) + + def main(): args = parse_arg() diff --git a/doc/fluid/api/gen_doc.sh b/doc/fluid/api/gen_doc.sh index ba7b7ba8e5..b14ee29873 100755 --- a/doc/fluid/api/gen_doc.sh +++ b/doc/fluid/api/gen_doc.sh @@ -1,7 +1,9 @@ #!/bin/bash -python gen_doc.py layers --submodules control_flow device io nn ops tensor > layers.rst +python gen_doc.py layers --submodules control_flow device io nn ops tensor learning_rate_scheduler detection metric_op > layers.rst -for module in io data_feeder evaluator executor initializer io nets optimizer param_attr profiler regularizer +for module in data_feeder clip metrics executor initializer io nets optimizer param_attr profiler regularizer transpiler recordio_writer backward average do python gen_doc.py ${module} > ${module}.rst done + +python gen_doc.py "" > fluid.rst diff --git a/doc/fluid/api/index_en.rst b/doc/fluid/api/index_en.rst index 06c686d950..359406819a 100644 --- a/doc/fluid/api/index_en.rst +++ b/doc/fluid/api/index_en.rst @@ -1,19 +1,25 @@ -====================== -Fluid -====================== +============= +API Reference +============= .. toctree:: :maxdepth: 1 + fluid.rst layers.rst data_feeder.rst executor.rst initializer.rst - evaluator.rst + metrics.rst nets.rst + clip.rst optimizer.rst param_attr.rst profiler.rst regularizer.rst io.rst data.rst + transpiler.rst + recordio_writer.rst + backward.rst + average.rst diff --git a/doc/fluid/api/initializer.rst b/doc/fluid/api/initializer.rst index 2f02c5de09..dc0b52b14f 100644 --- a/doc/fluid/api/initializer.rst +++ b/doc/fluid/api/initializer.rst @@ -1,9 +1,11 @@ .. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` !DO NOT EDIT THIS FILE MANUALLY! -=========== -initializer -=========== +================= +fluid.initializer +================= + +.. _api_fluid_initializer_Constant: Constant -------- @@ -12,6 +14,8 @@ Constant :members: :noindex: +.. _api_fluid_initializer_Uniform: + Uniform ------- @@ -19,6 +23,8 @@ Uniform :members: :noindex: +.. _api_fluid_initializer_Normal: + Normal ------ @@ -26,6 +32,8 @@ Normal :members: :noindex: +.. _api_fluid_initializer_Xavier: + Xavier ------ @@ -33,13 +41,42 @@ Xavier :members: :noindex: +.. _api_fluid_initializer_Bilinear: + +Bilinear +-------- + +.. autoclass:: paddle.fluid.initializer.Bilinear + :members: + :noindex: + +.. _api_fluid_initializer_MSRA: + MSRA ------- +---- .. autoclass:: paddle.fluid.initializer.MSRA :members: :noindex: +.. _api_fluid_initializer_force_init_on_cpu: + +force_init_on_cpu +----------------- + +.. autofunction:: paddle.fluid.initializer.force_init_on_cpu + :noindex: + +.. _api_fluid_initializer_init_on_cpu: + +init_on_cpu +----------- + +.. autofunction:: paddle.fluid.initializer.init_on_cpu + :noindex: + +.. _api_fluid_initializer_ConstantInitializer: + ConstantInitializer ------------------- @@ -47,6 +84,8 @@ ConstantInitializer :members: :noindex: +.. _api_fluid_initializer_UniformInitializer: + UniformInitializer ------------------ @@ -54,6 +93,8 @@ UniformInitializer :members: :noindex: +.. _api_fluid_initializer_NormalInitializer: + NormalInitializer ----------------- @@ -61,6 +102,8 @@ NormalInitializer :members: :noindex: +.. _api_fluid_initializer_XavierInitializer: + XavierInitializer ----------------- @@ -68,9 +111,21 @@ XavierInitializer :members: :noindex: +.. _api_fluid_initializer_BilinearInitializer: + +BilinearInitializer +------------------- + +.. 
autoclass:: paddle.fluid.initializer.BilinearInitializer + :members: + :noindex: + +.. _api_fluid_initializer_MSRAInitializer: MSRAInitializer ------------------ +--------------- + .. autoclass:: paddle.fluid.initializer.MSRAInitializer :members: :noindex: + diff --git a/doc/fluid/api/io.rst b/doc/fluid/api/io.rst index dd9d88b669..7cee0bc4d9 100644 --- a/doc/fluid/api/io.rst +++ b/doc/fluid/api/io.rst @@ -1,9 +1,11 @@ .. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` !DO NOT EDIT THIS FILE MANUALLY! -== -io -== +======== +fluid.io +======== + +.. _api_fluid_io_save_vars: save_vars --------- @@ -11,51 +13,115 @@ save_vars .. autofunction:: paddle.fluid.io.save_vars :noindex: +.. _api_fluid_io_save_params: + save_params ----------- .. autofunction:: paddle.fluid.io.save_params :noindex: +.. _api_fluid_io_save_persistables: + save_persistables ----------------- .. autofunction:: paddle.fluid.io.save_persistables :noindex: +.. _api_fluid_io_load_vars: + load_vars --------- .. autofunction:: paddle.fluid.io.load_vars :noindex: +.. _api_fluid_io_load_params: + load_params ----------- .. autofunction:: paddle.fluid.io.load_params :noindex: +.. _api_fluid_io_load_persistables: + load_persistables ----------------- .. autofunction:: paddle.fluid.io.load_persistables :noindex: +.. _api_fluid_io_save_inference_model: + save_inference_model -------------------- .. autofunction:: paddle.fluid.io.save_inference_model :noindex: +.. _api_fluid_io_load_inference_model: + load_inference_model -------------------- .. autofunction:: paddle.fluid.io.load_inference_model :noindex: +.. _api_fluid_io_get_inference_program: + get_inference_program --------------------- .. autofunction:: paddle.fluid.io.get_inference_program :noindex: +.. _api_fluid_io_save_checkpoint: + +save_checkpoint +--------------- + +.. autofunction:: paddle.fluid.io.save_checkpoint + :noindex: + +.. _api_fluid_io_load_checkpoint: + +load_checkpoint +--------------- + +.. autofunction:: paddle.fluid.io.load_checkpoint + :noindex: + +.. _api_fluid_io_clean_checkpoint: + +clean_checkpoint +---------------- + +.. autofunction:: paddle.fluid.io.clean_checkpoint + :noindex: + +.. _api_fluid_io_load_persist_vars_without_grad: + +load_persist_vars_without_grad +------------------------------ + +.. autofunction:: paddle.fluid.io.load_persist_vars_without_grad + :noindex: + +.. _api_fluid_io_save_persist_vars_without_grad: + +save_persist_vars_without_grad +------------------------------ + +.. autofunction:: paddle.fluid.io.save_persist_vars_without_grad + :noindex: + +.. _api_fluid_io_get_latest_checkpoint_serial: + +get_latest_checkpoint_serial +---------------------------- + +.. autofunction:: paddle.fluid.io.get_latest_checkpoint_serial + :noindex: + diff --git a/doc/fluid/api/layers.rst b/doc/fluid/api/layers.rst index ff3c9346a2..ecbd8191cc 100644 --- a/doc/fluid/api/layers.rst +++ b/doc/fluid/api/layers.rst @@ -1,25 +1,31 @@ .. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` !DO NOT EDIT THIS FILE MANUALLY! -====== -layers -====== +============ +fluid.layers +============ control_flow ============ +.. _api_fluid_layers_split_lod_tensor: + split_lod_tensor ---------------- .. autofunction:: paddle.fluid.layers.split_lod_tensor :noindex: +.. _api_fluid_layers_merge_lod_tensor: + merge_lod_tensor ---------------- .. autofunction:: paddle.fluid.layers.merge_lod_tensor :noindex: +.. _api_fluid_layers_BlockGuard: + BlockGuard ---------- @@ -27,6 +33,8 @@ BlockGuard :members: :noindex: +.. 
_api_fluid_layers_BlockGuardWithCompletion: + BlockGuardWithCompletion ------------------------ @@ -34,12 +42,7 @@ BlockGuardWithCompletion :members: :noindex: -StaticRNNMemoryLink -------------------- - -.. autoclass:: paddle.fluid.layers.StaticRNNMemoryLink - :members: - :noindex: +.. _api_fluid_layers_WhileGuard: WhileGuard ---------- @@ -48,6 +51,8 @@ WhileGuard :members: :noindex: +.. _api_fluid_layers_While: + While ----- @@ -55,23 +60,32 @@ While :members: :noindex: +.. _api_fluid_layers_Switch: + +Switch +------ + +.. autoclass:: paddle.fluid.layers.Switch + :members: + :noindex: + +.. _api_fluid_layers_lod_rank_table: + lod_rank_table -------------- .. autofunction:: paddle.fluid.layers.lod_rank_table :noindex: +.. _api_fluid_layers_max_sequence_len: + max_sequence_len ---------------- .. autofunction:: paddle.fluid.layers.max_sequence_len :noindex: -topk ----- - -.. autofunction:: paddle.fluid.layers.topk - :noindex: +.. _api_fluid_layers_lod_tensor_to_array: lod_tensor_to_array ------------------- @@ -79,54 +93,80 @@ lod_tensor_to_array .. autofunction:: paddle.fluid.layers.lod_tensor_to_array :noindex: +.. _api_fluid_layers_array_to_lod_tensor: + array_to_lod_tensor ------------------- .. autofunction:: paddle.fluid.layers.array_to_lod_tensor :noindex: +.. _api_fluid_layers_increment: + increment --------- .. autofunction:: paddle.fluid.layers.increment :noindex: +.. _api_fluid_layers_array_write: + array_write ----------- .. autofunction:: paddle.fluid.layers.array_write :noindex: +.. _api_fluid_layers_create_array: + create_array ------------ .. autofunction:: paddle.fluid.layers.create_array :noindex: +.. _api_fluid_layers_less_than: + less_than --------- .. autofunction:: paddle.fluid.layers.less_than :noindex: +.. _api_fluid_layers_equal: + +equal +----- + +.. autofunction:: paddle.fluid.layers.equal + :noindex: + +.. _api_fluid_layers_array_read: + array_read ---------- .. autofunction:: paddle.fluid.layers.array_read :noindex: +.. _api_fluid_layers_shrink_memory: + shrink_memory ------------- .. autofunction:: paddle.fluid.layers.shrink_memory :noindex: +.. _api_fluid_layers_array_length: + array_length ------------ .. autofunction:: paddle.fluid.layers.array_length :noindex: +.. _api_fluid_layers_IfElse: + IfElse ------ @@ -134,6 +174,8 @@ IfElse :members: :noindex: +.. _api_fluid_layers_DynamicRNN: + DynamicRNN ---------- @@ -141,6 +183,8 @@ DynamicRNN :members: :noindex: +.. _api_fluid_layers_ConditionalBlock: + ConditionalBlock ---------------- @@ -148,6 +192,8 @@ ConditionalBlock :members: :noindex: +.. _api_fluid_layers_StaticRNN: + StaticRNN --------- @@ -155,12 +201,16 @@ StaticRNN :members: :noindex: +.. _api_fluid_layers_reorder_lod_tensor_by_rank: + reorder_lod_tensor_by_rank -------------------------- .. autofunction:: paddle.fluid.layers.reorder_lod_tensor_by_rank :noindex: +.. _api_fluid_layers_ParallelDo: + ParallelDo ---------- @@ -168,15 +218,27 @@ ParallelDo :members: :noindex: +.. _api_fluid_layers_Print: + Print ----- .. autofunction:: paddle.fluid.layers.Print :noindex: +.. _api_fluid_layers_is_empty: + +is_empty +-------- + +.. autofunction:: paddle.fluid.layers.is_empty + :noindex: + device ====== +.. _api_fluid_layers_get_places: + get_places ---------- @@ -186,12 +248,16 @@ get_places io == +.. _api_fluid_layers_data: + data ---- .. autofunction:: paddle.fluid.layers.data :noindex: +.. _api_fluid_layers_BlockGuardServ: + BlockGuardServ -------------- @@ -199,6 +265,8 @@ BlockGuardServ :members: :noindex: +.. 
_api_fluid_layers_ListenAndServ: + ListenAndServ ------------- @@ -206,86 +274,187 @@ ListenAndServ :members: :noindex: +.. _api_fluid_layers_Send: + Send ---- .. autofunction:: paddle.fluid.layers.Send :noindex: +.. _api_fluid_layers_Recv: + +Recv +---- + +.. autofunction:: paddle.fluid.layers.Recv + :noindex: + +.. _api_fluid_layers_open_recordio_file: + +open_recordio_file +------------------ + +.. autofunction:: paddle.fluid.layers.open_recordio_file + :noindex: + +.. _api_fluid_layers_open_files: + +open_files +---------- + +.. autofunction:: paddle.fluid.layers.open_files + :noindex: + +.. _api_fluid_layers_read_file: + +read_file +--------- + +.. autofunction:: paddle.fluid.layers.read_file + :noindex: + +.. _api_fluid_layers_shuffle: + +shuffle +------- + +.. autofunction:: paddle.fluid.layers.shuffle + :noindex: + +.. _api_fluid_layers_batch: + +batch +----- + +.. autofunction:: paddle.fluid.layers.batch + :noindex: + +.. _api_fluid_layers_double_buffer: + +double_buffer +------------- + +.. autofunction:: paddle.fluid.layers.double_buffer + :noindex: + +.. _api_fluid_layers_random_data_generator: + +random_data_generator +--------------------- + +.. autofunction:: paddle.fluid.layers.random_data_generator + :noindex: + +.. _api_fluid_layers_Preprocessor: + +Preprocessor +------------ + +.. autoclass:: paddle.fluid.layers.Preprocessor + :members: + :noindex: + +.. _api_fluid_layers_load: + +load +---- + +.. autofunction:: paddle.fluid.layers.load + :noindex: + nn == +.. _api_fluid_layers_fc: + fc -- .. autofunction:: paddle.fluid.layers.fc :noindex: +.. _api_fluid_layers_embedding: + embedding --------- .. autofunction:: paddle.fluid.layers.embedding :noindex: +.. _api_fluid_layers_dynamic_lstm: + dynamic_lstm ------------ .. autofunction:: paddle.fluid.layers.dynamic_lstm :noindex: +.. _api_fluid_layers_dynamic_lstmp: + dynamic_lstmp ------------- .. autofunction:: paddle.fluid.layers.dynamic_lstmp :noindex: +.. _api_fluid_layers_dynamic_gru: + dynamic_gru ----------- .. autofunction:: paddle.fluid.layers.dynamic_gru :noindex: +.. _api_fluid_layers_gru_unit: + gru_unit -------- .. autofunction:: paddle.fluid.layers.gru_unit :noindex: +.. _api_fluid_layers_linear_chain_crf: + linear_chain_crf ---------------- .. autofunction:: paddle.fluid.layers.linear_chain_crf :noindex: +.. _api_fluid_layers_crf_decoding: + crf_decoding ------------ .. autofunction:: paddle.fluid.layers.crf_decoding :noindex: +.. _api_fluid_layers_cos_sim: + cos_sim ------- .. autofunction:: paddle.fluid.layers.cos_sim :noindex: +.. _api_fluid_layers_cross_entropy: + cross_entropy ------------- .. autofunction:: paddle.fluid.layers.cross_entropy :noindex: +.. _api_fluid_layers_square_error_cost: + square_error_cost ----------------- .. autofunction:: paddle.fluid.layers.square_error_cost :noindex: -accuracy --------- - -.. autofunction:: paddle.fluid.layers.accuracy - :noindex: +.. _api_fluid_layers_chunk_eval: chunk_eval ---------- @@ -293,321 +462,706 @@ chunk_eval .. autofunction:: paddle.fluid.layers.chunk_eval :noindex: +.. _api_fluid_layers_sequence_conv: + sequence_conv ------------- .. autofunction:: paddle.fluid.layers.sequence_conv :noindex: +.. _api_fluid_layers_conv2d: + conv2d ------ .. autofunction:: paddle.fluid.layers.conv2d :noindex: +.. _api_fluid_layers_conv3d: + +conv3d +------ + +.. autofunction:: paddle.fluid.layers.conv3d + :noindex: + +.. _api_fluid_layers_sequence_pool: + sequence_pool ------------- .. autofunction:: paddle.fluid.layers.sequence_pool :noindex: +.. 
_api_fluid_layers_sequence_softmax: + +sequence_softmax +---------------- + +.. autofunction:: paddle.fluid.layers.sequence_softmax + :noindex: + +.. _api_fluid_layers_softmax: + +softmax +------- + +.. autofunction:: paddle.fluid.layers.softmax + :noindex: + +.. _api_fluid_layers_pool2d: + pool2d ------ .. autofunction:: paddle.fluid.layers.pool2d :noindex: -batch_norm ----------- +.. _api_fluid_layers_pool3d: -.. autofunction:: paddle.fluid.layers.batch_norm +pool3d +------ + +.. autofunction:: paddle.fluid.layers.pool3d :noindex: -layer_norm +.. _api_fluid_layers_batch_norm: + +batch_norm ---------- -.. autofunction:: paddle.fluid.layers.layer_norm +.. autofunction:: paddle.fluid.layers.batch_norm :noindex: +.. _api_fluid_layers_beam_search_decode: + beam_search_decode ------------------ .. autofunction:: paddle.fluid.layers.beam_search_decode :noindex: +.. _api_fluid_layers_conv2d_transpose: + conv2d_transpose ---------------- .. autofunction:: paddle.fluid.layers.conv2d_transpose :noindex: +.. _api_fluid_layers_conv3d_transpose: + +conv3d_transpose +---------------- + +.. autofunction:: paddle.fluid.layers.conv3d_transpose + :noindex: + +.. _api_fluid_layers_sequence_expand: + sequence_expand --------------- .. autofunction:: paddle.fluid.layers.sequence_expand :noindex: +.. _api_fluid_layers_lstm_unit: + lstm_unit --------- .. autofunction:: paddle.fluid.layers.lstm_unit :noindex: +.. _api_fluid_layers_reduce_sum: + reduce_sum ---------- .. autofunction:: paddle.fluid.layers.reduce_sum :noindex: +.. _api_fluid_layers_reduce_mean: + reduce_mean ----------- .. autofunction:: paddle.fluid.layers.reduce_mean :noindex: +.. _api_fluid_layers_reduce_max: + reduce_max ---------- .. autofunction:: paddle.fluid.layers.reduce_max :noindex: +.. _api_fluid_layers_reduce_min: + reduce_min ---------- .. autofunction:: paddle.fluid.layers.reduce_min :noindex: +.. _api_fluid_layers_reduce_prod: + +reduce_prod +----------- + +.. autofunction:: paddle.fluid.layers.reduce_prod + :noindex: + +.. _api_fluid_layers_sequence_first_step: + sequence_first_step ------------------- .. autofunction:: paddle.fluid.layers.sequence_first_step :noindex: +.. _api_fluid_layers_sequence_last_step: + sequence_last_step ------------------ .. autofunction:: paddle.fluid.layers.sequence_last_step :noindex: +.. _api_fluid_layers_dropout: + dropout ------- .. autofunction:: paddle.fluid.layers.dropout :noindex: +.. _api_fluid_layers_split: + split ----- .. autofunction:: paddle.fluid.layers.split :noindex: +.. _api_fluid_layers_ctc_greedy_decoder: + ctc_greedy_decoder ------------------ .. autofunction:: paddle.fluid.layers.ctc_greedy_decoder :noindex: +.. _api_fluid_layers_edit_distance: + edit_distance ------------- .. autofunction:: paddle.fluid.layers.edit_distance :noindex: +.. _api_fluid_layers_l2_normalize: + l2_normalize ------------ .. autofunction:: paddle.fluid.layers.l2_normalize :noindex: +.. _api_fluid_layers_matmul: + matmul ------ .. autofunction:: paddle.fluid.layers.matmul :noindex: +.. _api_fluid_layers_topk: + +topk +---- + +.. autofunction:: paddle.fluid.layers.topk + :noindex: + +.. _api_fluid_layers_warpctc: + warpctc ------- .. autofunction:: paddle.fluid.layers.warpctc :noindex: +.. _api_fluid_layers_sequence_reshape: + sequence_reshape ---------------- .. autofunction:: paddle.fluid.layers.sequence_reshape :noindex: +.. _api_fluid_layers_transpose: + transpose --------- .. autofunction:: paddle.fluid.layers.transpose :noindex: +.. _api_fluid_layers_im2sequence: + im2sequence ----------- .. 
autofunction:: paddle.fluid.layers.im2sequence :noindex: +.. _api_fluid_layers_nce: + nce --- .. autofunction:: paddle.fluid.layers.nce :noindex: +.. _api_fluid_layers_beam_search: + beam_search ----------- .. autofunction:: paddle.fluid.layers.beam_search :noindex: +.. _api_fluid_layers_row_conv: + row_conv -------- .. autofunction:: paddle.fluid.layers.row_conv :noindex: +.. _api_fluid_layers_multiplex: + multiplex --------- .. autofunction:: paddle.fluid.layers.multiplex :noindex: -label_smooth ------------- +.. _api_fluid_layers_layer_norm: -.. autofunction:: paddle.fluid.layers.label_smooth +layer_norm +---------- + +.. autofunction:: paddle.fluid.layers.layer_norm :noindex: -roi_pool +.. _api_fluid_layers_softmax_with_cross_entropy: + +softmax_with_cross_entropy +-------------------------- + +.. autofunction:: paddle.fluid.layers.softmax_with_cross_entropy + :noindex: + +.. _api_fluid_layers_smooth_l1: + +smooth_l1 --------- -.. autofunction:: paddle.fluid.layers.roi_pool +.. autofunction:: paddle.fluid.layers.smooth_l1 :noindex: - -ops -=== +.. _api_fluid_layers_one_hot: -mean ----- +one_hot +------- -.. autofunction:: paddle.fluid.layers.mean +.. autofunction:: paddle.fluid.layers.one_hot :noindex: -mul ---- +.. _api_fluid_layers_autoincreased_step_counter: -.. autofunction:: paddle.fluid.layers.mul +autoincreased_step_counter +-------------------------- + +.. autofunction:: paddle.fluid.layers.autoincreased_step_counter :noindex: +.. _api_fluid_layers_reshape: + reshape ------- .. autofunction:: paddle.fluid.layers.reshape :noindex: +.. _api_fluid_layers_lod_reset: + +lod_reset +--------- + +.. autofunction:: paddle.fluid.layers.lod_reset + :noindex: + +.. _api_fluid_layers_lrn: + +lrn +--- + +.. autofunction:: paddle.fluid.layers.lrn + :noindex: + +.. _api_fluid_layers_pad: + pad --- .. autofunction:: paddle.fluid.layers.pad :noindex: -scale ------ +.. _api_fluid_layers_label_smooth: -.. autofunction:: paddle.fluid.layers.scale +label_smooth +------------ + +.. autofunction:: paddle.fluid.layers.label_smooth :noindex: -sigmoid_cross_entropy_with_logits ---------------------------------- +.. _api_fluid_layers_roi_pool: -.. autofunction:: paddle.fluid.layers.sigmoid_cross_entropy_with_logits +roi_pool +-------- + +.. autofunction:: paddle.fluid.layers.roi_pool :noindex: -elementwise_add ---------------- +.. _api_fluid_layers_dice_loss: -.. autofunction:: paddle.fluid.layers.elementwise_add +dice_loss +--------- + +.. autofunction:: paddle.fluid.layers.dice_loss :noindex: -elementwise_div ---------------- +.. _api_fluid_layers_image_resize: -.. autofunction:: paddle.fluid.layers.elementwise_div +image_resize +------------ + +.. autofunction:: paddle.fluid.layers.image_resize :noindex: +.. _api_fluid_layers_image_resize_short: + +image_resize_short +------------------ + +.. autofunction:: paddle.fluid.layers.image_resize_short + :noindex: + +.. _api_fluid_layers_resize_bilinear: + +resize_bilinear +--------------- + +.. autofunction:: paddle.fluid.layers.resize_bilinear + :noindex: + +.. _api_fluid_layers_gather: + +gather +------ + +.. autofunction:: paddle.fluid.layers.gather + :noindex: + +.. _api_fluid_layers_random_crop: + +random_crop +----------- + +.. autofunction:: paddle.fluid.layers.random_crop + :noindex: + +.. _api_fluid_layers_mean_iou: + +mean_iou +-------- + +.. autofunction:: paddle.fluid.layers.mean_iou + :noindex: + +.. _api_fluid_layers_relu: + +relu +---- + +.. autofunction:: paddle.fluid.layers.relu + :noindex: + +.. _api_fluid_layers_log: + +log +--- + +.. 
autofunction:: paddle.fluid.layers.log + :noindex: + +.. _api_fluid_layers_crop: + +crop +---- + +.. autofunction:: paddle.fluid.layers.crop + :noindex: + +ops +=== + +.. _api_fluid_layers_mean: + +mean +---- + +.. autofunction:: paddle.fluid.layers.mean + :noindex: + +.. _api_fluid_layers_mul: + +mul +--- + +.. autofunction:: paddle.fluid.layers.mul + :noindex: + +.. _api_fluid_layers_scale: + +scale +----- + +.. autofunction:: paddle.fluid.layers.scale + :noindex: + +.. _api_fluid_layers_sigmoid_cross_entropy_with_logits: + +sigmoid_cross_entropy_with_logits +--------------------------------- + +.. autofunction:: paddle.fluid.layers.sigmoid_cross_entropy_with_logits + :noindex: + +.. _api_fluid_layers_elementwise_add: + +elementwise_add +--------------- + +.. autofunction:: paddle.fluid.layers.elementwise_add + :noindex: + +.. _api_fluid_layers_elementwise_div: + +elementwise_div +--------------- + +.. autofunction:: paddle.fluid.layers.elementwise_div + :noindex: + +.. _api_fluid_layers_elementwise_sub: + elementwise_sub --------------- .. autofunction:: paddle.fluid.layers.elementwise_sub :noindex: +.. _api_fluid_layers_elementwise_mul: + elementwise_mul --------------- .. autofunction:: paddle.fluid.layers.elementwise_mul :noindex: +.. _api_fluid_layers_elementwise_max: + elementwise_max --------------- .. autofunction:: paddle.fluid.layers.elementwise_max :noindex: +.. _api_fluid_layers_elementwise_min: + elementwise_min --------------- .. autofunction:: paddle.fluid.layers.elementwise_min :noindex: +.. _api_fluid_layers_elementwise_pow: + elementwise_pow --------------- .. autofunction:: paddle.fluid.layers.elementwise_pow :noindex: +.. _api_fluid_layers_clip: + clip ---- .. autofunction:: paddle.fluid.layers.clip :noindex: +.. _api_fluid_layers_clip_by_norm: + clip_by_norm ------------ .. autofunction:: paddle.fluid.layers.clip_by_norm :noindex: -sequence_softmax ----------------- +.. _api_fluid_layers_logical_and: -.. autofunction:: paddle.fluid.layers.sequence_softmax +logical_and +----------- + +.. autofunction:: paddle.fluid.layers.logical_and + :noindex: + +.. _api_fluid_layers_logical_or: + +logical_or +---------- + +.. autofunction:: paddle.fluid.layers.logical_or + :noindex: + +.. _api_fluid_layers_logical_xor: + +logical_xor +----------- + +.. autofunction:: paddle.fluid.layers.logical_xor + :noindex: + +.. _api_fluid_layers_logical_not: + +logical_not +----------- + +.. autofunction:: paddle.fluid.layers.logical_not + :noindex: + +.. _api_fluid_layers_uniform_random_batch_size_like: + +uniform_random_batch_size_like +------------------------------ + +.. autofunction:: paddle.fluid.layers.uniform_random_batch_size_like :noindex: +.. _api_fluid_layers_gaussian_random: + +gaussian_random +--------------- + +.. autofunction:: paddle.fluid.layers.gaussian_random + :noindex: + +.. _api_fluid_layers_gaussian_random_batch_size_like: + +gaussian_random_batch_size_like +------------------------------- + +.. autofunction:: paddle.fluid.layers.gaussian_random_batch_size_like + :noindex: + +.. _api_fluid_layers_scatter: + +scatter +------- + +.. autofunction:: paddle.fluid.layers.scatter + :noindex: + +.. _api_fluid_layers_sum: + +sum +--- + +.. autofunction:: paddle.fluid.layers.sum + :noindex: + +.. _api_fluid_layers_slice: + +slice +----- + +.. autofunction:: paddle.fluid.layers.slice + :noindex: + +.. _api_fluid_layers_polygon_box_transform: + +polygon_box_transform +--------------------- + +.. autofunction:: paddle.fluid.layers.polygon_box_transform + :noindex: + +.. 
_api_fluid_layers_shape: + +shape +----- + +.. autofunction:: paddle.fluid.layers.shape + :noindex: + +.. _api_fluid_layers_iou_similarity: + +iou_similarity +-------------- + +.. autofunction:: paddle.fluid.layers.iou_similarity + :noindex: + +.. _api_fluid_layers_maxout: + +maxout +------ + +.. autofunction:: paddle.fluid.layers.maxout + :noindex: + +.. _api_fluid_layers_sigmoid: + sigmoid ------- .. autofunction:: paddle.fluid.layers.sigmoid :noindex: +.. _api_fluid_layers_logsigmoid: + logsigmoid ---------- .. autofunction:: paddle.fluid.layers.logsigmoid :noindex: +.. _api_fluid_layers_exp: + exp --- .. autofunction:: paddle.fluid.layers.exp :noindex: -relu ----- - -.. autofunction:: paddle.fluid.layers.relu - :noindex: +.. _api_fluid_layers_tanh: tanh ---- @@ -615,59 +1169,87 @@ tanh .. autofunction:: paddle.fluid.layers.tanh :noindex: +.. _api_fluid_layers_tanh_shrink: + tanh_shrink ----------- .. autofunction:: paddle.fluid.layers.tanh_shrink :noindex: +.. _api_fluid_layers_softshrink: + softshrink ---------- .. autofunction:: paddle.fluid.layers.softshrink :noindex: +.. _api_fluid_layers_sqrt: + sqrt ---- .. autofunction:: paddle.fluid.layers.sqrt :noindex: +.. _api_fluid_layers_abs: + abs --- .. autofunction:: paddle.fluid.layers.abs :noindex: +.. _api_fluid_layers_ceil: + ceil ---- .. autofunction:: paddle.fluid.layers.ceil :noindex: +.. _api_fluid_layers_floor: + floor ----- .. autofunction:: paddle.fluid.layers.floor :noindex: +.. _api_fluid_layers_cos: + +cos +--- + +.. autofunction:: paddle.fluid.layers.cos + :noindex: + +.. _api_fluid_layers_sin: + +sin +--- + +.. autofunction:: paddle.fluid.layers.sin + :noindex: + +.. _api_fluid_layers_round: + round ----- .. autofunction:: paddle.fluid.layers.round :noindex: +.. _api_fluid_layers_reciprocal: + reciprocal ---------- .. autofunction:: paddle.fluid.layers.reciprocal :noindex: -log ---- - -.. autofunction:: paddle.fluid.layers.log - :noindex: +.. _api_fluid_layers_square: square ------ @@ -675,157 +1257,522 @@ square .. autofunction:: paddle.fluid.layers.square :noindex: +.. _api_fluid_layers_softplus: + softplus -------- .. autofunction:: paddle.fluid.layers.softplus :noindex: +.. _api_fluid_layers_softsign: + softsign -------- .. autofunction:: paddle.fluid.layers.softsign :noindex: +.. _api_fluid_layers_brelu: + brelu ----- .. autofunction:: paddle.fluid.layers.brelu :noindex: +.. _api_fluid_layers_leaky_relu: + leaky_relu ---------- .. autofunction:: paddle.fluid.layers.leaky_relu :noindex: +.. _api_fluid_layers_soft_relu: + soft_relu --------- .. autofunction:: paddle.fluid.layers.soft_relu :noindex: +.. _api_fluid_layers_elu: + elu --- .. autofunction:: paddle.fluid.layers.elu :noindex: +.. _api_fluid_layers_relu6: + relu6 ----- .. autofunction:: paddle.fluid.layers.relu6 :noindex: +.. _api_fluid_layers_pow: + pow --- .. autofunction:: paddle.fluid.layers.pow :noindex: +.. _api_fluid_layers_stanh: + stanh ----- .. autofunction:: paddle.fluid.layers.stanh :noindex: +.. _api_fluid_layers_hard_sigmoid: + +hard_sigmoid +------------ + +.. autofunction:: paddle.fluid.layers.hard_sigmoid + :noindex: + +.. _api_fluid_layers_swish: + +swish +----- + +.. autofunction:: paddle.fluid.layers.swish + :noindex: + +.. _api_fluid_layers_uniform_random: + +uniform_random +-------------- + +.. autofunction:: paddle.fluid.layers.uniform_random + :noindex: + +.. _api_fluid_layers_hard_shrink: + hard_shrink ----------- .. autofunction:: paddle.fluid.layers.hard_shrink :noindex: +.. _api_fluid_layers_cumsum: + +cumsum +------ + +.. 
autofunction:: paddle.fluid.layers.cumsum + :noindex: + +.. _api_fluid_layers_thresholded_relu: + thresholded_relu ---------------- .. autofunction:: paddle.fluid.layers.thresholded_relu :noindex: -hard_sigmoid ------------- +tensor +====== -.. autofunction:: paddle.fluid.layers.hard_sigmoid +.. _api_fluid_layers_create_tensor: + +create_tensor +------------- + +.. autofunction:: paddle.fluid.layers.create_tensor :noindex: -swish +.. _api_fluid_layers_create_parameter: + +create_parameter +---------------- + +.. autofunction:: paddle.fluid.layers.create_parameter + :noindex: + +.. _api_fluid_layers_create_global_var: + +create_global_var +----------------- + +.. autofunction:: paddle.fluid.layers.create_global_var + :noindex: + +.. _api_fluid_layers_cast: + +cast +---- + +.. autofunction:: paddle.fluid.layers.cast + :noindex: + +.. _api_fluid_layers_concat: + +concat +------ + +.. autofunction:: paddle.fluid.layers.concat + :noindex: + +.. _api_fluid_layers_sums: + +sums +---- + +.. autofunction:: paddle.fluid.layers.sums + :noindex: + +.. _api_fluid_layers_assign: + +assign +------ + +.. autofunction:: paddle.fluid.layers.assign + :noindex: + +.. _api_fluid_layers_fill_constant_batch_size_like: + +fill_constant_batch_size_like +----------------------------- + +.. autofunction:: paddle.fluid.layers.fill_constant_batch_size_like + :noindex: + +.. _api_fluid_layers_fill_constant: + +fill_constant +------------- + +.. autofunction:: paddle.fluid.layers.fill_constant + :noindex: + +.. _api_fluid_layers_argmin: + +argmin +------ + +.. autofunction:: paddle.fluid.layers.argmin + :noindex: + +.. _api_fluid_layers_argmax: + +argmax +------ + +.. autofunction:: paddle.fluid.layers.argmax + :noindex: + +.. _api_fluid_layers_argsort: + +argsort +------- + +.. autofunction:: paddle.fluid.layers.argsort + :noindex: + +.. _api_fluid_layers_ones: + +ones +---- + +.. autofunction:: paddle.fluid.layers.ones + :noindex: + +.. _api_fluid_layers_zeros: + +zeros ----- -.. autofunction:: paddle.fluid.layers.swish +.. autofunction:: paddle.fluid.layers.zeros + :noindex: + +.. _api_fluid_layers_reverse: + +reverse +------- + +.. autofunction:: paddle.fluid.layers.reverse + :noindex: + +learning_rate_scheduler +======================= + +.. _api_fluid_layers_exponential_decay: + +exponential_decay +----------------- + +.. autofunction:: paddle.fluid.layers.exponential_decay + :noindex: + +.. _api_fluid_layers_natural_exp_decay: + +natural_exp_decay +----------------- + +.. autofunction:: paddle.fluid.layers.natural_exp_decay + :noindex: + +.. _api_fluid_layers_inverse_time_decay: + +inverse_time_decay +------------------ + +.. autofunction:: paddle.fluid.layers.inverse_time_decay + :noindex: + +.. _api_fluid_layers_polynomial_decay: + +polynomial_decay +---------------- + +.. autofunction:: paddle.fluid.layers.polynomial_decay + :noindex: + +.. _api_fluid_layers_piecewise_decay: + +piecewise_decay +--------------- + +.. autofunction:: paddle.fluid.layers.piecewise_decay + :noindex: + +.. _api_fluid_layers_noam_decay: + +noam_decay +---------- + +.. autofunction:: paddle.fluid.layers.noam_decay + :noindex: + +.. _api_fluid_layers_append_LARS: + +append_LARS +----------- + +.. autofunction:: paddle.fluid.layers.append_LARS + :noindex: + +detection +========= + +.. _api_fluid_layers_prior_box: + +prior_box +--------- + +.. autofunction:: paddle.fluid.layers.prior_box + :noindex: + +.. _api_fluid_layers_multi_box_head: + +multi_box_head +-------------- + +.. 
autofunction:: paddle.fluid.layers.multi_box_head + :noindex: + +.. _api_fluid_layers_bipartite_match: + +bipartite_match +--------------- + +.. autofunction:: paddle.fluid.layers.bipartite_match + :noindex: + +.. _api_fluid_layers_target_assign: + +target_assign +------------- + +.. autofunction:: paddle.fluid.layers.target_assign + :noindex: + +.. _api_fluid_layers_detection_output: + +detection_output +---------------- + +.. autofunction:: paddle.fluid.layers.detection_output + :noindex: + +.. _api_fluid_layers_ssd_loss: + +ssd_loss +-------- + +.. autofunction:: paddle.fluid.layers.ssd_loss + :noindex: + +.. _api_fluid_layers_detection_map: + +detection_map +------------- + +.. autofunction:: paddle.fluid.layers.detection_map + :noindex: + +.. _api_fluid_layers_iou_similarity: + +iou_similarity +-------------- + +.. autofunction:: paddle.fluid.layers.iou_similarity + :noindex: + +.. _api_fluid_layers_box_coder: + +box_coder +--------- + +.. autofunction:: paddle.fluid.layers.box_coder + :noindex: + +metric_op +========= + +.. _api_fluid_layers_accuracy: + +accuracy +-------- + +.. autofunction:: paddle.fluid.layers.accuracy + :noindex: + +.. _api_fluid_layers_auc: + +auc +--- + +.. autofunction:: paddle.fluid.layers.auc :noindex: tensor ====== +.. _api_fluid_layers_create_tensor: + create_tensor ------------- .. autofunction:: paddle.fluid.layers.create_tensor :noindex: +.. _api_fluid_layers_create_parameter: + create_parameter ---------------- .. autofunction:: paddle.fluid.layers.create_parameter :noindex: +.. _api_fluid_layers_create_global_var: + create_global_var ----------------- .. autofunction:: paddle.fluid.layers.create_global_var :noindex: +.. _api_fluid_layers_cast: + cast ---- .. autofunction:: paddle.fluid.layers.cast :noindex: +.. _api_fluid_layers_concat: + concat ------ .. autofunction:: paddle.fluid.layers.concat :noindex: +.. _api_fluid_layers_sums: + sums ---- .. autofunction:: paddle.fluid.layers.sums :noindex: +.. _api_fluid_layers_assign: + assign ------ .. autofunction:: paddle.fluid.layers.assign :noindex: +.. _api_fluid_layers_fill_constant_batch_size_like: + fill_constant_batch_size_like ----------------------------- .. autofunction:: paddle.fluid.layers.fill_constant_batch_size_like :noindex: +.. _api_fluid_layers_fill_constant: + fill_constant ------------- .. autofunction:: paddle.fluid.layers.fill_constant :noindex: +.. _api_fluid_layers_argmin: + +argmin +------ + +.. autofunction:: paddle.fluid.layers.argmin + :noindex: + +.. _api_fluid_layers_argmax: + +argmax +------ + +.. autofunction:: paddle.fluid.layers.argmax + :noindex: + +.. _api_fluid_layers_ones: + ones ---- .. autofunction:: paddle.fluid.layers.ones :noindex: +.. _api_fluid_layers_zeros: + zeros ----- .. autofunction:: paddle.fluid.layers.zeros :noindex: -topk ----- +.. _api_fluid_layers_reverse: -.. autofunction:: paddle.fluid.layers.topk +reverse +------- + +.. autofunction:: paddle.fluid.layers.reverse :noindex: +.. _api_fluid_layers_rank_loss: + +rank_loss +------- + +.. autofunction:: paddle.fluid.layers.rank_loss + :noindex: diff --git a/doc/fluid/api/metrics.rst b/doc/fluid/api/metrics.rst new file mode 100644 index 0000000000..0f54b2e2eb --- /dev/null +++ b/doc/fluid/api/metrics.rst @@ -0,0 +1,88 @@ +.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` + !DO NOT EDIT THIS FILE MANUALLY! + +============= +fluid.metrics +============= + +.. _api_fluid_metrics_MetricBase: + +MetricBase +---------- + +.. autoclass:: paddle.fluid.metrics.MetricBase + :members: + :noindex: + +.. 
_api_fluid_metrics_CompositeMetric: + +CompositeMetric +--------------- + +.. autoclass:: paddle.fluid.metrics.CompositeMetric + :members: + :noindex: + +.. _api_fluid_metrics_Precision: + +Precision +--------- + +.. autoclass:: paddle.fluid.metrics.Precision + :members: + :noindex: + +.. _api_fluid_metrics_Recall: + +Recall +------ + +.. autoclass:: paddle.fluid.metrics.Recall + :members: + :noindex: + +.. _api_fluid_metrics_Accuracy: + +Accuracy +-------- + +.. autoclass:: paddle.fluid.metrics.Accuracy + :members: + :noindex: + +.. _api_fluid_metrics_ChunkEvaluator: + +ChunkEvaluator +-------------- + +.. autoclass:: paddle.fluid.metrics.ChunkEvaluator + :members: + :noindex: + +.. _api_fluid_metrics_EditDistance: + +EditDistance +------------ + +.. autoclass:: paddle.fluid.metrics.EditDistance + :members: + :noindex: + +.. _api_fluid_metrics_DetectionMAP: + +DetectionMAP +------------ + +.. autoclass:: paddle.fluid.metrics.DetectionMAP + :members: + :noindex: + +.. _api_fluid_metrics_Auc: + +Auc +--- + +.. autoclass:: paddle.fluid.metrics.Auc + :members: + :noindex: + diff --git a/doc/fluid/api/nets.rst b/doc/fluid/api/nets.rst index 7ae3187304..059733af18 100644 --- a/doc/fluid/api/nets.rst +++ b/doc/fluid/api/nets.rst @@ -1,9 +1,11 @@ .. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` !DO NOT EDIT THIS FILE MANUALLY! -==== -nets -==== +========== +fluid.nets +========== + +.. _api_fluid_nets_simple_img_conv_pool: simple_img_conv_pool -------------------- @@ -11,18 +13,24 @@ simple_img_conv_pool .. autofunction:: paddle.fluid.nets.simple_img_conv_pool :noindex: +.. _api_fluid_nets_sequence_conv_pool: + sequence_conv_pool ------------------ .. autofunction:: paddle.fluid.nets.sequence_conv_pool :noindex: +.. _api_fluid_nets_glu: + glu --- .. autofunction:: paddle.fluid.nets.glu :noindex: +.. _api_fluid_nets_scaled_dot_product_attention: + scaled_dot_product_attention ---------------------------- diff --git a/doc/fluid/api/optimizer.rst b/doc/fluid/api/optimizer.rst index 7a92caf9b7..8d792120f2 100644 --- a/doc/fluid/api/optimizer.rst +++ b/doc/fluid/api/optimizer.rst @@ -1,9 +1,11 @@ .. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` !DO NOT EDIT THIS FILE MANUALLY! -========= -optimizer -========= +=============== +fluid.optimizer +=============== + +.. _api_fluid_optimizer_SGD: SGD --- @@ -12,6 +14,8 @@ SGD :members: :noindex: +.. _api_fluid_optimizer_Momentum: + Momentum -------- @@ -19,6 +23,8 @@ Momentum :members: :noindex: +.. _api_fluid_optimizer_Adagrad: + Adagrad ------- @@ -26,6 +32,8 @@ Adagrad :members: :noindex: +.. _api_fluid_optimizer_Adam: + Adam ---- @@ -33,6 +41,8 @@ Adam :members: :noindex: +.. _api_fluid_optimizer_Adamax: + Adamax ------ @@ -40,6 +50,8 @@ Adamax :members: :noindex: +.. _api_fluid_optimizer_DecayedAdagrad: + DecayedAdagrad -------------- @@ -47,6 +59,17 @@ DecayedAdagrad :members: :noindex: +.. _api_fluid_optimizer_Ftrl: + +Ftrl +---- + +.. autoclass:: paddle.fluid.optimizer.Ftrl + :members: + :noindex: + +.. _api_fluid_optimizer_SGDOptimizer: + SGDOptimizer ------------ @@ -54,6 +77,8 @@ SGDOptimizer :members: :noindex: +.. _api_fluid_optimizer_MomentumOptimizer: + MomentumOptimizer ----------------- @@ -61,6 +86,8 @@ MomentumOptimizer :members: :noindex: +.. _api_fluid_optimizer_AdagradOptimizer: + AdagradOptimizer ---------------- @@ -68,6 +95,8 @@ AdagradOptimizer :members: :noindex: +.. _api_fluid_optimizer_AdamOptimizer: + AdamOptimizer ------------- @@ -75,6 +104,8 @@ AdamOptimizer :members: :noindex: +.. 
_api_fluid_optimizer_AdamaxOptimizer: + AdamaxOptimizer --------------- @@ -82,6 +113,8 @@ AdamaxOptimizer :members: :noindex: +.. _api_fluid_optimizer_DecayedAdagradOptimizer: + DecayedAdagradOptimizer ----------------------- @@ -89,9 +122,57 @@ DecayedAdagradOptimizer :members: :noindex: +.. _api_fluid_optimizer_RMSPropOptimizer: + +RMSPropOptimizer +---------------- + +.. autoclass:: paddle.fluid.optimizer.RMSPropOptimizer + :members: + :noindex: + +.. _api_fluid_optimizer_FtrlOptimizer: + +FtrlOptimizer +------------- + +.. autoclass:: paddle.fluid.optimizer.FtrlOptimizer + :members: + :noindex: + +.. _api_fluid_optimizer_Adadelta: + Adadelta --------------- +-------- + +.. autoclass:: paddle.fluid.optimizer.Adadelta + :members: + :noindex: + +.. _api_fluid_optimizer_ModelAverage: + +ModelAverage +------------ -.. autoclass:: paddle.fluid.optimizer.AdadeltaOptimizer +.. autoclass:: paddle.fluid.optimizer.ModelAverage :members: :noindex: + +.. _api_fluid_optimizer_Optimizer: + +Optimizer +--------- + +.. autoclass:: paddle.fluid.optimizer.Optimizer + :members: + :noindex: + +.. _api_fluid_optimizer_RMSPropOptimizer: + +RMSPropOptimizer +---------------- + +.. autoclass:: paddle.fluid.optimizer.RMSPropOptimizer + :members: + :noindex: + diff --git a/doc/fluid/api/param_attr.rst b/doc/fluid/api/param_attr.rst index 8e4ddb2b04..33035bbc7c 100644 --- a/doc/fluid/api/param_attr.rst +++ b/doc/fluid/api/param_attr.rst @@ -1,9 +1,11 @@ .. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` !DO NOT EDIT THIS FILE MANUALLY! -========== -param_attr -========== +================ +fluid.param_attr +================ + +.. _api_fluid_param_attr_ParamAttr: ParamAttr --------- @@ -12,6 +14,8 @@ ParamAttr :members: :noindex: +.. _api_fluid_param_attr_WeightNormParamAttr: + WeightNormParamAttr ------------------- diff --git a/doc/fluid/api/profiler.rst b/doc/fluid/api/profiler.rst index 74d102dcb0..c750a2d588 100644 --- a/doc/fluid/api/profiler.rst +++ b/doc/fluid/api/profiler.rst @@ -1,9 +1,11 @@ .. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` !DO NOT EDIT THIS FILE MANUALLY! -======== -profiler -======== +============== +fluid.profiler +============== + +.. _api_fluid_profiler_cuda_profiler: cuda_profiler ------------- @@ -11,15 +13,35 @@ cuda_profiler .. autofunction:: paddle.fluid.profiler.cuda_profiler :noindex: +.. _api_fluid_profiler_reset_profiler: + reset_profiler -------------- .. autofunction:: paddle.fluid.profiler.reset_profiler :noindex: +.. _api_fluid_profiler_profiler: + profiler -------- .. autofunction:: paddle.fluid.profiler.profiler :noindex: +.. _api_fluid_profiler_start_profiler: + +start_profiler +-------------- + +.. autofunction:: paddle.fluid.profiler.start_profiler + :noindex: + +.. _api_fluid_profiler_stop_profiler: + +stop_profiler +------------- + +.. autofunction:: paddle.fluid.profiler.stop_profiler + :noindex: + diff --git a/doc/fluid/api/recordio_writer.rst b/doc/fluid/api/recordio_writer.rst new file mode 100644 index 0000000000..f0c12fd115 --- /dev/null +++ b/doc/fluid/api/recordio_writer.rst @@ -0,0 +1,23 @@ +.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` + !DO NOT EDIT THIS FILE MANUALLY! + +===================== +fluid.recordio_writer +===================== + +.. _api_fluid_recordio_writer_convert_reader_to_recordio_file: + +convert_reader_to_recordio_file +------------------------------- + +.. autofunction:: paddle.fluid.recordio_writer.convert_reader_to_recordio_file + :noindex: + +.. 
_api_fluid_recordio_writer_convert_reader_to_recordio_files: + +convert_reader_to_recordio_files +-------------------------------- + +.. autofunction:: paddle.fluid.recordio_writer.convert_reader_to_recordio_files + :noindex: + diff --git a/doc/fluid/api/regularizer.rst b/doc/fluid/api/regularizer.rst index 837c67111c..987eaea903 100644 --- a/doc/fluid/api/regularizer.rst +++ b/doc/fluid/api/regularizer.rst @@ -1,9 +1,11 @@ .. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` !DO NOT EDIT THIS FILE MANUALLY! -=========== -regularizer -=========== +================= +fluid.regularizer +================= + +.. _api_fluid_regularizer_append_regularization_ops: append_regularization_ops ------------------------- @@ -11,6 +13,8 @@ append_regularization_ops .. autofunction:: paddle.fluid.regularizer.append_regularization_ops :noindex: +.. _api_fluid_regularizer_L1Decay: + L1Decay ------- @@ -18,6 +22,8 @@ L1Decay :members: :noindex: +.. _api_fluid_regularizer_L2Decay: + L2Decay ------- @@ -25,16 +31,21 @@ L2Decay :members: :noindex: +.. _api_fluid_regularizer_L1DecayRegularizer: + L1DecayRegularizer ---------------------- +------------------ .. autoclass:: paddle.fluid.regularizer.L1DecayRegularizer :members: :noindex: +.. _api_fluid_regularizer_L2DecayRegularizer: + L2DecayRegularizer ---------------------- +------------------ .. autoclass:: paddle.fluid.regularizer.L2DecayRegularizer :members: :noindex: + diff --git a/doc/fluid/api/transpiler.rst b/doc/fluid/api/transpiler.rst new file mode 100644 index 0000000000..d2ac04f144 --- /dev/null +++ b/doc/fluid/api/transpiler.rst @@ -0,0 +1,59 @@ +.. THIS FILE IS GENERATED BY `gen_doc.{py|sh}` + !DO NOT EDIT THIS FILE MANUALLY! + +================ +fluid.transpiler +================ + +.. _api_fluid_transpiler_DistributeTranspiler: + +DistributeTranspiler +-------------------- + +.. autoclass:: paddle.fluid.transpiler.DistributeTranspiler + :members: + :noindex: + +.. _api_fluid_transpiler_InferenceTranspiler: + +InferenceTranspiler +------------------- + +.. autoclass:: paddle.fluid.transpiler.InferenceTranspiler + :members: + :noindex: + +.. _api_fluid_transpiler_memory_optimize: + +memory_optimize +--------------- + +.. autofunction:: paddle.fluid.transpiler.memory_optimize + :noindex: + +.. _api_fluid_transpiler_release_memory: + +release_memory +-------------- + +.. autofunction:: paddle.fluid.transpiler.release_memory + :noindex: + +.. _api_fluid_transpiler_HashName: + +HashName +-------- + +.. autoclass:: paddle.fluid.transpiler.HashName + :members: + :noindex: + +.. _api_fluid_transpiler_RoundRobin: + +RoundRobin +---------- + +.. autoclass:: paddle.fluid.transpiler.RoundRobin + :members: + :noindex: + diff --git a/doc/fluid/design/concepts/functions_operators_layers.md b/doc/fluid/design/concepts/functions_operators_layers.md index 30bc488a18..1f86b99e51 100644 --- a/doc/fluid/design/concepts/functions_operators_layers.md +++ b/doc/fluid/design/concepts/functions_operators_layers.md @@ -40,7 +40,7 @@ template class FCOp : public OperatorBase { public: void Run(...) 
{
-    add(mul(Input("X"), Input("W")), Input("b");
+    add(mul(Input("X"), Input("W")), Input("b"));
  }
};
REGISTER_OP(FCOp, "fc");
diff --git a/doc/fluid/design/concepts/lod_tensor.md b/doc/fluid/design/concepts/lod_tensor.md
index a88292e788..748488f6d5 100644
--- a/doc/fluid/design/concepts/lod_tensor.md
+++ b/doc/fluid/design/concepts/lod_tensor.md
@@ -155,7 +155,7 @@ into offsets
 ```
 3 2+3 4+5 1+9 2+10 3+12
 ```
-so we know that the first sentence is from word 0 to word 3, and the second sentence from work 3 to word 5.
+so we know that the first sentence is from word 0 to word 3, and the second sentence from word 3 to word 5.

 Similarly, the lengths in the top level LoD

@@ -173,6 +173,7 @@ are transformed into offsets of elements/words as follows:

 ## Slicing of LoD Tensors
+
 When we use the above 2-level LoD Tensor as the input to a nested-RNN, we need to retrieve certain sequences. Here we define the sequence identified by branch <i,j,...> as the **<i,j,...>-slice**.

 For example, the <2>-slice of above example is

 ```
 10  12
   ||
 ```

 and the <2,0>-slice of above slice is

 ```
 10  12
   ||
 ```
+
+## Length Representation vs Offset Representation
+
+The offset representation is an implementation-oriented decision and it makes understanding the idea behind LoDTensor difficult.
+Hence, we encapsulate this implementation detail in C++ and expose the original length representation in our Python API.
+Specifically, we call this length representation `recursive_sequence_lengths`, and users can use the following code to set or get the `recursive_sequence_lengths` of a LoDTensor in Python:
+```Python
+# length representation of lod called recursive_sequence_lengths
+recursive_seq_lens = [[3, 1, 2], [2, 2, 1, 3, 1, 2]]
+# Create a LoDTensor that has the above recursive_sequence_lengths info.
+# This recursive_sequence_lengths will be converted to an offset representation of LoD in the C++ implementation under the hood.
+# (`data` stands for the numpy array holding the tensor's content.)
+tensor = fluid.create_lod_tensor(data, recursive_seq_lens, fluid.CPUPlace())
+
+# Set/Change the recursive_sequence_lengths info of LoDTensor
+tensor.set_recursive_sequence_lengths([[3, 1, 2]])
+# Get the recursive_sequence_lengths info of a LoDTensor (the offset-based LoD representation stored in C++ will be converted
+# back to length-based recursive_sequence_lengths), new_recursive_seq_lens = [[3, 1, 2]]
+new_recursive_seq_lens = tensor.recursive_sequence_lengths()
+```
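+
+To make the relation between the two representations concrete, here is a pure-Python sketch (illustrative only, not part of the Paddle API) of the same length-to-offset conversion that the C++ implementation performs under the hood:
+
+```Python
+# Convert length-based recursive_sequence_lengths into the offset-based
+# LoD used internally by the C++ implementation.
+def lengths_to_offsets(recursive_seq_lens):
+    offset_lod = []
+    for level in recursive_seq_lens:
+        offsets = [0]
+        for length in level:
+            offsets.append(offsets[-1] + length)
+        offset_lod.append(offsets)
+    return offset_lod
+
+# [[3, 1, 2], [2, 2, 1, 3, 1, 2]] becomes [[0, 3, 4, 6], [0, 2, 4, 5, 8, 9, 11]]
+print(lengths_to_offsets([[3, 1, 2], [2, 2, 1, 3, 1, 2]]))
+```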
diff --git a/doc/fluid/design/concepts/python_data_feeding.md b/doc/fluid/design/concepts/python_data_feeding.md
new file mode 100644
index 0000000000..dffee8e02b
--- /dev/null
+++ b/doc/fluid/design/concepts/python_data_feeding.md
@@ -0,0 +1,130 @@
+# Python Data Feeding
+
+In the former implementation of Paddle Fluid, there are two ways to feed data:
+
+- Use `reader_op` in the backend C++ side. This method only supports data feeding from recordio files and random data generators, but supports many kinds of `decorated_readers`. For example, `double_buffer_reader` uses two threads to achieve better performance: one for time-consuming I/O operations, and the other for `Executor::Run()`. See [C++ Data Feeding](https://github.com/PaddlePaddle/Paddle/blob/develop/doc/fluid/design/concepts/cpp_data_feeding.md) for details.
+
+- Feed data directly using `DataFeeder.feed()` in Python code. It is more flexible than the first way: many kinds of preprocessing steps can be performed before feeding, using Python or any other language, instead of adding many uncommon `operators` on the C++ side. But this method is less efficient: the program cannot read the next mini-batch of data before `Executor::Run()` ends. Moreover, `decorated_readers` such as `double_buffer_reader` cannot be used for better performance.
+
+In this document, we design a Python Data Feeding process combining the efficiency of the first way and the flexibility of the second way. A data queue `LoDTensorBlockingQueue` is designed to be shared by the Python and C++ sides, while `LoDTensorArray` is pushed into the queue on the Python side and `reader_op` on the C++ side reads out the data from the queue.
+
+
+## Design of LoDTensorBlockingQueue
+`LoDTensorBlockingQueue` is a blocking queue with a fixed `capacity` and accepts `std::vector<framework::LoDTensor>` with shapes indicated by `dims`. Since `LoDTensorBlockingQueue` must be constructed using `capacity` and `dims`, it cannot be a `Variable` type. Therefore, a `LoDTensorBlockingQueueHolder` is designed to defer construction of `LoDTensorBlockingQueue`.
+
+```C++
+class LoDTensorBlockingQueueHolder;
+
+class LoDTensorBlockingQueue {
+  friend class LoDTensorBlockingQueueHolder;
+
+ private:
+  // `LoDTensorBlockingQueue` can only be constructed by
+  // `LoDTensorBlockingQueueHolder::InitOnce()`
+  LoDTensorBlockingQueue(size_t capacity, const std::vector<framework::DDim>& dims);
+
+ public:
+  size_t Size() const { return queue_.Size(); }  // Get the current size of the queue
+
+  size_t Cap() const { return queue_.Cap(); }  // Get the capacity of the queue
+
+  void Close() { return queue_.Close(); }
+
+  bool IsClosed() const { return queue_.IsClosed(); }
+
+  // Block if Size() == Cap()
+  // Return false only when queue_.IsClosed() == true
+  bool Push(const std::vector<framework::LoDTensor> &lod_tensor_vec);
+
+  // Block if Size() == 0.
+  // *success == false only when queue_.IsClosed() == true
+  std::vector<framework::LoDTensor> Pop(bool *success = nullptr);
+
+ private:
+  // Use reader::BlockingQueue as the inner data structure
+  BlockingQueue<std::vector<framework::LoDTensor>> queue_;
+  std::vector<framework::DDim> dims_;
+};
+
+class LoDTensorBlockingQueueHolder {
+ public:
+  // Call the constructor of `LoDTensorBlockingQueue` to create queue_.
+  // `InitOnce` can only be called once; otherwise an exception will be raised.
+  void InitOnce(size_t capacity, const std::vector<framework::DDim>& dims) {
+    PADDLE_ENFORCE(queue_ == nullptr);
+    queue_.reset(new LoDTensorBlockingQueue(capacity, dims));
+  }
+
+  const std::shared_ptr<LoDTensorBlockingQueue>& GetQueue() const { return queue_; }
+
+ private:
+  std::shared_ptr<LoDTensorBlockingQueue> queue_;
+};
+```
+
+There are some major points that must be considered:
+- `LoDTensorBlockingQueueHolder` should be a `Variable` in global scope, so that `reader_op` can find it when reading data.
+- A `Variable` of `LoDTensorBlockingQueueHolder` (but no corresponding `VarDesc`) must be created in Python code before `Executor::Run()`, so that `Executor::Run()` can get the feeding data when it is called.
+- `Create_reader_op` should accept the name of the `LoDTensorBlockingQueueHolder` variable as an input.
+
+
+## Release of the GIL in pybind
+`Pybind11::gil_scoped_release` is used to release the GIL (Global Interpreter Lock) when `LoDTensorBlockingQueue::Push()` or `Executor::Run()` is invoked from the Python side, so that `LoDTensorBlockingQueue::Push()` and `Executor::Run()` can run in parallel.
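+
+The effect can be pictured with a pure-Python stand-in (illustrative only), where `queue.Queue` plays the role of the C++ `LoDTensorBlockingQueue`: the feeding thread and the consuming loop make progress in parallel.
+
+```Python
+# Stand-in for LoDTensorBlockingQueue: put() blocks when Size() == Cap(),
+# get() blocks when the queue is empty; None plays the role of Close().
+import queue
+import threading
+
+capacity = 2
+q = queue.Queue(maxsize=capacity)
+
+def producer():
+    for batch_id in range(5):
+        q.put("lod_tensor_array_%d" % batch_id)  # analogous to Push()
+    q.put(None)                                  # analogous to Close()
+
+threading.Thread(target=producer).start()
+while True:
+    batch = q.get()        # analogous to Pop(); blocks if the queue is empty
+    if batch is None:      # "queue closed": Pop() would report failure
+        break
+    print("Executor::Run() consumes " + batch)
+```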
+
+## Design of PyReader
+`PyReader` is a reader which holds a `LoDTensorBlockingQueue` object.
+
+```C++
+class PyReader : public ReaderBase {
+ public:
+  explicit PyReader(const std::shared_ptr<LoDTensorBlockingQueue>& queue);
+
+  void ReadNext(std::vector<framework::LoDTensor>* out) override {
+    bool success;
+    *out = queue_->Pop(&success);
+    if (!success) out->clear();
+  }
+
+  void ReInit() override { return; }
+
+ private:
+  std::shared_ptr<LoDTensorBlockingQueue> queue_;
+};
+```
+
+
+## Design of CreatePyReaderOp
+`CreatePyReaderOp` is used to create the `PyReader` object. It requires an input `blocking_queue` which indicates the name of the `LoDTensorBlockingQueueHolder` variable.
+```C++
+class CreatePyReaderOp : public framework::OperatorBase {
+ public:
+  using framework::OperatorBase::OperatorBase;
+
+ private:
+  void RunImpl(const framework::Scope& scope,
+               const platform::Place& dev_place) const override {
+    auto* out = scope.FindVar(Output("Out"))
+                    ->template GetMutable<framework::ReaderHolder>();
+    if (out->Get() != nullptr) return;
+
+    const std::string& queue_name = Input("blocking_queue");
+    auto* queue_holder_var = scope.FindVar(queue_name);
+    PADDLE_ENFORCE(queue_holder_var != nullptr);
+    auto* queue_holder = queue_holder_var
+                             ->template GetMutable<LoDTensorBlockingQueueHolder>();
+    out->Reset(new PyReader(queue_holder->GetQueue()));
+  }
+};
+```
+
+## Design of Python codes
+The design of the Python code is as follows. First, we construct a variable of `LoDTensorBlockingQueueHolder` and init it with the given parameters, returning the `LoDTensorBlockingQueue` object after initialization. After that, a layer of `CreatePyReaderOp` is constructed and accepts the name of the `LoDTensorBlockingQueueHolder` variable. The `LoDTensorBlockingQueue` object and the result of the layer are both returned.
+```Python
+def py_reader(capacity, shapes):
+    queue_name = unique_name.generate("lod_tensor_blocking_queue")
+    var = global_scope().var(queue_name)  # create LoDTensorBlockingQueueHolder Variable
+    feed_queue = core.init_lod_tensor_blocking_queue(var, capacity, shapes)  # init the queue
+    out = create_var()
+    create_py_reader_op_with_queue_name(
+        inputs={'blocking_queue': queue_name},
+        outputs={'Out': [out]})
+    return out, feed_queue
+```
diff --git a/doc/fluid/design/concepts/var_desc.md b/doc/fluid/design/concepts/var_desc.md
index 6750323c01..8db67f6703 100644
--- a/doc/fluid/design/concepts/var_desc.md
+++ b/doc/fluid/design/concepts/var_desc.md
@@ -35,7 +35,7 @@ The computation `Program` consists of nested `Blocks`. Each `Block` will consist

 ## Definition of VarType

-A VarDesc should have a name, type and whether or not it is persistable. The are different kinds of variable types supported in PaddlePaddle, apart from the POD_Types like: `LOD_TENSOR`, `SELECTED_ROWS`, `FEED_MINIBATCH`, `FETCH_LIST`, `STEP_SCOPES`, `LOD_RANK_TABLE`, `LOD_TENSOR_ARRAY`, `PLACE_LIST`, `READER` and `CHANNEL`. These are declared inside `VarType`. A `VarDesc` then looks as the following:
+A VarDesc should have a name, type and whether or not it is persistable. There are different kinds of variable types supported in PaddlePaddle, apart from the POD_Types like: `LOD_TENSOR`, `SELECTED_ROWS`, `FEED_MINIBATCH`, `FETCH_LIST`, `STEP_SCOPES`, `LOD_RANK_TABLE`, `LOD_TENSOR_ARRAY`, `PLACE_LIST`, `READER` and `CHANNEL`. These are declared inside `VarType`. A `VarDesc` then looks as the following:
 ```proto
 message VarDesc {
diff --git a/doc/fluid/design/dist_train/async_update.md b/doc/fluid/design/dist_train/async_update.md
index 6a0835b761..248d2ec18d 100644
--- a/doc/fluid/design/dist_train/async_update.md
+++ b/doc/fluid/design/dist_train/async_update.md
@@ -4,34 +4,37 @@

 For the typical synchronous distributed training, some significant steps are as follows:

-1. A Trainer will compute the gradients and SEND them to the Parameter Server(PServer) nodes.
-1. After the PServer node received gradients came from all the Trainers, It will aggregate the
+1. A trainer process will compute the gradients and **send** them to the parameter server (PS) nodes.
+1. After a PS node has received the gradients from all the trainers, it will aggregate the
 gradient variables for the same parameter into one gradient variable and then apply the
 aggregated gradient to the respective parameter, finally using an optimization algorithm (SGD, Momentum...)
 to update the parameters.
-1. The Trainer would wait for the PServers finished the optimize stage, and GET the parameters from PServer,
+1. The trainer will wait until the PS has finished the optimize stage, and then **get** the parameters from the PS,
 so all the Trainers would get the same parameters.

-In the synchronously distributed training, there should be a `Barrier` to synchronise the
-parameters after the optimizing stage. The performance of a distributed training job would
-depend on the slowest node if there were hundreds or thousands of training nodes in a
-Job, the performance of synchronously distributed training might be very poor because of
-the slow node. So this design doc would introduce an approach to implement
-*asynchronously* distributed training in PaddlePaddle Fluid.
+In Synchronous Distributed Training, there is a **barrier** on each PS that waits until all trainer processes
+have completed running the current mini-batch. After that, all trainers can continue to run the next
+mini-batch. So, the overall performance of Synchronous Distributed Training depends
+on the slowest node.
+
+In Asynchronous Distributed Training, we don't need to wait for a global mini-batch; the optimizer on
+the PS will run immediately when a gradient is uploaded to the PS from one trainer. This mode can
+achieve better scaling and throughput. In this design doc, we will introduce how to
+implement Asynchronous Distributed Training based on PaddlePaddle Fluid.

 ## Design

-As the figure above, we describe a global view of asynchronously update process and use
+As shown in the figure above, we describe a global view of the asynchronous update process and use
 the parameter `w1` as an example to introduce the steps:
 1. For each gradient variable, which may be distributed on different GPU cards, aggregate them once they are all calculated.
-1. Split the gradient variable into multiple blocks according to the number of PServer
+1. Split the gradient variable into multiple blocks according to the number of PS
 instances and then send them.
-1. PServer would run an `Optimize Block` using a specified optimize algorithm to update
+1. The PS will run an `Optimize Block` using a specified optimization algorithm to update
 the specified parameter.
-1. The trainer will fetch latest parameter from PServer before running forward Op which depends
+1. The trainer will fetch the latest parameter from the PS before running the forward Op which depends
 on the specified parameter.
 1. Broadcast the received variable into multiple GPU cards and continue to run the next
 mini-batch.
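+
+To make the update rule concrete, here is a toy simulation (pure Python, not Paddle code) of the PS side: each incoming gradient is applied to `w1` immediately, with no barrier between trainers.
+
+```Python
+# Toy asynchronous SGD on the PS: the Optimize Block runs as soon as a
+# gradient arrives from any trainer.
+w1 = 0.0
+learning_rate = 0.1
+incoming_grads = [0.5, -0.2, 0.3]  # gradients arriving from different trainers
+for grad in incoming_grads:
+    w1 -= learning_rate * grad
+    print("updated w1 =", w1)
+```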
@@ -40,8 +43,8 @@ mini-batch.

 - For the multiple devices distributed training, we need to aggregate the gradient
 variables which are placed on different devices first, and then schedule a `SendVars`
 Operator to
-send the gradient variables to the multiple PServer instances.
-- Schedule `FetchVars` operator to fetch the latest parameter from PServer before running
+send the gradient variables to the multiple PS instances.
+- Schedule the `FetchVars` operator to fetch the latest parameter from the PS before running
 the forward ops.
 - There could be a large number of gradient variables to be sent, so we need to use another
 thread pool(IO Threadpool) whose number of schedulable threads is larger than the
diff --git a/doc/fluid/design/dist_train/dist_train_nccl2.md b/doc/fluid/design/dist_train/dist_train_nccl2.md
new file mode 100644
index 0000000000..aa7455ec5d
--- /dev/null
+++ b/doc/fluid/design/dist_train/dist_train_nccl2.md
@@ -0,0 +1,35 @@
+# Distributed Training with NCCL2
+
+We design a pattern that can enable training with `ParallelExecutor` and
+use [NCCL2](https://developer.nvidia.com/nccl) as its collective
+communication library.
+
+In `ParallelExecutor` we can use `AllReduce` or `Reduce` and `Broadcast`
+to do multi-GPU training. And if we initialize NCCL2 communicators as
+ranks in a distributed environment, we can simply run the `ParallelExecutor`
+as a distributed program! The only thing that may be different from
+the single-node version is that we need to broadcast the NCCL unique ID
+to all the nodes and initialize communicators using that ID, so the NCCL2
+ranks will know each other.
+
+To achieve this feature, we introduce a new operator: the `gen_nccl_id` op,
+so we are ***not*** bound to running NCCL2 with MPI; we can run it on
+whatever platform you like.
+
+It has two running modes:
+
+1. Generate and broadcast mode, which should be used on trainer 0;
+1. Listen and fetch mode, which should be used on trainers other than 0.
+
+In both modes, this op saves the NCCL ID into the current scope as a
+persistable variable. Then we can insert this op at the end of the
+"startup program" of fluid, so that all workers can get the same ID to
+initialize NCCL communicator objects.
+
+<img src="src/ncc2_design.png"/>
+
+The above figure shows the general process of distributed training with
+NCCL2. Each trainer has a number of communicators equal to its number of
+GPUs, but the ranks should match the global rank numbering: here we have
+8 GPUs in total, so `nranks==8`; the ranks are 0 ~ 3 on trainer 0 and
+4 ~ 7 on trainer 1.
diff --git a/doc/fluid/design/dist_train/distributed_lookup_table_design.md b/doc/fluid/design/dist_train/distributed_lookup_table_design.md
index 9887291389..e284e1ec5c 100644
--- a/doc/fluid/design/dist_train/distributed_lookup_table_design.md
+++ b/doc/fluid/design/dist_train/distributed_lookup_table_design.md
@@ -1,6 +1,6 @@
 # Design Doc: Distributed Lookup Table Operator

-A lookup table operator in PaddlePaddle where the table could be out
+A distributed lookup table operator in PaddlePaddle where the table could be out
 of the memory of a computer.

 ## Background
@@ -24,14 +24,14 @@ memory, so we'd need a distributed storage service, which supports the lookup
 of rows.

 The following figure illustrates the multiplication of x with two
-non-zero elements, or say, two symbols, and a lookup table W:
+non-zero elements, or say two symbols, and a lookup table W:

 ![lookup table](./src/lookup_table.png)
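+
+As a toy illustration of the row lookup sketched in the figure (pure Python, not the actual Paddle kernel):
+
+```Python
+# W is the lookup table (vocab_size x embedding_dim); x has two non-zero
+# ids, so the forward pass only needs the corresponding rows W(x).
+W = [[0.1, 0.2], [0.3, 0.4], [0.5, 0.6]]
+ids = [0, 2]               # the two non-zero "symbols" in x
+W_x = [W[i] for i in ids]  # W(x): only the rows that were looked up
+print(W_x)                 # [[0.1, 0.2], [0.5, 0.6]]
+```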
 ### The Backward Algorithm

 The backward algorithm computes W'(x) using W(x). W'(x) has the same
 scale of size as W(x) and is much smaller than W.

 To optimize W given W', we can do simple SGD update:

@@ -44,85 +44,46 @@ $$W = f(W, W')$$

 The following figure illustrates the backward pass of the lookup
 operator: ![lookup table training](./src/lookup_table_training.png)

-## Distributed Storage Service
-
-The forward algorithm requires a distributed storage service for W.
-The backward algorithm prefers that the storage system can apply the
-optimization algorithm on W. The following two sections describe two
-solutions -- the former doesn't require that the storage service can
-do optimization, the latter does.
-
-### Storage Service Doesn't Optimize
-
-In this design, we use highly-optimized distributed storage, e.g.,
-memcached, as the storage service, and we run the optimization
-algorithm on parameter servers of PaddlePaddle. The following figure
-illustrates the training process.
-
-Each trainer runs the forward and backward passes using their local
-data:
-
-1. In the forward pass, when a trainer runs the forward algorithm of a
-   lookup operator, it retrieves W(x) from the storage service.
-1. The trainer computes W'(x) in the backward pass using W(x).
-
-During the global update process:
-
-1. Each trainer uploads its W'(x) to parameter servers.
-1. The parameter server runs the optimization algorithm, e.g., the
-   Adam optimization algorithm, which requires that
-   1. The parameter server retrieves W(x) from memcached, and
-   1. The parameter server pushes $\Delta W(x)=f(W(x), lambda \sum_j
-      W'(x))$ to memcached, where $f$ denotes the optimization
-      algorithm.
-
-### Storage Service Does Optimize
-
-This design is very similar to the above one, except that the
-optimization algorithm $f$ runs on the storage service.
-
-- Pro: parameter servers do not retrieve W(x) from the storage
-  service, thus saves half network communication.
-- Con: the storage service needs to be able to run the optimization
-  algorithm.
-
-## Conclusion
-
-Let us do the "storage service does not optimize" solution first, as a
-baseline at least, because it is easier to use a well-optimized
-distributed storage service like memcached. We can do the "storage
-service does optimize" solution later or at the same time, which, if
-implemented carefully, should have better performance than the former.
+## Distributed Lookup Table
+
+### Problem 1: The lookup table may be very large.
+
+In conditions like search engines and recommendation systems, the number of feature Ids may be very large, say 100,000,000,000; then, for a lookup table with 8 float values per Id, the total size of the table is:
+
+```
+100,000,000,000 * 8 * 4 (Bytes) = 2980.23 GB
+```
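+
+As a quick sanity check of the arithmetic above (pure Python, illustrative only):
+
+```Python
+# 1e11 feature Ids, 8 float values per Id, 4 bytes per float value.
+total_bytes = 100000000000 * 8 * 4
+print(total_bytes / 1024.0 ** 3)  # ~2980.23 (GB)
+```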
+
+### Solution: Distributed storage
+
+1. Paddle uses [SelectedRows](https://github.com/PaddlePaddle/Paddle/blob/develop/doc/fluid/design/modules/selected_rows.md) as the storage format for the lookup table. The lookup table parameter is split across multiple machines according to the hash of the feature Id, and the input data is also split and sent to the machine holding the corresponding parameter to prefetch it.
+
+1. For common parameters, the trainer gets the whole parameter for training, but for the big lookup table, the trainer cannot store the whole parameter. Because the input data features are very sparse, each step only needs a few parameters for training, so we use `prefetch_op` to prefetch only the parameters needed by the trainer.
+
+### Problem 2: The Ids in the lookup table are not known before training.
+
+The feature Ids are calculated by a hash function, and because the feature data source is so large, we cannot get all the Ids before training. So we cannot initialize the table before training.
+
+### Solution: Id auto growth
+
+At the beginning of training, Paddle only allocates the memory for the lookup table on the parameter server side; the Ids and their values are not initialized. During training, when a parameter server receives an Id, if it is already in the lookup table, it returns the existing parameter; if the Id does not exist, Paddle adds it into the lookup table and initializes its value.
+
+### Problem 3: Parameter load and save
+
+For common parameters, Paddle uses the trainer to save and load them. But for the distributed lookup table, the trainer cannot do this because of its large size.
+
+### Solution: Parameter server side save and load
+
+Paddle supports parameter-server-side save and load for the distributed lookup table. Each parameter server machine only saves and loads its part of the whole table.
+
+## Architecture
+The whole architecture of the distributed lookup table is as below:
+
+### Training steps:
+1. Read a batch of data; the data consists of feature Ids.
+1. The input Ids are split by `split_ids_op` with the same hash function as the lookup table (a toy sketch of this routing follows the list).
+1. `prefetch_op` uses the split result to prefetch parameters back from the lookup table.
+1. Run forward-backward to get the gradients of the lookup table.
+1. `split_ids_op` splits the gradients, which are then sent to the parameter servers by `send_op`.
+1. The parameter servers update the table with the received gradients.
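+
+The Id routing performed by `split_ids_op` can be pictured with a small pure-Python sketch (illustrative only; the real op is implemented in C++):
+
+```Python
+# Route each feature Id to the parameter-server shard that owns it, using
+# the same hash function on both the prefetch and the update path.
+num_pservers = 2
+ids = [7, 2, 9, 4]
+shards = [[] for _ in range(num_pservers)]
+for feature_id in ids:
+    shards[hash(feature_id) % num_pservers].append(feature_id)
+print(shards)  # [[2, 4], [7, 9]]: each shard is sent to one parameter server
+```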
+
+![distributed lookup table](./src/distributed_lookup_table.jpeg)
diff --git a/doc/fluid/design/dist_train/src/distributed_lookup_table.graffle b/doc/fluid/design/dist_train/src/distributed_lookup_table.graffle
new file mode 100644
index 0000000000..65dfdbbacd
Binary files /dev/null and b/doc/fluid/design/dist_train/src/distributed_lookup_table.graffle differ
diff --git a/doc/fluid/design/dist_train/src/distributed_lookup_table.jpeg b/doc/fluid/design/dist_train/src/distributed_lookup_table.jpeg
new file mode 100644
index 0000000000..5353a16fd3
Binary files /dev/null and b/doc/fluid/design/dist_train/src/distributed_lookup_table.jpeg differ
diff --git a/doc/fluid/design/dist_train/src/fluid_lookup_remote_table.graffle b/doc/fluid/design/dist_train/src/fluid_lookup_remote_table.graffle
new file mode 100644
index 0000000000..96ca6d48f4
Binary files /dev/null and b/doc/fluid/design/dist_train/src/fluid_lookup_remote_table.graffle differ
diff --git a/doc/fluid/design/dist_train/src/fluid_lookup_remote_table.png b/doc/fluid/design/dist_train/src/fluid_lookup_remote_table.png
new file mode 100644
index 0000000000..afa25ab3b4
Binary files /dev/null and b/doc/fluid/design/dist_train/src/fluid_lookup_remote_table.png differ
diff --git a/doc/fluid/design/dist_train/src/ncc2_design.graffle b/doc/fluid/design/dist_train/src/ncc2_design.graffle
new file mode 100644
index 0000000000..7d2753bbb0
Binary files /dev/null and b/doc/fluid/design/dist_train/src/ncc2_design.graffle differ
diff --git a/doc/fluid/design/dist_train/src/ncc2_design.png b/doc/fluid/design/dist_train/src/ncc2_design.png
new file mode 100644
index 0000000000..da0d5ee81f
Binary files /dev/null and b/doc/fluid/design/dist_train/src/ncc2_design.png differ
diff --git a/doc/fluid/design/ir/overview.md b/doc/fluid/design/ir/overview.md
new file mode 100644
index 0000000000..83ef97c99e
--- /dev/null
+++ b/doc/fluid/design/ir/overview.md
@@ -0,0 +1,185 @@
+## Motivation
+
+There is a `gap` between the `Program` defined by the
+user and the `Executable` that can be scheduled
+efficiently on heterogeneous hardware, either locally
+or in a distributed manner.
+
+Usually, the `gap` is bridged by
+
+* A series of transformations applied in a defined order.
+
+* These transformations usually involve
+`insert, delete, clustering, split, dependency analysis`.
+
+* A simple way to verify and debug each transformation.
+
+* The flexibility to add, remove, or customize transformations to fit
+the requirements of various algorithms (models) and hardware scenarios.
+
+Some other trends also push us toward a better unified pattern.
+
+* Deep learning frameworks are built around the concept of graphs.
+To leverage tools such as compilation (e.g. TVM and nGraph) or
+cross-framework conversion (e.g. ONNX), we also need an intermediate
+representation that can be connected to the rest of the ecosystem.
+
+
+We need a unified pattern to naturally support the requirements
+described above. The pattern should fit training, inference,
+and other offline serialized model transformations.
+Learning from LLVM and other deep learning frameworks, we draft the
+design below.
+
+
+## Design
+
+### Major Concepts
+
+#### Node
+
+`Node` represents an operation that performs some computation or
+a variable that is an input or output of an operation.
+
+`Node`s are connected to other `Node`s via inputs and outputs.
+
+Other properties (maybe device placement information) can be added
+to `Node` in the future if it is a
+common requirement of many other `Pass`es.
+Otherwise, it should live
+in a `Node` wrapper class that is private to some `Pass` or be
+a local member of a `Pass`.
+
+#### Graph
+
+`Graph` contains a list of `Node`s, which are connected to
+each other via inputs and outputs.
+
+TODO: Better definitions for the graph.
+
+`Graph` can also contain `Attribute`s. `Attribute`s
+can be `any` thing. For example, an `Attribute` can be a list of "wrapper"
+nodes. The `wrapper` nodes compose `Node`s and provide
+helper methods for execution or transformation. `Attribute`s
+can also contain other things that describe some properties of
+the `Graph` or `Graph` nodes. `Attribute`s can be passed
+across `Pass`es. However, they should be used with care.
+
+```cpp
+class Graph {
+ public:
+  explicit Graph(const ProgramDesc &program);
+
+  bool Has(const std::string &attr_name) const;
+
+  template <typename AttrType>
+  AttrType &Get(const std::string &attr_name) const;
+
+  template <typename AttrType>
+  void Set(const std::string &attr_name, AttrType *attr);
+
+  const std::unordered_set<ir::Node *> &Nodes() const;
+
+  // Create a normal variable with non-null VarDesc.
+  ir::Node *CreateVarNode(VarDesc *var_desc);
+
+  // Create a normal runnable operator with OpDesc.
+  ir::Node *CreateOpNode(OpDesc *op_desc);
+
+  // Create a control dependency var that connects 2 operations. The
+  // var doesn't hold any data. Other than that, it's no different from
+  // other vars, considering dependency analysis.
+  ir::Node *CreateControlDepVar();
+
+  // A more free-style way of creating a graph node. Mostly used for tests
+  // or to "copy" from another node. Avoid using it if possible.
+  ir::Node *CreateEmptyNode(const std::string &name, ir::Node::Type type);
+
+  // Clear all node information of the graph and return the ownership of the
+  // nodes.
+  std::vector<std::unique_ptr<ir::Node>> ReleaseNodes();
+};
+```
+
+#### Pass
+
+`Pass` represents a transformation of a `Graph`. Its input
+is a `Graph` and its output is also a `Graph`. For example,
+a `Pass` can simply print out the `Graph`. A `Pass`
+can also fuse some of the `Graph`'s `Node`s.
+
+```cpp
+class Pass {
+ public:
+  std::unique_ptr<Graph> Apply(std::unique_ptr<Graph> graph) const {
+    // Some correctness checks.
+    auto new_graph = ApplyImpl(std::move(graph));
+    // Some correctness checks.
+    return new_graph;
+  }
+
+  // Get a reference to an attribute previously set.
+  template <typename AttrType>
+  AttrType &Get(const std::string &attr_name) const;
+
+  // Set a pointer to the attribute. Pass takes ownership of the attribute.
+  template <typename AttrType>
+  void Set(const std::string &attr_name, AttrType *attr);
+
+  // Set a pointer to the attribute. Pass doesn't take ownership. Caller
+  // should delete the attribute.
+  template <typename AttrType>
+  void SetNotOwned(const std::string &attr_name, AttrType *attr);
+
+ protected:
+  virtual std::unique_ptr<Graph> ApplyImpl(std::unique_ptr<Graph> graph) const = 0;
+};
+
+// In my_pass.cc
+class MyPass : public Pass {
+ protected:
+  std::unique_ptr<Graph> ApplyImpl(std::unique_ptr<Graph> graph) const override {
+    // do something.
+    return graph;
+  }
+};
+REGISTER_PASS(my_pass, MyPass)
+.RequirePassAttr("places")
+.RequireGraphAttr("dep_vars");
+
+
+// To use the pass.
+auto my_pass = ir::PassRegistry::Instance().Get("my_pass");
+graph = my_pass->Apply(std::move(graph));
+// Note: to force link my_pass.cc, in the code:
+USE_PASS(my_pass);
+```
+
+#### Optimize
+
+`Optimize` contains a series of `Pass`es with a defined order.
+`Optimize` transforms a `Graph` that only contains raw
+modeling logic into a `Graph` that can be run efficiently while
+maintaining the original modeling logic.
+
+
+### Optimize Process
+
+* The Program is first converted to a Graph.
+* The Graph goes through a series of Passes.
+* The Graph is thereby transformed from raw model logic to a
+form that is efficient to execute.
+
+```
+// Program->ProgramToGraph->Graph->Pass1->Graph->Pass2->Graph->Pass3->Graph->Executor
+auto graph = Graph(program);
+graph = PassRegistry::Instance().Get("op_fuse_pass").Apply(std::move(graph));
+// For a more complex Pass, the Optimize Process can provide Pass attributes.
+auto mem_opt_pass = PassRegistry::Instance().Get("memory_optimization_pass");
+mem_opt_pass.SetNotOwned<int>("optimize_level", 1);
+mem_opt_pass->Apply(std::move(graph));
+graph = PassRegistry::Instance().Get("multi_devices_pass").Apply(std::move(graph));
+graph = PassRegistry::Instance().Get("multi_devices_check_pass").Apply(std::move(graph));
+Executor exe;
+exe.Run(graph);
+
+```
diff --git a/doc/fluid/design/modules/python_api.md b/doc/fluid/design/modules/python_api.md
index 265732a348..83af4e5548 100644
--- a/doc/fluid/design/modules/python_api.md
+++ b/doc/fluid/design/modules/python_api.md
@@ -98,13 +98,13 @@ class Block(objects):
     def append_operator(self, ...):
         self.ops.append(Operator(self, ...))
 
-    def prepend_operator(self, ...): # Parameter's ctor prepands initialize operators.
+    def _prepend_operator(self, ...): # Parameter's ctor prepends initialize operators.
         self.ops.prepend(Operator(self, ...))
 ```
 
 `create_parameter` is necessary because parameters are global variables, defined in the global block, but can be created in some sub-blocks. For example, an FC layer in the step block of an RNN operator.
 
-`prepend_operator` is necessary because the constructor of `Parameter` needs to create the initialize (or load) operator of the parameter, and would like to put it in the *preamble* of the global block.
+`_prepend_operator` is necessary because the constructor of `Parameter` needs to create the initialize (or load) operator of the parameter, and would like to put it in the *preamble* of the global block.
 
 ### Operator
diff --git a/doc/fluid/design/motivation/api.md b/doc/fluid/design/motivation/api.md
index e6a4638d91..bc222564e3 100644
--- a/doc/fluid/design/motivation/api.md
+++ b/doc/fluid/design/motivation/api.md
@@ -77,8 +77,7 @@ print "The sematic-vector of testA: ", paddle.infer(fA, parameters, testA)
 
 ### Example 2. Sharing Parameters between "Models"
 
-We use [GAN](https://github.com/PaddlePaddle/book/tree/develop/gan) in
-this example. In the following example program, `d0` and `d1`
+We use GAN in this example. In the following example program, `d0` and `d1`
 correspond to the two networks in the following figure:
diff --git a/doc/fluid/design/multi_devices/kernel_selection.md b/doc/fluid/design/multi_devices/kernel_selection.md
index 967317d5d2..4d2aab87b8 100644
--- a/doc/fluid/design/multi_devices/kernel_selection.md
+++ b/doc/fluid/design/multi_devices/kernel_selection.md
@@ -74,10 +74,10 @@ void OperatorWithKernel::Run(
         auto kernel_type_for_var = this->GetKernelTypeForVar(...);
         if (kernel_type_for_var.place_ != expected_kernel_key.place_) {
           auto* trans_var = new_scope.Var(var_name);
-          auto* out = DataTransform(expected_kernel_key,
+          auto* out = TransformData(expected_kernel_key,
                                     kernel_type_for_var,
                                     *tensor_in);
-          CopyVariableWithTensor(...);
+          SetTensorToVariable(...);
         }
       }
diff --git a/doc/fluid/design/multi_devices/operator_kernel_type.md b/doc/fluid/design/multi_devices/operator_kernel_type.md
index 8c1bc8f76a..5e391bd62b 100644
--- a/doc/fluid/design/multi_devices/operator_kernel_type.md
+++ b/doc/fluid/design/multi_devices/operator_kernel_type.md
@@ -75,7 +75,7 @@ Different layout leads to different implementation of the operator kernel. There
 
 - The inference of Layout is at run-time, not at compile-time.
 
-- Every operator has to implement different kernels for different layouts. Let's take MKLDNN as an example. If we want to implement an MKLDNN convolution operator, we have to implement all the kernels for different layouts, which are listed [here](http://01org.github.io/mkl-dnn/structmkldnn_1_1memory.html). And we will have a special macro to register kernels for MKLDNN operators.
+- Every operator has to implement different kernels for different layouts. Let's take MKLDNN as an example. If we want to implement an MKLDNN convolution operator, we have to implement all the kernels for different layouts, which are listed [here](http://intel.github.io/mkl-dnn/structmkldnn_1_1memory.html). And we will have a special macro to register kernels for MKLDNN operators.
 
 `Layout` is also defined as a enum variable:
diff --git a/doc/fluid/design/quantization/fixed_point_quantization.md b/doc/fluid/design/quantization/fixed_point_quantization.md
new file mode 100644
index 0000000000..085352fc56
--- /dev/null
+++ b/doc/fluid/design/quantization/fixed_point_quantization.md
@@ -0,0 +1,110 @@
+Fixed-point quantization uses lower bits, for example, 2-bit, 3-bit or 8-bit fixed point, to represent weights and activations, which are usually in single-precision floating point with 32 bits. The fixed-point representation has advantages in reducing memory bandwidth, power consumption, computational resources, and model storage requirements. It is especially important for inference in embedded-device deployment.
+
+According to some experiments, the approach of directly quantizing a model trained in floating point works well for large models, like the VGG model with many parameters, but the accuracy drops a lot for small models. In order to improve the tradeoff between accuracy and latency, many quantized training approaches have been proposed.
+
+This document designs a quantized training framework for Fluid. The first part introduces how to quantize, the second part describes the quantized training framework, and the last part illustrates how to calculate the quantization scale.
+
+
+### How to quantize
+
+There are many ways to quantize a float value to a fixed-point value. For example:
+
+$$ r = min(max(x, a), b)$$
+$$ s = \frac{b - a}{n - 1} $$
+$$ q = \left \lfloor \frac{r - a}{s} \right \rceil $$
+
+where $x$ is the float value to be quantized, $[a, b]$ is the quantization range, $a$ is the minimum value, and $b$ is the maximum value. $\left \lfloor \right \rceil$ denotes rounding to the nearest integer. If the quantization level is $k$, then $n = 2^k$; for example, if $k$ is 8 then $n$ is 256. $q$ is the quantized integer.
+
+
+The quantization we apply is parameterized by the number of quantization levels and the maximum absolute value:
+
+$$ M = max(abs(x)) $$
+$$ q = \left \lfloor \frac{x}{M} * (n - 1) \right \rceil $$
+
+where $x$ is the float value to be quantized and $M$ is the maximum absolute value. $\left \lfloor \right \rceil$ denotes rounding to the nearest integer. For 8-bit quantization, $n=2^{8}=256$. $q$ is the quantized integer.
+
+
+Whether *min-max* quantization or *max-abs* quantization is used, both can be represented as:
+
+$q = scale * r + b$
+
+We call the *min-max* and *max-abs* values the quantization arguments, also known as the quantization scale or quantization range.
+
+
+How to calculate the quantization scale (or maximum absolute value) for inference will be described in the last part.
+
+
+### Training Framework
+
+#### Forward pass
+
+The forward pass uses simulated quantization, see Figure 1.
+
+The training framework is shown in the following figure.
+

+Figure 1. Forward in training with simulated quantization.

+
+- First, both the input and the weights are quantized to 8-bit integers.
+- Second, the multiplication (or convolution) operation is performed with integers.
+- Third, the multiplication (or convolution) results are dequantized to 32-bit floats.
+- Finally, the bias-addition is done in 32-bit float. Here, the bias is not quantized.
+
+For general matrix multiplication (GEMM), quantize $X$ and $W$:
+
+$$ X_q = \left \lfloor \frac{X}{X_m} * (n - 1) \right \rceil $$
+$$ W_q = \left \lfloor \frac{W}{W_m} * (n - 1) \right \rceil $$
+
+Do the GEMM:
+
+$$ Y = X_q * W_q $$
+
+
+Dequantize $Y$:
+
+$$
+\begin{align}
+Y_{dq} &=\frac{Y}{(n - 1) * (n - 1)} * X_m * W_m \\\
+       &=\frac{X_q * W_q}{(n - 1) * (n - 1)} * X_m * W_m \\\
+       &=(\frac{X_q}{n - 1} * X_m) * (\frac{W_q}{n - 1} * W_m)
+\end{align}
+$$
+
+From these formulas, the dequantization can also be moved before the GEMM: dequantize $X_q$ and $W_q$ first, then do the GEMM. The forward workflow in training is equivalent to the following framework.
+

+Figure 2. Equivalent forward in training with simulated quantization.
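+
+To make the max-abs formulas and the Figure 1/Figure 2 equivalence concrete,
+here is a small illustrative NumPy sketch. It is a sketch only, not Fluid's
+quantization transpiler; the helper name `quantize_max_abs` is an assumption.
+
+```python
+import numpy as np
+
+n = 2 ** 8  # 256 quantization levels for 8-bit quantization
+
+def quantize_max_abs(x, n):
+    m = np.max(np.abs(x))                 # max-abs scale M
+    return np.round(x / m * (n - 1)), m   # q = round(x / M * (n - 1))
+
+X = np.random.uniform(-1, 1, (2, 3)).astype("float32")
+W = np.random.uniform(-1, 1, (3, 4)).astype("float32")
+X_q, X_m = quantize_max_abs(X, n)
+W_q, W_m = quantize_max_abs(W, n)
+
+# Figure 1 style: integer GEMM first, then dequantize the result.
+Y_dq = X_q.dot(W_q) / (n - 1) ** 2 * X_m * W_m
+# Figure 2 style: dequantize X_q and W_q first, then run the GEMM.
+Y_eq = (X_q / (n - 1) * X_m).dot(W_q / (n - 1) * W_m)
+
+assert np.allclose(Y_dq, Y_eq)           # the two workflows are equivalent
+print(np.abs(Y_dq - X.dot(W)).max())     # small simulated-quantization error
+```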

+
+We use this equivalent workflow in training. In our design, there is a quantization transpiler that inserts the quantization operators and the de-quantization operators into the Fluid `ProgramDesc`. Since the outputs of the quantization and de-quantization operators are still in floating point, they are called fake quantization and de-quantization operators, and the training framework is called simulated quantization.
+
+#### Backward pass
+
+See Figure 3. The gradients are calculated with the dequantized weights and activations. All inputs and outputs are 32-bit floating point. And in the weight-updating process, the gradients are added to the original weights, not to the quantized or dequantized weights.
+

+Figure 3. Backward and weight updating in training with simulated quantization.

+
+So the quantization transpiler will also change some inputs of the corresponding backward operators.
+
+### How to calculate the quantization scale
+
+There are two strategies to calculate the quantization scale; we call them the dynamic and static strategies. The dynamic strategy calculates the quantization scale in each iteration. The static strategy keeps the quantization scale fixed across different inputs.
+
+For weights, we apply the dynamic strategy during training, that is to say, the quantization scale is recalculated in each iteration until the training is finished.
+
+For activations, the quantization scales are estimated during training and then used in inference. There are several different ways to estimate them:
+
+
+1. Calculate the mean of the maximum absolute value over a window.
+2. Calculate the max of the maximum absolute value over a window.
+3. Calculate the running mean of the maximum absolute value over a window, as follows:
+
+   $$ V_t = (1 - k) * V + k * V_{t-1} $$
+
+   where $V$ is the maximum absolute value of the current batch, $V_t$ is the running mean value, and $k$ is a factor, such as 0.9.
diff --git a/doc/fluid/design/quantization/quantization_backward_and_optimization.png b/doc/fluid/design/quantization/quantization_backward_and_optimization.png
new file mode 100644
index 0000000000..84f8235ab8
Binary files /dev/null and b/doc/fluid/design/quantization/quantization_backward_and_optimization.png differ
diff --git a/doc/fluid/design/quantization/quantization_equivalent_forward.png b/doc/fluid/design/quantization/quantization_equivalent_forward.png
new file mode 100644
index 0000000000..df49c86453
Binary files /dev/null and b/doc/fluid/design/quantization/quantization_equivalent_forward.png differ
diff --git a/doc/fluid/design/quantization/quantization_forward.png b/doc/fluid/design/quantization/quantization_forward.png
new file mode 100644
index 0000000000..0913f61621
Binary files /dev/null and b/doc/fluid/design/quantization/quantization_forward.png differ
diff --git a/doc/fluid/dev/api_doc_std_cn.md b/doc/fluid/dev/api_doc_std_cn.md
index b50f18f21d..7d39b8de1e 100644
--- a/doc/fluid/dev/api_doc_std_cn.md
+++ b/doc/fluid/dev/api_doc_std_cn.md
@@ -1,8 +1,9 @@
 # API注释撰写标准
 
-- [API注释模块](#API注释模块)
-- [格式及示例](#格式及示例)
-- [完整示例](#完整示例)
+- [API注释撰写标准](#api)
+  - [API注释模块](#api)
+  - [格式及示例](#)
+  - [完整示例](#)
 
 ## API注释模块
@@ -217,4 +218,4 @@ API文档须使用reStructuredText格式撰写,该格式详情请参考[链接
 
 ## 完整示例
 
-fc 的完整注释见[示例](src/fc.py)。
+fc 的完整注释见[示例](https://github.com/PaddlePaddle/Paddle/blob/develop/doc/fluid/dev/src/fc.py)。
diff --git a/doc/fluid/dev/api_doc_std_en.md b/doc/fluid/dev/api_doc_std_en.md
index e57072d52f..f175b21975 100644
--- a/doc/fluid/dev/api_doc_std_en.md
+++ b/doc/fluid/dev/api_doc_std_en.md
@@ -1,8 +1,9 @@
 # API Doc Standard
 
-- [API Doc Structure](#API Doc Structure)
-- [Format and Examples](#Format and Examples)
-- [Complete Example](#Complete Example)
+- [API Doc Standard](#api-doc-standard)
+  - [API Doc Structure](#api-doc-structure)
+  - [Format and Examples](#format-and-examples)
+  - [Complete Example](#complete-example)
 
 ## API Doc Structure
@@ -223,4 +224,4 @@ Format and examples of each part of API documantation are as follows: (take fc f
 
 ## Complete Example
 
-Complete Example of fc please see [here](src/fc.py)。
+Complete Example of fc please see [here](https://github.com/PaddlePaddle/Paddle/blob/develop/doc/fluid/dev/src/fc.py)。
diff --git a/doc/fluid/getstarted/Developer's_Guide_to_Paddle_Fluid.md b/doc/fluid/getstarted/Developer's_Guide_to_Paddle_Fluid.md
new file mode 100644
index 0000000000..79df6c5957
---
/dev/null +++ b/doc/fluid/getstarted/Developer's_Guide_to_Paddle_Fluid.md @@ -0,0 +1,1819 @@ + +# Paddle Fluid 开发者指南 + +--- + +### ==1==. 为什么需要 PaddlePaddle Fluid? + +--- + +### 两个基础问题 + + + +1. 如何描述机器学习模型和优化过程? + - 完备自洽,表达能力足以支持潜在出现的各种计算需求 +1. 如何充分利用资源高效计算? + - 支持异步设备、多卡、分布式计算 + - 降低计算/计算优化的开发成本 + - …… + + + +--- + +### 如何描述模型和优化过程? + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
一组连续执行的layersvariable和operator构成的计算图 不再有模型的概念
2013 Caffe,Theano, Torch, PaddlePaddle
2015 TensorFlow, MxNet, Caffe2, ONNX, n-graph
2016 PyTorch, TensorFlow Eager Execution, **==PaddlePaddle Fluid==**
+ +--- + + +###

目标

+ + + +- 提高对各类机器学习任务的描述能力:能够描述潜在出现的任意机器学习模型。 +- 代码结构逻辑清晰,各模块充分解耦:内外部贡献者能够专注于自己所需的功能模块,基于框架进行再次开发。 +- 从设计上,留下技术优化的空间和潜力。 +- 代码解耦后降低多设备支持、计算优化等的开发成本。 +- 在统一的设计理念下,实现自动可伸缩,自动容错的分布式计算。 + + + +--- + +## ==2.== Design Overview + +--- + +# Fluid: 系统形态 + +- [编译器式的执行流程,区分编译时和运行时](https://github.com/PaddlePaddle/Paddle/blob/develop/doc/fluid/design/motivation/fluid_compiler.md) +
+ +

+ +

+ +--- + +#### 让我们在Fluid程序实例中,区分编译时和运行时 + +--- +### Fluid 编译时 + + + +- ==**定义前向计算**== + + ```python + x = fluid.layers.data(name='x',shape=[13], dtype='float32') + y_predict = fluid.layers.fc(input=x, size=1, act=None) + y = fluid.layers.data(name='y', shape=[1], dtype='float32') + cost = fluid.layers.square_error_cost(input=y_predict, label=y) + avg_cost = fluid.layers.mean(x=cost) + ``` + +- ==**添加反向、正则、优化**== + ```python + learning_rate = 0.01 + sgd_optimizer = fluid.optimizer.SGD(learning_rate) + sgd_optimizer.minimize(avg_cost) + ``` + + +--- + +### `Program` vs. 计算图 + + + +- 在科学计算领域,计算图是一种描述计算的经典方式。下图展示了从前向计算图(蓝色)开始,通过添加反向(红色)和优化算法相关(绿色)操作,构建出整个计算图的过程: +- +

+ +

+ + +- Fluid ==使用`Program`而不是计算图==来描述模型和优化过程。`Program`由`Block`、`Operator`和`Variable`构成,相关概念会在后文详细展开。 +- 编译时 Fluid 接受前向计算(这里可以先简单的理解为是一段有序的计算流)`Program`,为这段前向计算按照:前向 -> 反向 -> 梯度 clip -> 正则 -> 优化 的顺序,添加相关 `Operator`和`Variable`到`Program`到完整的计算。 + +
+ +--- + +### Fluid 运行时 + + + +- ==**读入数据**== + + ```python + train_reader = paddle.batch( + paddle.reader.shuffle(paddle.dataset.uci_housing.train(), buf_size=500), + batch_size=20) + feeder = fluid.DataFeeder(place=place, feed_list=[x, y]) + ``` +- ==**定义执行程序的设备**== + ```python + place = fluid.CPUPlace() + feeder = fluid.DataFeeder(place=place,feed_list=[x, y]) + ``` + +- ==创建执行器(Executor),执行初始化 `Program`和训练`Program`== + + ```python + exe = fluid.Executor(place) + exe.run(fluid.default_startup_program()) + PASS_NUM = 100 + for pass_id in range(PASS_NUM): + for data in train_reader(): + avg_loss_value, = exe.run(fluid.default_main_program(), + feed=feeder.feed(data), + fetch_list=[avg_cost]) + print(avg_loss_value) + ``` + + +--- + +### 总结:框架做什么?用户做什么? +
+ + + + + + + + + + + + + + + + +
构建训练执行训练
+用户:描述前向运算
框架:添加反向运算
框架:添加优化运算
框架:添加内存优化
框架:添加并行/多设备/分布式相关的计算单元 +
+框架:创建Operator(计算)+ Variable(数据)
框架:创建`Block`
框架:内存管理/设备管理
框架:执行计算 +
+
+ +--- + +###

总结:编译时

+ + +**用户编写一段Python程序,描述模型的前向计算** +1. 创建变量描述 `VarDesc` +1. 创建operators的描述 `OpDesc` +1. 创建operators的属性 +1. 推断变量的类型和形状,进行静态检查:`inferShape` +1. 规划变量的内存复用 +1. 创建反向计算 +1. 添加优化相关的Operators +1. (可选)添加多卡/多机相关的Operator,生成在多卡/多机上运行的程序 + + + +--- + +###

总结:运行时

+ + +**执行规划好的计算** +1. 创建`Executor` +1. 为将要执行的一段计算,在层级式的`Scope`空间中创建`Scope` +1. 创建`Block`,依次执行`Block` + +

+
+ Figure. 编译时运行时概览 +

+ +
+ +--- + +## ==3==. 用户如何描述计算? +--- + +### Fluid:==像写程序一样==定义计算 + + +- 顺序执行 + ```python + x = fluid.layers.data(name='x',shape=[13], dtype='float32') + y_predict = fluid.layers.fc(input=x, size=1, act=None) + y = fluid.layers.data(name='y', shape=[1], dtype='float32') + cost = fluid.layers.square_error_cost(input=y_predict, label=y) + ``` + +- 条件分支: [swith](https://github.com/PaddlePaddle/Paddle/blob/develop/doc/fluid/design/execution/switch.md)、[ifelse](https://github.com/PaddlePaddle/Paddle/blob/develop/doc/fluid/design/execution/if_else_op.md) + + ```python + a = fluid.Var(10) + b = fluid.Var(0) + + switch = fluid.switch() + with switch.block(): + with switch.case(fluid.less_equal(a, 10)): + fluid.print("Case 1") + with switch.case(fluid.larger(a, 0)): + fluid.print("Case 2") + with switch.default(): + fluid.print("Case 3") + ``` + +>[A Lisp cond form may be compared to a continued if-then-else as found in many algebraic programming languages](https://www.cs.cmu.edu/Groups/AI/html/cltl/clm/node84.html). + + + +--- + +### Fluid: ==像写程序一样==定义计算 + + + +- 循环:[while](https://github.com/PaddlePaddle/Paddle/blob/develop/python/paddle/fluid/tests/book/test_machine_translation.py#L105) + + ```python + d0 = layers.data("d0", shape=[10], dtype='float32') + data_array = layers.array_write(x=d0, i=i) + array_len = layers.fill_constant(shape=[1],dtype='int64', value=3) + + cond = layers.less_than(x=i, y=array_len) + while_op = layers.While(cond=cond) + with while_op.block(): + d = layers.array_read(array=data_array, i=i) + i = layers.increment(x=i, in_place=True) + layers.array_write(result, i=i, array=d) + layers.less_than(x=i, y=array_len, cond=cond) + ``` + +- 完整实例请点查看 [->](https://github.com/PaddlePaddle/Paddle/blob/develop/python/paddle/fluid/tests/unittests/test_while_op.py#L36-L44) +- beam search [->]( https://github.com/PaddlePaddle/Paddle/blob/develop/python/paddle/fluid/tests/book/test_machine_translation.py#L105) + + + +--- + +####

总结

+ + + +1. 用户层提供的描述语法具有完备性、自洽性,有能力支持对复杂计算过程描述 +1. 使用方式和核心概念可以类比编程语言,认知能够直接迁移 +1. 能够支持:定义问题,逐步求解 + + + +--- + +## ==3.== 核心概念 + +--- +### 编译时概念 :==变量和计算的描述== + + + +- `VarDesc` + `TensorDesc` + `OpDesc` -> `BlockDesc` -> `ProgramDesc` + - https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/fluid/framework/framework.proto + +- 什么是 Fluid Program + + - 在Fluid中,一个神经网络任务(训练/预测)被描述为一段`Program` + - `Program`包含对`Variable`(数据)和 `Operator`(对数据的操作)的描述 + - `Variable` 和 `Operator` 被组织为多个可以嵌套的`Block`,构成一段完整的`Fluid Program` + + +>编译阶段最终,经过 Transpiler 的执行规划,变换处理,生成使用`protobuf`序列化后的`ProgramDesc`。可以发送给多卡或者网络中的其它计算节点执行 + + + +--- + +### 编译时概念 :==**[Transpiler](https://github.com/PaddlePaddle/Paddle/blob/develop/doc/fluid/design/motivation/fluid_compiler.md)**== + + +1. 接受一段`ProgramDesc`作为输入,生成一段新的`ProgramDesc` + + - *Memory optimization transpiler*:向原始`ProgramDesc` 中插入 `FreeMemoryOps`,在一次迭代优化结束前提前释放内存,使得能够维持较小的 memory footprint + + - *Distributed training transpiler*:将原始的`ProgramDesc`中转化为对应的分布式版本,生成两段新的`ProgramDesc`: + 1. trainer进程执行的`ProgramDesc` + 1. parameter server执行的`ProgramDesc` + +1. ==**WIP**==: 接受一段`ProgramDesc`,生成可直接被`gcc`, `nvcc`, `icc`等编译的代码,编译后得到可执行文件 + + + +--- +### Transplier + +

+ +

+ +--- + +### 打印 `ProgramDesc` + +

+ +

+ + + +- `default_startup_program`:创建可学习参数,对参数进行初始化 +- `default_main_program`:由用户定义的模型,包括了前向、反向、优化及所有必要的计算 + +- 打印可读的 `Program` + ```python + from paddle.v2.fluid import debuger + print debuger.pprint_program_codes(framework.default_main_program().desc) + ``` + + +--- +### 输出效果 + + + + + + + + + + + + + + +
variable in block 0variable in block 0
+
+ +--- + +### 运行时概念 + + + +- 数据相关 + - `Tensor` / `LoDTensor` / `Variable` + - `Scope` + +- 计算相关 + - `Block` + - `Kernel`、`OpWithKernel`、`OpWithoutKernel` + + + + + + + + + + + + + + + + + + + + + + + + + + + +
protobuf messagesC++ class objects
Data[VarDesc](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/fluid/framework/framework.proto#L107) +[Variable](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/fluid/framework/variable.h#L24) +
Operation[OpDesc](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/fluid/framework/framework.proto#L35) +[Operator](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/fluid/framework/operator.h#L64) +
BlockBlockDesc +Block +
+ +- 执行相关 :`Executor` + +
+ +--- +#### Tensor 和 LoD(Level-of-Detail) Tensor + + +- Tensor 是$n$-dimensional arry的推广,LoDTensor是在Tensor基础上附加了序列信息 +- Fluid中输入、输出,网络中的可学习参数全部统一使用LoDTensor(n-dimension array)表示 +- 一个mini-batch输入数据是一个LoDTensor + - 在Fluid中,RNN 处理变长序列无需padding,得益于 `LoDTensor`表示 + - 可以简单将 LoD 理解为:`std::vector>` + - 对非序列数据,LoD 信息为空 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
TensorFlowPaddlePaddle
RNNSupport +Support +
recursive RNNSupport +Support +
padding zerosMust +No need +
blob data typeTensor +LODTensor +
+ +
+ +--- +#### LoD 信息实例 + + + +

+ +

+ +- 图(a)的LoD 信息 + ```cpp + [0, 5, 8, 10, 14] + ``` +- 图(b)的 LoD 信息 + ```cpp + [[0, 5, 8, 10, 14] /*level=1*/, [0, 2, 3, 5, 7, 8, 10, 13, 14] /*level=2*/] + ``` +
+ +--- +#### Tensor, Variable, Scope 之间的关系 + +

+ +

+ + +1. `Block` 是一个实现层的概念,不在应用层暴露给用户。目前用户无法自行创建并利用`Block`,用户能够感知的只有`Program`这个概念。 +1. 逻辑上,可以将 `Block` 类比为编程语言中的大括号:定义了一段作用域,其中运行一段代码 +1. `Executor`会为每一个`Block`创建一个`Scope`,`Block`是可嵌套的,因此`Scope`也是可嵌套的 + + + +--- +### Executor + + + + + + + + + + + + + + +
接口说明

+ +

输入
1. `ProgramDesc`
2. `Scope`
3.`block_id`

解释执行步骤
1. 创建所有 Variables
2. 逐一创建 Operator 并运行 +
+ +--- +### Operator/OpWithKernel/Kernel + + +

+ +

+ +- operator 无状态,Operator的核心是==Run==方法 +- 一个operator可以注册多个kernel +- operator 可以无 kernel:while_op 、ifelse op + +
+ +--- +#### Fluid Operator vs. PaddlePaddle layers + + + + + + + + + + + + + + + + + + +
LayerOperator

+ +

+ +

1. 内部维护状态
2. 包含forward和backward方法
1. 内部无状态
2. 只有Run方法
+ +
+ +--- + +### ==4.== 内存管理 + +--- +### 目标 + +- 为异构设备提供统一的内存分配、回收接口 +- 最小化管理内存所需的时间,最小化管理开销 +- 减少内存碎片 +- 将内存管理与计算(Operators/Kernels)完全剥离 +- 统一内存管理是内存优化的基础 + +--- + + + +### Memory 接口 + +- 内存管理模块向上层应用逻辑提供三个基础接口: + ```cpp + template + void* Alloc(Place place, size_t size); + + template + void Free(Place place, void* ptr); + + template + size_t Used(Place place); + + struct Usage : public boost::static_visitor { + size_t operator()(const platform::CPUPlace& cpu) const; + size_t operator()(const platform::CUDAPlace& gpu) const; + }; + ``` +- 模板参数 `Place` 指示内存分配发生的设备 +- 实现时,需特化支持的 `Place`, 提供以上三个接口的实现 + + + +--- +### 代码结构 + + + +内存管理模块可以理解为由以下两部分构成: + +1. SystemAllocator:实际从物理设备上分配、释放的内存的接口 +1. BuddyAllocator:内存管理算法 + + + +--- +### System Allocator + + + +- SystemAllocator 是实现物理内存分配、回收的基类 + - 不同设备上的内存分配和回收终将转化为标准接口调用 + - 为不同设备实现MemoryAllocator,继承自SystemAllocator + + ```cpp + class SystemAllocator { + public: + virtual ~SystemAllocator() {} + virtual void* Alloc(size_t& index, size_t size) = 0; + virtual void Free(void* p, size_t size, size_t index) = 0; + virtual bool UseGpu() const = 0; + }; + ``` + + +--- + +### CPU/GPU Allocator + + + +```cpp +class CPUAllocator : public SystemAllocator { + public: + virtual void* Alloc(size_t& index, size_t size); + virtual void Free(void* p, size_t size, size_t index); + virtual bool UseGpu() const; +}; + +#ifdef PADDLE_WITH_CUDA +class GPUAllocator : public SystemAllocator { + public: + virtual void* Alloc(size_t& index, size_t size); + virtual void Free(void* p, size_t size, size_t index); + virtual bool UseGpu() const; + private: + size_t gpu_alloc_size_ = 0; + size_t fallback_alloc_size_ = 0; +}; +#endif +``` +- CPUAllocator和GPUAllocator分别继承自SystemAllocator,分别调用相应的标准库函数实现物理内存的分配和释放。 +- 一旦大块、连续的物理内存分配之后,将通过内存管理算法实现内存的按块分配、回收、重用等。 + + + +--- +### CPU Allocator + + + +- CPU 内存的分配提供两种选项: + 1. non-pinned memory:可分页内存 + 2. 
pinned memory:页锁定内存 + - 分配过大的页锁定内存有可能因为系统可使用的分页内存减少,影响系统性能,默认CPU下分配的是可分页内存 + +- 通过gflags进行设置一次性分配内存的大小以及是否使用页锁定内存。 + + ```cpp + DEFINE_bool(use_pinned_memory, true, "If set, allocate cpu pinned memory."); + DEFINE_double(fraction_of_cpu_memory_to_use, 1, + "Default use 100% of CPU memory for PaddlePaddle," + "reserve the rest for page tables, etc"); + ``` + + + +--- +### GPU Allocator + + + +- 通过 cudaMalloc 分配GPU显存 +- GPUAllocator::Alloc 首先会计算指定GPU device上的可用显存 + - 如果可用显存小于请求分配大小,调用cudaMalloc进行分配 + - 如果可用显存不足,目前会报错退出。 +- 通过gflags控制GPU下一次性分配显存的大小: + + ```cpp + DEFINE_double(fraction_of_gpu_memory_to_use, 0.92, + "Default use 92% of GPU memory for PaddlePaddle," + "reserve the rest for page tables, etc"); + ``` + + + +--- +#### 内存管理算法: [Buddy Memory Allocation](https://en.wikipedia.org/wiki/Buddy_memory_allocation) + + + +- Memory Arena:一次性分配大块连续内存,之后会基于这块内存进行内存管理:动态分配、释放、重用内存块。 +- 伙伴内存分配: + - 将内存划分为 2 的幂次方个分区,使用 best-fit 方法来分配内存请求。 + - 当释放内存时,检查 buddy 块,查看相邻的内存块是否也已被释放。如果是,将内存块合并,以最小化内存碎片。 + - 分配的内存在物理内存的自然边界对齐,提高内存访问效率。 + - 算法的时间效率高,单使用 best-fit 方法的缘故,会产生一定的内存浪费 + + + +--- + +### Buddy Allocator + + + +- BuddyAllocator 是一个单例,每个设备(如: GPU/CPU(0)/GPU(1)) 拥有一个BuddyAllocator +- BuddyAllocator 内部拥有一个私有成员变量 SystemAllocator +- 当请求的内存超过BuddyAllocator管理的空余内存时,将会调用SystemAllocator去指定的设备上分配物理内存 + + + +--- +### 实例:CPU 下内存管理接口的实现 + + + +- 对上层应用,统一通过BuddyAllocator来实现内存的分配、释放以及用量查询 + ```cpp + template <> + void* Alloc(platform::CPUPlace place, size_t size) { + VLOG(10) << "Allocate " << size << " bytes on " << platform::Place(place); + void* p = GetCPUBuddyAllocator()->Alloc(size); + VLOG(10) << " pointer=" << p; + return p; + } + + template <> + void Free(platform::CPUPlace place, void* p) { + VLOG(10) << "Free pointer=" << p << " on " << platform::Place(place); + GetCPUBuddyAllocator()->Free(p); + } + + template <> + size_t Used(platform::CPUPlace place) { + return GetCPUBuddyAllocator()->Used(); + } + ``` + + +--- +### ==5.== 多设备支持 + +--- +### 多设备支持(一) + + + +- step 1:添加Place类型,由用户实现添加到框架 + - 可以将Place类型理解为一个整数加上一个枚举型,包括:设备号 + 设备类型 + +

+ +

+- DeviceContext + - 不同的Place会对应一个相应的DeviceContext,用于组织管理与设备相关的信息 + - 例如,GpuDeviceContext中会管理Cuda stream + - 目前实现中一些特殊的库也会对应有自己的DeviceContext:例如: + ```cpp + class MKLDNNDeviceContext : public CPUDeviceContext {……} + ``` + - 每种设备对应的DeviceContext需要管理的内容不尽相同,视具体需求来实现 + +
+ +--- + +### 多设备支持(二) + + + +- step 2: 增加KernelType,为相应的KernelType注册Kernel对象,由用户实现注册给框架 可以按照: + 1. Place 执行设备 + 1. DataType 执行数据类型 FP32/FP64/INT32/INT64 + 1. Memory layout: 运行时 Tensor 在内存中的排布格式 NCHW、 NHWC + 1. 使用的库 + + 来区分Kernel,为同一个operator注册多个 Kernel。 + + ```cpp + struct OpKernelType { + proto::DataType data_type_; + DataLayout data_layout_; + platform::Place place_; + LibraryType library_type_; + } + ``` + + + +--- + +### 多设备支持(三) + + + +step 3: 运行时的 KernelType 推断和Kernel切换,按需要修改Kernel推断和Kernel切换规则 +- Expected Kernel:期待调用的Kernel:由(1)`Place`和计算精度决定;或(2)用户在配置中显示指定使用的计算库,如`cudnn`、`mkldnn`等。 +- Actual Kernel:运行时从`Operator`的输入(`Variable`)可以推断出实际需要的`KernelType` +- 当Expected Kernel和Actual Kernel不一致的时候,框架会插入`data_transformer`或者`data_layerout_transform`等,保证Expected Kernel可以执行,包括: + - CPUPlace -> GPUPlace :跨设备内存复制 + - NCHW -> nChw8c :Layout转换 + - FP32 -> FP16 :精度转换 _**尚未支持**_ + - …… +- 以上过程实现在OperatorWithKernel类的Run方法中 [->](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/fluid/framework/operator.cc#L497) + + + +--- +## ==6.== while_op + +--- +### while_op + + + +- 循环执行一段`Program`,直到条件operator判断循环条件不满足时终止循环 +- while_op 的特殊之处: + 1. while_op 没有 kernel + 1. while_op 拥有自己的`Block`,会形成一段嵌套的`Block` + 1. ==while_op 内部创建了一个 Executor,来循环执行`Block`== + +- while_op 输入输出 : LoDTensorArray + ```cpp + namespace paddle { + namespace framework { + using LoDTensorArray = std::vector; + } + } + ``` + - 每一次循环,从原始输入中“切出”一个片段 + - LoDTensorArray 在Python端暴露,是Fluid支持的基础数据结构之一,用户可以直接创建并使用 + + + +--- +### while_op [Run](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/fluid/operators/while_op.cc#L42) 方法概览 + + + +```cpp + +void Run(const framework::Scope &scope, + const platform::Place &dev_place) const override { + PADDLE_ENFORCE_NOT_NULL(scope.FindVar(Input(kCondition))); + auto &cond = scope.FindVar(Input(kCondition))->Get(); + PADDLE_ENFORCE_EQ(cond.dims(), paddle::framework::make_ddim({1})); + + framework::Executor executor(dev_place); + auto *block = Attr(kStepBlock); + + auto *program = block->Program(); + auto step_scopes = + scope.FindVar(Output(kStepScopes))->GetMutable(); + + while (cond.data()[0]) { + auto ¤t_scope = scope.NewScope(); + step_scopes->push_back(¤t_scope); + executor.Run(*program, ¤t_scope, block->ID(), + false /*create_local_scope*/); + } +} + +``` + + + +--- +### while_op 的重要应用:Dynamic RNN + +--- + +### 什么是 `dynamicRNN` ? + + +
+ +1. 用户可以自定义在一个时间步之内的计算, 框架接受序列输入数据,在其上循环调用用户定义的单步计算 +1. 可学习参数在多个时间步之间共享 +1. `dynamicRNN` 由 `while_op` 实现 +1. 如果`dynamicRNN`中定义了`memory`,将会构成一个循环神经网络,否则其行为就等于在输入序列上循环调用预定义的单步计算 + +
+ +--- + +#### `dynamic RNN` 用户接口 + + +

+ +

+ +- `dynamicRNN` 中的重要元素 + 1. **step input**: `dynamicRNN` 每个时间步的输入 + 1. **step function**: 用户定义的单步计算 + 1. **memory**: 用于形成循环连接 + 1. **external/static memory**:单步计算的每一步都可以全部读取到的外部输入 + +
+ +--- + +#### dynamicRNN 中的 Memory + + + +`dynamicRNN`中`memory`的行为非常类似于 C++ 中的引用变量 + - `memory` “指向” 一个operator的输出变量,记作: A + - `memory` 可以被 LoDTensor 初始化(当LoD信息为空时,为非序列,否则为序列),默认`memory`被初始化为零 + - `memory` 在 operator A 前向计算之后,进行前向计算 + - 当 `memory` 的前向计算会 "指向" A 的输出 LoDTensor + - `memory` 的输出可以是另一个 operator 的输入,于是形成了“循环”连接 + + + +--- + +### DynamicRNN 实现细节 + + + +- `while_op` 无法独立构成dynamicRNN,必须和一组相关的 operator 及数据结构配合 + - 依赖的 operators (这里仅列出最重要的,并非全部): + - `lod_rank_table` operator + - `lod_tensor_to_array` operator + - `array_to_lod_tensor` operator + - `shrink_memory` operator + - 依赖的数据结构 + - `TensorArray` + - `LoDRankTable` + +- 在Fluid中,RNN接受变长序列输入,无需填充,以上数据结构和相关的operator配合工作,实现了对变长输入以batch计算 + + + +--- + +### `dynamicRNN` 如何实现 batch 计算 ? + + + +- 问题: + - RNN 可以看作是一个展开的前向网络,前向网络的深度是最长序列的长度 + - 如果不对变长序列进行填充,将它们填充到一样长度,每个mini-batch输入将会不等长,每个样本展开长度不一致,导致前向和反向计算实现困难 + + + +---- +##### 实例 :RNN encoder-decoder with attention + + + +- 以机器翻译的RNN encoder-decoder 模型(涉及了`dynamicRNN`的所有设计要素)为例,下图是 RNN encoder-decoder 的原始输入: +

+
Figure. RNN encoder-decoder 原始batch 输入数据 +

+ +- source word sequences 是encoder RNN的输出,是一个LoDTensor +- target word sequences 是look_uptable的输入,是一个LoDTensor +- 上图中一个矩形方块是CPU/GPU内存中一片连续的内存空间,表示一个dense vector + +
+ +--- + +### `dynamicRNN` 如何实现 batch 计算 ? + + + +1. 对一个mini batch中不等长样本进行排序,最长样本变成batch中的第一个,最短样本是batch中最后一个 + - `LoDTensor` -> `LoDRankTable` :heavy_plus_sign: `lod_rank_table operaator` + - 可以将`LoDRankTable`理解为对LoDTensor中的多个序列按照长度排序LoDRankTable 存储了排序之后的index + +2. 构建每个时间步的batch输入:随着时间步增加,每个时间步的batch输入可能会逐渐缩小 + - `TensorArray` :heavy_plus_sign: `lod_tensor_to_array` -> `LoDTensor` (without LoD) +3. 每个时间步输出写入一个输出 `LoDTensorArray` +3. `dynamicRNN`循环结束后, 按照`LoDRankTable`中记录的信息对输出`LoDTensorArray`重排序,还原会原始输入顺序 + - `TensorArray` :heavy_plus_sign: `array_to_lod_tensor` -> `LoDTensor` + + + +--- + +### 运行实例 + +

+ +

+ +--- +### 运行实例 + +

+ +

+ + + +- 执行到第5~7个batch时,batch size将会缩小 + + + +--- +### 运行实例 + +

+ +

+ + + +- 第5 ~ 7个batch时RNN的`memory`会发生什么? + - `memory` 指向某个operator的输出Tensor,在该operator前向计算之后,“取回”其计算结果 + - 5 ~ 7时,遇到了序列的结束,==下一个时间步计算不再需要在已经结束的序列上展开== + - 在`dynamicRNN`中`shrink_memory` operator 用来缩小`memory`的batch输入 + + + +--- +### 运行实例:batch 1 ~ 2 + +

+
Figure. 第1、2个batch输入dynamicRNN的batch输入 +

+ +--- +### 运行实例:batch 3 ~ 4 + +

+
Figure. 第3、4个batch输入dynamicRNN的batch输入 +

+ +--- + +### 运行实例:batch 5 ~ 7 + +

+
Figure. 第5、6、7个batch输入dynamicRNN的batch输入 +

+ +--- +### ==7.== Fluid 代码结构 + +--- +### Fluid 代码结构 + + + + + + + + + + + + + + + +
代码结构模块结构
+

+ +

+
+

+ +

+
+ +--- + +### ==8.== 文档总结 + +--- + + +- 设计概览 + - 重构概览 [->](https://github.com/PaddlePaddle/Paddle/blob/develop/doc/design/refactorization.md) + - fluid [->](https://github.com/PaddlePaddle/Paddle/blob/develop/doc/design/fluid.md) + - fluid_compiler [->](https://github.com/PaddlePaddle/Paddle/blob/develop/doc/fluid/design/motivation/fluid_compiler.md) +- 核心概念 + - variable 描述 [->](https://github.com/PaddlePaddle/Paddle/blob/develop/doc/design/var_desc.md) + - Tensor [->](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/framework/tensor.md) + - LoDTensor [->](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/framework/lod_tensor.md) + - TensorArray [->](https://github.com/PaddlePaddle/Paddle/blob/develop/doc/design/tensor_array.md) + - Program [->](https://github.com/PaddlePaddle/Paddle/blob/develop/doc/design/program.md) + - Block [->](https://github.com/PaddlePaddle/Paddle/blob/develop/doc/design/block.md) + - Scope [->](https://github.com/PaddlePaddle/Paddle/blob/develop/doc/design/scope.md) + +--- + +- 重要功能模块 + - backward [->](https://github.com/PaddlePaddle/Paddle/blob/develop/doc/design/backward.md) + - 内存优化 [->](https://github.com/PaddlePaddle/Paddle/blob/develop/doc/design/memory_optimization.md) + - evaluator [->](https://github.com/PaddlePaddle/Paddle/blob/develop/doc/design/executor.md) + - python API [->](https://github.com/PaddlePaddle/Paddle/blob/develop/doc/design/python_api.md) + - regularization [->](https://github.com/PaddlePaddle/Paddle/blob/develop/doc/design/regularization.md) + +- 开发指南 + - 支持新设硬件设备库 [->](https://github.com/PaddlePaddle/Paddle/blob/develop/doc/design/support_new_device.md) + - 添加新的Operator [->](https://github.com/PaddlePaddle/Paddle/blob/develop/doc/howto/dev/new_op_cn.md) + - 添加新的Kernel [->]( +https://github.com/PaddlePaddle/Paddle/blob/develop/doc/howto/dev/new_op_kernel_en.md) + + + +--- + +### ==9.== 开发指南 + +--- + +#### 建议开发环境:使用 Docker 编译和测试 + + + +Docker编译PaddlePaddle源码: [->](http://www.paddlepaddle.org/docs/develop/documentation/fluid/zh/build_and_install/docker_install_cn.html) + +PaddlePaddle 在 Dockerhub 地址:[->]( + https://hub.docker.com/r/paddlepaddle/paddle/tags/) + +1. 获取PaddlePaddle的Docker镜像 + ```bash + docker pull paddlepaddle/paddle:latest-dev + ``` + +1. 启动 docker container + + ```bash + docker run -it -v $PWD/Paddle:/paddle paddlepaddle/paddle:latest-dev /bin/bash + ``` + +1. 进入docker container后,从源码编译,请参考文档 [->]( http://www.paddlepaddle.org/docs/develop/documentation/fluid/zh/build_and_install/build_from_source_cn.html) + + + +--- + +### 一些说明 + + + +1. PaddlePaddle的Docker镜像为了减小体积,默认没有安装vim,可以在容器中执行`apt-get install -y vim`来安装vim。 +1. 开发推荐使用tag为`latest-dev`的镜像,其中打包了所有编译依赖。`latest`及`lastest-gpu`是production镜像,主要用于运行PaddlePaddle程序。 +2. 在Docker中运行GPU程序,推荐使用nvidia-docker,[否则需要将CUDA库和设备挂载到Docker容器内](http://www.paddlepaddle.org/docs/develop/documentation/fluid/zh/build_and_install/docker_install_cn.html)。 + + + ```bash + nvidia-docker run -it -v $PWD/Paddle:/paddle paddlepaddle/paddle:latest-dev /bin/bash + ``` + + + + + +--- + +### [如何贡献](http://www.paddlepaddle.org/docs/develop/documentation/fluid/zh/dev/contribute_to_paddle_cn.html) + + + +- ==提交PullRequest前请务必阅读==: [->](http://www.paddlepaddle.org/docs/develop/documentation/fluid/zh/dev/contribute_to_paddle_cn.html) +- 代码要求 + 1. 代码注释遵守 Doxygen 的样式 + 1. 确保编译器选项 WITH_STYLE_CHECK 已打开,并且编译能通过代码样式检查 + 1. 所有代码必须具有单元测试,且能够通过所有单元测试 +- 使用 `pre-commit` 钩子提交Pull Request + 1. 帮助格式化源代码(C++,Python) + 1. 在提交前自动检查一些基本事宜:如每个文件只有一个 EOL,Git 中不要添加大文件等 + 1. 
安装pre-commit,并在PaddlePaddle根目录运行: + ```bash + ➜ pip install pre-commit + ➜ pre-commit install + ``` + + +--- + +### 如何贡献 + + + +1. 开始开发之前请先建立issue。 + - 让其它同学知道某项工作已经有人在进行,以避免多人开发同一功能的情况。 +1. 提交PR必须关联相关的issue。做法请参考:[->](https://help.github.com/articles/closing-issues-using-keywords/) + - 目的:为了在提交的版本中留有记录描述这个PR是为了开发什么样的功能,为了解决什么样的问题。 + - 当PR被merge后,关联的issue会被自动关闭。 +1. PR review 中,reviewer的每条comment都必须回复。 + - 如修改完可直接回复:Done。 + - 目的:review comment 中可能会有(1)询问类型的问题;(2)可以在下一个PR修改的问题;(3)comment意见不合理等。需要明确回复,以便reviewer和其他人有历史可查,便于区分是否已经进行修改,或者准备下一个PR修改,或者意见不合理可以不用进行修改。 + + + +--- + +### ==10.== 添加新的 Operator + +--- + +### 概念简介 + + + +添加一个新的operator,会涉及实现以下C++类的派生类: + +1. `framework::OperatorBase`: Operator(简写,Op)基类。 +1. `framework::OpKernel`: Op计算函数的基类,称作Kernel。 +1. `framework::OperatorWithKernel`:继承自OperatorBase,Op有计算函数,称作有Kernel。 +1. `class OpProtoAndCheckerMaker`:描述该Op的输入、输出、属性、注释,主要用于Python API接口生成 + +依据是否包含kernel,可以将Op分为两种: +1. 包含Kernel的Op:继承自OperatorWithKernel,==绝大多数operator都属于这一类== +1. 不包含kernel的Op,继承自OperatorBase,只有少量Op属于这一类,例如while_op,ifelse_op + +这里主要介绍带Kernel的Op如何编写。 + + + +--- + +#### 添加新的Operator需要修改/添加哪些文件? + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
内容定义位置
+OpProtoMake定义 + +`.cc`文件,Backward Op不需要OpProtoMaker +
+Op定义 + +`.cc`文件 +
+Kernel实现 + +CPU、CUDA共享Kernel实现在`.h`文件中,否则,CPU 实现在`.cc`文件中,CUDA 实现在`.cu`文件中。 +
+注册Op + +Op注册实现在`.cc`文件;Kernel注册CPU实现在`.cc`文件中,CUDA实现在`.cu`文件中 +
+ +- 添加 Operator 之前请阅读:[Operator 命名规范](https://github.com/PaddlePaddle/Paddle/blob/63cca04cfd488a4dab6d6273fd04a8017ef45932/doc/fluid/dev/name_convention.md)及[Operator Markdown注释规范](https://github.com/PaddlePaddle/Paddle/blob/63cca04cfd488a4dab6d6273fd04a8017ef45932/doc/fluid/dev/op_markdown_format.md)。 +- 实现新的op都添加至目录[paddle/operators](https://github.com/PaddlePaddle/Paddle/tree/develop/paddle/fluid/operators)下,文件命名以`*_op.h`(如有) 、 `*_op.cc` 、`*_op.cu`(如有)结尾。 +- 根据文件名自动构建op和Python端绑定,请务必遵守以上命名,否则需要进一步修改PyBind相关文件及CMakeLists.txt。 +
+ +--- + +###### 实现带Kernel的Operator step1: 定义ProtoMaker类 + + + +下面均以[clip_op](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/fluid/operators/clip_op.h)为例进行介绍 + +- clip_op计算公式:$Out = \min(\max(X, min), max)$ +- 首先定义`ProtoMaker`来描述该Op的输入、输出,并添加注释(*下面代码段的中注释进行了简化,实现时需按照规范添加注释*): + + ```cpp + template + class ClipOpMaker : public framework::OpProtoAndCheckerMaker { + public: + ClipOpMaker(OpProto* proto, OpAttrChecker* op_checker) + : OpProtoAndCheckerMaker(proto, op_checker) { + AddInput("X","(Tensor)The input of clip op."); + AddOutput("Out", "(Tensor),The output of clip op."); + AddAttr( + "min", "(float),Minimum value."); + AddAttr( + "max", "(float),Maximum value."); + AddComment(R"DOC( + …… + )DOC"); + } + }; + ``` + + + +--- + +###### 实现带Kernel的Operator step2: 定义Operator类 + + + +下面的代码段实现了`clip_op`的定义: + +```cpp +class ClipOp : public framework::OperatorWithKernel { + public: + using framework::OperatorWithKernel::OperatorWithKernel; + + void InferShape(framework::InferShapeContext* ctx) const override { + PADDLE_ENFORCE(ctx->HasInput("X"), + "Input(X) of ClipOp should not be null."); + PADDLE_ENFORCE(ctx->HasOutput("Out"), + "Output(Out) of ClipOp should not be null."); + auto x_dims = ctx->GetInputDim("X"); + auto max = ctx->Attrs().Get("max"); + auto min = ctx->Attrs().Get("min"); + PADDLE_ENFORCE_LT(min, max, "max should be greater than min."); + ctx->SetOutputDim("Out", x_dims); + ctx->ShareLoD("X", /*->*/ "Out"); + } +}; +``` + + +--- + +### Operator 类中需要完成的工作 + + + +1. clip_op 继承自`OperatorWithKernel`, + + ```cpp + using framework::OperatorWithKernel::OperatorWithKernel; + ``` + 表示使用基类`OperatorWithKernel`的构造函数。 + +1. 重写`InferShape`接口。 + - `InferShape` 为const函数,不能修改Op的成员变 + - `InferShape` 的参数为 `const framework::InferShapeContext &ctx`,从中可获取到输入输出以及属性 + - `InferShape` 会被调用两次,一次是编译时(创建op),一次是运行时(调用op的`Run`方法时),需要完成以下功能: + 1. 做检查, 尽早报错:检查输入数据维度、类型等是否合法 + 2. 设置输出Tensor的形状 + +通常`OpProtoMaker`和`Op`类的定义写在`.cc`文件中。 + + + +--- + +### 补充说明 + + + +1. `InferShape`目前支持两种实现方式,二者最后都会生成一个functor注册给OpInfo结构体。 + 1. 继承framework::InferShapeBase,实现为一个functor(参考 [mul_op](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/fluid/operators/mul_op.cc#L22)) + 2. override InferShape函数(参考 [clip_op](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/fluid/operators/clip_op.cc#L24)) + +1. 什么是`functor` ? + + - 类或结构体仅重载了`()`,一般是可被多个kernel复用的计算函数。 + + + + ```cpp + template + class CrossEntropyFunctor { + public: + void operator()(const platform::CPUDeviceContext& ctx, + framework::Tensor* out, + const framework::Tensor* prob, + const framework::Tensor* labels, const bool softLabel) { + …… + } + }; + ``` + + + - 在 clip_op 内也会看到将一段计算函数抽象为functor的使用法: [->](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/fluid/operators/clip_op.h#L27)。 + + + +--- + +###### 实现带Kernel的Operator step3: 定义OpKernel类 + + + +- `ClipKernel`继承自`framework::OpKernel`,带有下面两个模板参数: + 1. `typename DeviceContext`: 表示设备类型,不同设备共享同一个Kernel时,需添加该模板参数。不共享时,需要提供针对不同设备的特化实现。 + 1. `typename T` : 表示支持的数据类型,如`float`, `double`等 + +- 在`ClipKernel`类中重写`Compute`方法 + 1. `Compute`接受输入参数:`const framework::ExecutionContext& context` + - `ExecutionContext` 是从 `Scope`中将运行时Op的输入、输出`Variable`组织在一起,使得Op在调用`Compute`方法时,能够简单地通过名字拿到需要的输入输出`Variable` + - 与`InferShapeContext`相比,`ExecutionContext` 中增加了设备类型 + 1. 
在`Compute`函数里实现`OpKernel`的具体计算逻辑 + + + +--- +#### ClipKernel 代码概览 + + + +```cpp +template +class ClipKernel : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext& context) const override { + auto max = context.Attr("max"); + auto min = context.Attr("min"); + auto* x = context.Input("X"); + auto* out = context.Output("Out"); + T* out_data = out->mutable_data(context.GetPlace()); + const T* x_data = x->data(); + int64_t numel = x->numel(); + Transform trans; + trans(context.template device_context(), x_data, + x_data + numel, out_data, ClipFunctor(min, max)); + } +}; +``` + +- 为了使`OpKernel`的计算过程书写更加简单,并且CPU、CUDA的代码可以复用, Fluid 使用 Eigen 作为基础的矩阵运算库 +- Fluid对Eigen unsupported Tensor提供了一些基本的封装,可以在`Compute`接口中直接调用 + - 关于在PaddlePaddle中如何使用Eigen库,请参考[使用文档](https://github.com/PaddlePaddle/Paddle/blob/develop/doc/fluid/dev/use_eigen_cn.md)。 + + + +--- +###### 实现带Kernel的Operator step4: 实现反向Op + + + +- ==**反向Op没有`ProtoMaker`**==,除此之外定义与实现方式前向Op完全一致,不再赘述 +- 这里仅对反向Op的输入输出进行说明: + 1. 反向Op的输入 + - 前向Op的输出 + - 反向传播过程中传递给当前Op的梯度 + - 需要注意,Fluid中,不区分Cost Op和中间层Op,所有Op都必须正确处理接收到的梯度 + 2. 反向Op的输出 + - 对可学习参数的求导结果 + - 对所有输入的求导结果 + + + + +--- + +###### 实现带Kernel的Operator step5: 注册Op及Kernel + + + +至此Op和Op kernel都已经实现完毕,接下来,需要在`.cc`和`cu`文件中注册op和kernel + +1. 在`.cc`文件中注册前向、反向Op类,注册CPU Kernel。 + + + + ```cpp + namespace ops = paddle::operators; + REGISTER_OP(clip, ops::ClipOp, ops::ClipOpMaker, clip_grad, + ops::ClipOpGrad); + REGISTER_OP_CPU_KERNEL( + clip, ops::ClipKernel); + REGISTER_OP_CPU_KERNEL( + clip_grad, ops::ClipGradKernel); + ``` + + - 在上面的代码片段中: + + 1. `REGISTER_OP` : 注册`ops::ClipOp`类,类型名为`clip`,该类的`ProtoMaker`为`ops::ClipOpMaker`,注册`ops::ClipOpGrad`,类型名为`clip_grad` + 1. `REGISTER_OP_WITHOUT_GRADIENT` : 用于注册没有反向的Op,例如:优化算法相关的Op + 1. `REGISTER_OP_CPU_KERNEL` :注册`ops::ClipKernel`类,并特化模板参数为`paddle::platform::CPUPlace`和`float`类型,同理,注册`ops::ClipGradKernel`类 + + +1. 按照同样方法,在`.cu`文件中注册GPU Kernel + - 如果CUDA Kernel的实现基于Eigen,需在 `.cu`的开始加上宏定义 `#define EIGEN_USE_GPU` + + + +--- + +##### 编译和Python端绑定 + + + +- 运行下面命令可以仅编译新添加的Op: + + ``` + make mul_op + ``` + - 需注意,运行单元测试需要编译整个工程 + +- 如果遵循前文的文件命名规则,构建过程中,会自动为新增的op添加Python端绑定,并链接到生成的lib库中 + + + +--- + +###### 实现带Kernel的Operator step6: 添加前向单测及梯度检测 + + + +- 新增Op的单元测试统一添加至:[python/paddle/v2/fluid/tests/unittests](https://github.com/PaddlePaddle/Paddle/tree/develop/python/paddle/fluid/tests/unittests)目录 +- 前向Operator单测 + + 1. Op单元测试继承自`OpTest`,各项具体的单元测试在`TestClipOp`里完成,所有单测case都以`TestXX`命名 + 1. 单元测试Operator,需要: + 1. 在`setUp`函数定义输入、输出,以及相关的属性参数 + 1. 生成随机的输入数据 + 1. 在Python脚本中实现与前向operator相同的计算逻辑,得到输出值,与operator前向计算的输出进行对比 + 1. 
反向梯度检测流程测试框架已经实现,直接调用相应接口`check_grad`即可 + +- `clip_op` 单测代码请参考 [->](https://github.com/PaddlePaddle/Paddle/blob/develop/python/paddle/fluid/tests/unittests/test_clip_op.py),这里不再展开 + + + +--- +#### 编译执行单测 + + + +- `python/paddle/v2/framework/tests` 目录下新增的 `test_*.py` 单元测试会被自动加入工程进行编译 + + - 运行单元测试测时需要编译整个工程,并且编译时需要打开`WITH_TESTING`, 即`cmake paddle_dir -DWITH_TESTING=ON` +- 编译成功后,执行下面的命令来运行单元测试: + + ```bash + make test ARGS="-R test_mul_op -V" + ``` + + 或者: + + ``` + ctest -R test_mul_op + ``` + + +--- + +### 添加Op的一些注意事项 + + + +- 为每个Op创建单独的`*_op.h`(如有)、`*_op.cc`和`*_op.cu`(如有)。不允许一个文件中包含多个Op,将会导致编译出错。 +- 注册Op时的类型名,需要和该Op的名字一样。不允许在`A_op.cc`里面,注册`REGISTER_OP(B, ...)`,会导致单元测试出错。 +- 如果Op没有实现CUDA Kernel,不要创建空的`*_op.cu`,会导致单元测试出错。 +- 如果多个Op依赖一些共用的函数,可以创建非`*_op.*`格式的文件来存放,如`gather.h`文件。 + + + +--- + +### ==10.== 使用相关问题 + +--- + +### 定义前向计算 + + + +- 当在python端执行时: + ```python + import paddle.v2.fluid as fluid + ``` + [`framework.py`](https://github.com/PaddlePaddle/Paddle/blob/develop/python/paddle/fluid/framework.py#L1040)定义了两个全局`Program`: + ```python + # program is a global instance. + _main_program_ = Program() + _startup_program_ = Program() + ``` + +- 前向定义的过程就是不断往`mian_program`中添加Op和Variable +- 如果需要执行一个新的`mian_program`时,可以调用调用: + ```python + def switch_main_program(program): + """ + Switch the main program to a new program. + This funtion returns the previous main program. + """ + …… + ``` + + +--- + +### 自定义参数的初始化 + + + +- 调用`fluid.ParamAttr(……)`接口,自定义参数的初始化 + + ```python + w_param_attrs = ParamAttr(name=None, + initializer=UniformInitializer(low=-1.0, high=1.0, seed=0), + learning_rate=1.0, + regularizer=L1Decay(1.0), + trainable=True, + clip=GradientClipByValue(-1.0, 1.0), + ) + y_predict = fluid.layers.fc(input=x, size=1, param_attr=w_param_attrs) + ``` + +- 补充问题:如何创建 `Variable` + ```python + cur_program = Program() + cur_block = cur_program.current_block() + new_var = cur_block.create_var(name="X", shape=[-1, 16, 16], dtype="float32") + ``` + + + +--- + +### 添加反向Op + + + +- 调用`fluid.backward.append_backward(X)`(`X`是一个Variable),来为一段前向`ProgramDesc`添加反Op + + ```python + data = fluid.layers.data(name="data", shape=(2,3,4)) + out = fluid.layers.fc(input=data,size=128,act=None) + loss = fluid.layers.reduce_sum(out) + fluid.backward.append_backward(loss=loss) + ``` + +- 添加优化相关的Op + ```python + sgd_optimizer = fluid.optimizer.SGD(learning_rate=0.001) + sgd_optimizer.minimize(loss) + ``` + +- 可以随时调用`print(fluid.default_main_program())`来输出当前的`main_program` + +- 当构建完成整个`Program`后,调用下面的接口执行内存优化: + ```python + fluid.memory_optimize(fluid.default_main_program()) + ``` + - _注:内存优化目前仍在持续开发中,有可能不够稳定。_ + + + +--- + +### 总结:编译时执行流程 + + + +- 用户定义前向计算 +- 添加反向Op到`default_main_program` +- 添加 gradient clipping Op 到 +- 添加 regularization Op 到`default_main_program` +- 为指定的优化算法,添加相关的状态 variable of optimizer 到`default_startup_program` + - 状态相关 variable是指如学习率, 历史 momentum, 二阶momentum等 +- 添加初始化 variable 的Op 到 `default_startup_program` +- 为整个网络最后一个op,添加设置其接受到的梯度的Op到`default_main_program` +- 进行内存优化规划 + + + +--- + +### Feed 数据 (一):通过 feed 字典 + + + +- 执行executor的run方法时,指定feed字典,feed op 会将指定的数据放到`x`和`y`两个Variable中 + ```python + y_data = np.random.randint(0, 8, [1]).astype("int32") + y_tensor = core.Tensor() + y_tensor.set(y_data, place) + + x_data = np.random.uniform(0.1, 1, [11, 8]).astype("float32") + x_tensor = core.Tensor() + x_tensor.set(x_data, place) + …… + cost = exe.run( + fluid.default_main_program(), + feed={'x': x_tensor, + 'y': y_tensor}, + fetchlist=[avg_cost]) + ``` + +- 这种方法较为底层,一般用于单测中 + + + +--- + +### Feed 数据 
(二):使用 DataFeeder接口 + + + +- 编写一个data_reader函数,data_reader是一个Python generator + + ```python + def demo_reader(): + def random_generator(): + yield np.random.uniform(0.1, 1, [4]), np.random.randint(0, 1, [1]) + return random_generator + ``` +- 在训练任务中使用 DataFeeder 接口 + ```python + cost = exe.run( + fluid.default_main_program(), + feed={'x': x_tensor, + 'y': y_tensor}, + fetchlist=[avg_cost]) + + train_reader = paddle.batch( + paddle.reader.shuffle(demo_reader(), buf_size=500), batch_size=4) + feeder = fluid.DataFeeder(place=place, feed_list=[x, y]) + for data in train_reader(): + cost = exe.run( + fluid.default_main_program(), + feed=feeder.feed(data), + fetch_list=[cost]) + ``` + + + +--- + +### 常见问题 + + + +- 如何使用 evaluator ? [->](https://github.com/PaddlePaddle/Paddle/blob/develop/python/paddle/fluid/tests/book/test_label_semantic_roles.py#L168) + + ```python + accuracy = fluid.evaluator.Accuracy(input=predict, label=label) + for pass_id in range(PASS_NUM): + accuracy.reset() + for data in train_reader(): + loss, acc = exe.run(fluid.default_main_program(), + feed=feeder.feed(data), + fetch_list=[avg_cost] + accuracy.metrics) + pass_acc = accuracy.eval(exe) + # acc 当前一个batch 的 accuracy + # pass_acc 当前batch 的 accuracy + pass_total_acc = accuracy.eval(exe) # 整个pass的accuracy + ``` + +- 如何在训练中测试?[->](https://github.com/dzhwinter/benchmark/blob/master/fluid/vgg16.py#L144) +- 如何保存训练好的模型?[->](https://github.com/PaddlePaddle/Paddle/blob/develop/python/paddle/fluid/tests/book/test_recognize_digits.py#L143) +- 如何加载训练好的模型进行预测?[->](https://github.com/PaddlePaddle/Paddle/blob/develop/python/paddle/fluid/tests/book/test_recognize_digits.py#L154) +- 如何在同一个训练任务中定义多个Program,并交替运行? [->](https://github.com/PaddlePaddle/Paddle/blob/develop/python/paddle/fluid/tests/demo/fc_gan.py) +- 如何profile?Fluid 实现了profile 工具,可以直接调用。请参考示例 [->](https://github.com/PaddlePaddle/Paddle/blob/develop/python/paddle/fluid/tests/unittests/test_profiler.py) + + + + +--- diff --git a/doc/fluid/getstarted/index_cn.rst b/doc/fluid/getstarted/index_cn.rst index 75af7354be..3daea71d09 100644 --- a/doc/fluid/getstarted/index_cn.rst +++ b/doc/fluid/getstarted/index_cn.rst @@ -17,3 +17,4 @@ :maxdepth: 1 concepts/use_concepts_cn.rst + developer's_guide_to_paddle_fluid.md diff --git a/doc/fluid/getstarted/index_en.rst b/doc/fluid/getstarted/index_en.rst index 75a43f4af8..fb20bb4f24 100644 --- a/doc/fluid/getstarted/index_en.rst +++ b/doc/fluid/getstarted/index_en.rst @@ -16,3 +16,4 @@ Here is an example of linear regression. It introduces workflow of PaddlePaddle, :maxdepth: 1 concepts/index_en.rst + developer's_guide_to_paddle_fluid.md diff --git a/doc/fluid/getstarted/quickstart_cn.rst b/doc/fluid/getstarted/quickstart_cn.rst index 135beb75d0..6a964d4f85 100644 --- a/doc/fluid/getstarted/quickstart_cn.rst +++ b/doc/fluid/getstarted/quickstart_cn.rst @@ -11,7 +11,7 @@ PaddlePaddle支持使用pip快速安装,目前支持CentOS 6以上, Ubuntu 14. pip install paddlepaddle -如果需要安装支持GPU的版本(cuda7.5_cudnn5_avx_openblas),需要执行: +如果需要安装支持GPU的版本(cuda8.0_cudnn5_avx_openblas),需要执行: .. code-block:: bash @@ -28,18 +28,18 @@ PaddlePaddle支持使用pip快速安装,目前支持CentOS 6以上, Ubuntu 14. 
import paddle.dataset.uci_housing as uci_housing import paddle.fluid as fluid - + with fluid.scope_guard(fluid.core.Scope()): # initialize executor with cpu exe = fluid.Executor(place=fluid.CPUPlace()) - # load inference model + # load inference model [inference_program, feed_target_names,fetch_targets] = \ fluid.io.load_inference_model(uci_housing.fluid_model(), exe) # run inference - result = exe.run(inference_program, - feed={feed_target_names[0]: uci_housing.predict_reader()}, + result = exe.run(inference_program, + feed={feed_target_names[0]: uci_housing.predict_reader()}, fetch_list=fetch_targets) - # print predicted price is $12,273.97 + # print predicted price is $12,273.97 print 'Predicted price: ${:,.2f}'.format(result[0][0][0] * 1000) 执行 :code:`python housing.py` 瞧! 它应该打印出预测住房数据的清单。 diff --git a/doc/fluid/getstarted/quickstart_en.rst b/doc/fluid/getstarted/quickstart_en.rst index df6619cfd0..680122f258 100644 --- a/doc/fluid/getstarted/quickstart_en.rst +++ b/doc/fluid/getstarted/quickstart_en.rst @@ -12,7 +12,7 @@ Simply run the following command to install, the version is cpu_avx_openblas: pip install paddlepaddle -If you need to install GPU version (cuda7.5_cudnn5_avx_openblas), run: +If you need to install GPU version (cuda8.0_cudnn5_avx_openblas), run: .. code-block:: bash @@ -31,18 +31,18 @@ code: import paddle.dataset.uci_housing as uci_housing import paddle.fluid as fluid - + with fluid.scope_guard(fluid.core.Scope()): # initialize executor with cpu exe = fluid.Executor(place=fluid.CPUPlace()) - # load inference model + # load inference model [inference_program, feed_target_names,fetch_targets] = \ fluid.io.load_inference_model(uci_housing.fluid_model(), exe) # run inference - result = exe.run(inference_program, - feed={feed_target_names[0]: uci_housing.predict_reader()}, + result = exe.run(inference_program, + feed={feed_target_names[0]: uci_housing.predict_reader()}, fetch_list=fetch_targets) - # print predicted price is $12,273.97 + # print predicted price is $12,273.97 print 'Predicted price: ${:,.2f}'.format(result[0][0][0] * 1000) Run :code:`python housing.py` and voila! 
It should print out a list of predictions.
diff --git a/doc/fluid/howto/cluster/fluid_cluster_train_cn.md b/doc/fluid/howto/cluster/fluid_cluster_train_cn.md
index b99b90056b..55326940ce 100644
--- a/doc/fluid/howto/cluster/fluid_cluster_train_cn.md
+++ b/doc/fluid/howto/cluster/fluid_cluster_train_cn.md
@@ -168,13 +168,13 @@ cd /paddle/python/paddle/fluid/tests/book
 第二步,启动Parameter Server:
 ```bash
-PADDLE_INIT_PORT=6174 PADDLE_INIT_PSERVERS=192.168.1.2 TRAINERS=2 POD_IP=192.168.1.2 PADDLE_INIT_TRAINER_ID=1 TRAINING_ROLE=PSERVER python test_fit_a_line.py
+PADDLE_PSERVER_PORT=6174 PADDLE_PSERVER_IPS=192.168.1.2 PADDLE_TRAINERS=2 PADDLE_CURRENT_IP=192.168.1.2 PADDLE_TRAINER_ID=1 PADDLE_TRAINING_ROLE=PSERVER python test_fit_a_line.py
 ```
 执行命令后请等待出现提示: ```Server listening on 192.168.1.2:6174 ```, 表示Parameter Server已经正常启动。

 第三步,启动Trainer:
 ```bash
-PADDLE_INIT_PORT=6174 PADDLE_INIT_PSERVERS=192.168.1.3 TRAINERS=2 POD_IP=192.168.1.3 PADDLE_INIT_TRAINER_ID=1 TRAINING_ROLE=TRAINER python test_fit_a_line.py
+PADDLE_PSERVER_PORT=6174 PADDLE_PSERVER_IPS=192.168.1.3 PADDLE_TRAINERS=2 PADDLE_CURRENT_IP=192.168.1.3 PADDLE_TRAINER_ID=1 PADDLE_TRAINING_ROLE=TRAINER python test_fit_a_line.py
 ```
 由于我们定义的Trainer的数量是2个,因此需要在另外一个计算节点上再启动一个Trainer。
diff --git a/doc/fluid/howto/cluster/fluid_recordio.md b/doc/fluid/howto/cluster/fluid_recordio.md
new file mode 100644
index 0000000000..92859e8f62
--- /dev/null
+++ b/doc/fluid/howto/cluster/fluid_recordio.md
@@ -0,0 +1,127 @@
+# How to use RecordIO in Fluid
+
+If you want to use RecordIO as your training data format, you need to convert your training data
+to RecordIO files and read them during training. PaddlePaddle Fluid provides some
+interfaces to deal with RecordIO files.
+
+## Generate RecordIO File
+
+Before starting to train with RecordIO files, you need to convert your training data
+to the RecordIO format with `fluid.recordio_writer.convert_reader_to_recordio_file`; a sample
+is shown below:
+
+```python
+    reader = paddle.batch(mnist.train(), batch_size=1)
+    feeder = fluid.DataFeeder(
+        feed_list=[  # order is image and label
+            fluid.layers.data(
+                name='image', shape=[784]),
+            fluid.layers.data(
+                name='label', shape=[1], dtype='int64'),
+        ],
+        place=fluid.CPUPlace())
+    fluid.recordio_writer.convert_reader_to_recordio_file('./mnist.recordio', reader, feeder)
+```
+
+The above code snippet would generate a RecordIO file `./mnist.recordio` on your host.
+
+**NOTE**: we recommend setting `batch_size=1` when generating the RecordIO files so that the batch
+size can be adjusted flexibly at reading time.
+
+## Use the RecordIO file in a Local Training Job
+
+PaddlePaddle Fluid provides an interface `fluid.layers.io.open_recordio_file` to load your RecordIO file,
+which you can then use as a Layer in your network configuration; a sample is shown below:
+
+```python
+    data_file = fluid.layers.io.open_recordio_file(
+        filename="./mnist.recordio",
+        shapes=[(-1, 784),(-1, 1)],
+        lod_levels=[0, 0],
+        dtypes=["float32", "int64"])
+    data_file = fluid.layers.io.batch(data_file, batch_size=4)
+
+    img, label = fluid.layers.io.read_file(data_file)
+    hidden = fluid.layers.fc(input=img, size=100, act='tanh')
+    prediction = fluid.layers.fc(input=hidden, size=10, act='softmax')
+    loss = fluid.layers.cross_entropy(input=prediction, label=label)
+    avg_loss = fluid.layers.mean(loss)
+
+    fluid.optimizer.Adam(learning_rate=1e-3).minimize(avg_loss)
+
+    place = fluid.CPUPlace()
+
+    exe = fluid.Executor(place)
+    exe.run(fluid.default_startup_program())
+    avg_loss_np = []
+
+    # train a pass
+    batch_id = 0
+    while True:
+        tmp, = exe.run(fetch_list=[avg_loss])
+
+        avg_loss_np.append(tmp)
+        print(batch_id)
+        batch_id += 1
+```
+
+## Use the RecordIO files in Distributed Training
+
+1. generate multiple RecordIO files
+
+For a distributed training job, you may have multiple trainer nodes
+and one or more RecordIO files for each trainer node. You can use the interface
+`fluid.recordio_writer.convert_reader_to_recordio_files` to convert your training data
+into multiple RecordIO files; a sample is shown below:
+
+```python
+    reader = paddle.batch(mnist.train(), batch_size=1)
+    feeder = fluid.DataFeeder(
+        feed_list=[  # order is image and label
+            fluid.layers.data(
+                name='image', shape=[784]),
+            fluid.layers.data(
+                name='label', shape=[1], dtype='int64'),
+        ],
+        place=fluid.CPUPlace())
+    # arguments: filename_suffix, batch_per_file, reader, feeder
+    fluid.recordio_writer.convert_reader_to_recordio_files(
+        './mnist.recordio', 100, reader, feeder)
+```
+
+The above codes would generate multiple RecordIO files on your host like:
+
+```bash
+.
+ \_mnist-00000.recordio
+ |-mnist-00001.recordio
+ |-mnist-00002.recordio
+ |-mnist-00003.recordio
+ |-mnist-00004.recordio
+```
+
+2. open multiple RecordIO files by `fluid.layers.io.open_files`
+
+For a distributed training job, the distributed operator system schedules trainer processes on multiple nodes,
+and each trainer process reads part of the whole training data. We usually take the following approach to make the training
+data allocated to each trainer process as uniform as possible:
+
+```python
+import glob
+import os
+
+def gen_train_list(file_pattern, trainers, trainer_id):
+    file_list = glob.glob(file_pattern)
+    ret_list = []
+    for idx, f in enumerate(file_list):
+        if idx % trainers == trainer_id:
+            ret_list.append(f)
+    return ret_list
+
+trainers = int(os.getenv("PADDLE_TRAINERS"))
+trainer_id = int(os.getenv("PADDLE_TRAINER_ID"))
+data_file = fluid.layers.io.open_files(
+    filenames=gen_train_list("./mnist-[0-9]*.recordio", trainers, trainer_id),
+    thread_num=1,
+    shapes=[(-1, 784),(-1, 1)],
+    lod_levels=[0, 0],
+    dtypes=["float32", "int64"])
+img, label = fluid.layers.io.read_file(data_file)
+...
+```
diff --git a/doc/fluid/howto/cluster/nccl2_rdma_training.md b/doc/fluid/howto/cluster/nccl2_rdma_training.md
new file mode 100644
index 0000000000..cecd5c3a7a
--- /dev/null
+++ b/doc/fluid/howto/cluster/nccl2_rdma_training.md
@@ -0,0 +1,110 @@
+# Distributed Training with NCCL2 and RDMA
+
+When doing distributed multi-GPU training, network bandwidth often becomes the
+bottleneck.
We introduce a way to use NCCL2 for such training jobs to
+achieve the best performance.
+
+## Prepare Hardware with RDMA and Multiple GPUs
+
+I'm using two Linux servers, each of which has 8 GPUs and
+one 100Gb RDMA card installed.
+The base environment is:
+
+* OS: CentOS 7.4
+* RDMA device: "Mellanox Technologies MT27700 Family [ConnectX-4]"
+* Kernel version: `4.4.88-1.el7.elrepo.x86_64`
+* Docker version: `1.12.6`
+* Docker storage driver: `overlay2`
+* IP addresses: 192.168.16.30,192.168.16.34
+
+In general, the steps include:
+
+1. Install GPU drivers
+1. Install RDMA drivers
+1. Install "InfiniBand Support"
+1. Use docker to run tests and make sure GPUs and RDMA can work inside
+   the container.
+
+I'll omit the section "Install GPU drivers" because it is easy to find
+somewhere else.
+
+### Install RDMA drivers
+
+In my case, I've got two machines with the device
+"Mellanox Technologies MT27700 Family [ConnectX-4]" installed. The OS was
+"CentOS 7.4" and I updated the kernel to version 4.4 so that docker can
+work with the latest overlay2 filesystem.
+
+***NOTE: before you start, make sure you have a way to get a console
+of the server other than ssh because we may need to re-configure the
+network device.***
+
+1. Go to http://www.mellanox.com/page/products_dyn?product_family=26,
+   download the `MLNX_OFED` software at the bottom of the page, and upload it
+   onto the server.
+1. Run `./mlnxofedinstall --add-kernel-support` in the software package.
+1. Run `/etc/init.d/openibd restart` to make everything work. Note that
+   this operation may cause the network to go down if you are using this
+   RDMA device as the default network device and use ssh to log in to the server.
+1. Re-configure the network interface, for example:
+   `ifconfig eth2 192.168.16.30/20 up`, then add routes if needed:
+   `ip route add default via 192.168.16.1 dev eth2`.
+1. Do the same thing on the other node.
+1. Use `ping` to test whether the two nodes have a working ICMP connection.
+1. Use either `udaddy` or `ib_write_bw` to test that the network connection is
+   ready and has the desired bandwidth.
+
+### Prepare Docker Image to Run RDMA Programs
+
+1. Build a docker image using a cuda base image like `nvidia/cuda:8.0-cudnn5-devel-ubuntu16.04` and install the paddlepaddle whl
+   package in it.
+1. Start a docker container and mount GPU driver libs into it (you can
+   skip this step if you are using nvidia-docker).
+1. Mount RDMA drivers and libs into the container (see the section below),
+   and also `udaddy` and `ib_write_bw` if needed.
+1. Mount GPU devices and RDMA devices into the container using `--device`
+   or just use privileged mode `--privileged`.
+1. Start the container using host network mode: `--net=host`
+
+### RDMA Library Files Needed
+
+Usually, `MLNX_OFED` installs the latest supported libs under
+`/usr/lib64/mlnx_ofed/valgrind`. Other libs needed to run RDMA programs
+are listed below. These libs must be mounted into the docker container.
+
+* Libs under `/usr/lib64/mlnx_ofed/valgrind`
+  * libibcm.so
+  * libibverbs.so
+  * libmlx4.so
+  * libmlx5.so
+  * libmlx5-rdmav2.so
+  * librdmacm.so
+* Other libs:
+  * libnl-3.so.200
+  * libnl-route-3.so.200
+  * libnuma.so.1
+
+## Start to Run the Training Job
+
+Set NCCL environment variables to turn NCCL switches on and off:
+
+
+| Env Name | Description |
+| --- | --- |
+| NCCL_SOCKET_IFNAME | The RDMA device, e.g.
eth2 | +| NCCL_P2P_DISABLE | Set to 1 to disable P2P transfer between GPUs | +| NCCL_IB_DISABLE | Set to 1 to disable using RDMA | +| NCCL_IB_CUDA_SUPPORT | Set to 1 to enable GPU Direct if supported | +| NCCL_DEBUG | Set debug level: VERSION, WARN, INFO | + +My two servers are: `192.168.16.30,192.168.16.34`, On node 1, Run : + +```bash +PADDLE_TRAINER_ID=0 PADDLE_PORT=48372 PADDLE_WORKERS=192.168.16.30,192.168.16.34 POD_IP=192.168.16.30 stdbuf -oL python vgg16.py +``` + +On node 2, Run: + +```bash +PADDLE_TRAINER_ID=1 PADDLE_PORT=48372 PADDLE_WORKERS=192.168.16.30,192.168.16.34 POD_IP=192.168.16.34 stdbuf -oL python vgg16.py +``` diff --git a/doc/fluid/howto/index_cn.rst b/doc/fluid/howto/index_cn.rst index 97aeaf167d..b57af64f44 100644 --- a/doc/fluid/howto/index_cn.rst +++ b/doc/fluid/howto/index_cn.rst @@ -3,5 +3,6 @@ .. toctree:: :maxdepth: 1 - + + inference/index_cn.rst optimization/index_cn.rst diff --git a/doc/fluid/howto/inference/build_and_install_lib_cn.rst b/doc/fluid/howto/inference/build_and_install_lib_cn.rst new file mode 100644 index 0000000000..91357dd8c8 --- /dev/null +++ b/doc/fluid/howto/inference/build_and_install_lib_cn.rst @@ -0,0 +1,97 @@ +安装与编译C++预测库 +=========================== + +直接下载安装 +------------- + +====================== ======================================== +版本说明 C++预测库 +====================== ======================================== +cpu_avx_mkl `fluid.tgz `_ +cpu_avx_openblas `fluid.tgz `_ +cpu_noavx_openblas `fluid.tgz `_ +cuda7.5_cudnn5_avx_mkl `fluid.tgz `_ +cuda8.0_cudnn5_avx_mkl `fluid.tgz `_ +cuda8.0_cudnn7_avx_mkl `fluid.tgz `_ +cuda9.0_cudnn7_avx_mkl `fluid.tgz `_ +====================== ======================================== + +从源码编译 +---------- +用户也可以从 PaddlePaddle 核心代码编译C++预测库,只需在编译时配制下面这些编译选项: + +================= ========= +选项 值 +================= ========= +CMAKE_BUILD_TYPE Release +FLUID_INSTALL_DIR 安装路径 +WITH_FLUID_ONLY ON(推荐) +WITH_SWIG_PY OFF(推荐 +WITH_PYTHON OFF(推荐) +WITH_GPU ON/OFF +WITH_MKL ON/OFF +================= ========= + +建议按照推荐值设置,以避免链接不必要的库。其它可选编译选项按需进行设定。 + +下面的代码片段从github拉取最新代码,配制编译选项(需要将PADDLE_ROOT替换为PaddlePaddle预测库的安装路径): + + .. code-block:: bash + + pip install paddlepaddle-gpu + PADDLE_ROOT=/path/of/capi + git clone https://github.com/PaddlePaddle/Paddle.git + cd Paddle + mkdir build + cd build + cmake -DFLUID_INSTALL_DIR=$PADDLE_ROOT \ + -DCMAKE_BUILD_TYPE=Release \ + -DWITH_FLUID_ONLY=ON \ + -DWITH_SWIG_PY=OFF \ + -DWITH_PYTHON=OFF \ + -DWITH_MKL=OFF \ + -DWITH_GPU=OFF \ + .. + make + make inference_lib_dist + +成功编译后,使用C++预测库所需的依赖(包括:(1)编译出的PaddlePaddle预测库和头文件;(2)第三方链接库和头文件;(3)版本信息与编译选项信息) +均会存放于PADDLE_ROOT目录中。目录结构如下: + + .. code-block:: text + + PaddleRoot/ + ├── CMakeCache.txt + ├── paddle + │   └── fluid + │   ├── framework + │   ├── inference + │   ├── memory + │   ├── platform + │   ├── pybind + │   └── string + ├── third_party + │   ├── boost + │   │   └── boost + │   ├── eigen3 + │   │   ├── Eigen + │   │   └── unsupported + │   └── install + │   ├── gflags + │   ├── glog + │   ├── mklml + │   ├── protobuf + │   ├── snappy + │   ├── snappystream + │   └── zlib + └── version.txt + +version.txt 中记录了该预测库的版本信息,包括Git Commit ID、使用OpenBlas或MKL数学库、CUDA/CUDNN版本号,如: + + .. 
code-block:: text + + GIT COMMIT ID: c95cd4742f02bb009e651a00b07b21c979637dc8 + WITH_MKL: ON + WITH_GPU: ON + CUDA version: 8.0 + CUDNN version: v5 diff --git a/doc/fluid/howto/inference/index_cn.rst b/doc/fluid/howto/inference/index_cn.rst new file mode 100644 index 0000000000..a903423548 --- /dev/null +++ b/doc/fluid/howto/inference/index_cn.rst @@ -0,0 +1,8 @@ +预测库 +------------ + +.. toctree:: + :maxdepth: 1 + + build_and_install_lib_cn.rst + inference_support_in_fluid_cn.md diff --git a/doc/fluid/howto/inference/inference_support_in_fluid_cn.md b/doc/fluid/howto/inference/inference_support_in_fluid_cn.md new file mode 100644 index 0000000000..309b17fccd --- /dev/null +++ b/doc/fluid/howto/inference/inference_support_in_fluid_cn.md @@ -0,0 +1,304 @@ +# 使用指南 + +## 目录: + +- Python Inference API +- Inference C++ API +- Inference实例 +- Inference计算优化 + +## Python Inference API **[改进中]** +- 保存Inference模型 ([链接](https://github.com/PaddlePaddle/Paddle/blob/develop/python/paddle/fluid/io.py#L295)) + + ```python + def save_inference_model(dirname, + feeded_var_names, + target_vars, + executor, + main_program=None, + model_filename=None, + params_filename=None): + ``` + Inference模型和参数将会保存到`dirname`目录下: + - 序列化的模型 + - `model_filename`为`None`,保存到`dirname/__model__` + - `model_filename`非`None`,保存到`dirname/model_filename` + - 参数 + - `params_filename`为`None`,单独保存到各个独立的文件,各文件以参数变量的名字命名 + - `params_filename`非`None`,保存到`dirname/params_filename` + +- 两种存储格式 + - 参数保存到各个独立的文件 + - 如,设置`model_filename`为`None`、`params_filename`为`None` + + ```bash + $ cd recognize_digits_conv.inference.model + $ ls + $ __model__ batch_norm_1.w_0 batch_norm_1.w_2 conv2d_2.w_0 conv2d_3.w_0 fc_1.w_0 batch_norm_1.b_0 batch_norm_1.w_1 conv2d_2.b_0 conv2d_3.b_0 fc_1.b_0 + ``` + - 参数保存到同一个文件 + - 如,设置`model_filename`为`None`、`params_filename`为`__params__` + + ```bash + $ cd recognize_digits_conv.inference.model + $ ls + $ __model__ __params__ + ``` +- 加载Inference模型([链接](https://github.com/PaddlePaddle/Paddle/blob/develop/python/paddle/fluid/io.py#L380)) + ```python + def load_inference_model(dirname, + executor, + model_filename=None, + params_filename=None): + ... 
+    return [program, feed_target_names, fetch_targets]
+  ```
+
+## 链接Fluid Inference库
+- 示例项目([链接](https://github.com/luotao1/fluid_inference_example.git))
+
+  - GCC配置
+  ```bash
+  $ g++ -o a.out -std=c++11 main.cc \
+        -I${PADDLE_ROOT}/ \
+        -I${PADDLE_ROOT}/third_party/install/gflags/include \
+        -I${PADDLE_ROOT}/third_party/install/glog/include \
+        -I${PADDLE_ROOT}/third_party/install/protobuf/include \
+        -I${PADDLE_ROOT}/third_party/eigen3 \
+        -L${PADDLE_ROOT}/paddle/fluid/inference -lpaddle_fluid \
+        -lrt -ldl -lpthread
+  ```
+
+  - CMake配置
+  ```cmake
+  include_directories(${PADDLE_ROOT}/)
+  include_directories(${PADDLE_ROOT}/third_party/install/gflags/include)
+  include_directories(${PADDLE_ROOT}/third_party/install/glog/include)
+  include_directories(${PADDLE_ROOT}/third_party/install/protobuf/include)
+  include_directories(${PADDLE_ROOT}/third_party/eigen3)
+  target_link_libraries(${TARGET_NAME}
+                        ${PADDLE_ROOT}/paddle/fluid/inference/libpaddle_fluid.so
+                        -lrt -ldl -lpthread)
+  ```
+
+  - 设置环境变量:
+  `export LD_LIBRARY_PATH=${PADDLE_ROOT}/paddle/fluid/inference:$LD_LIBRARY_PATH`
+
+
+
+## C++ Inference API
+
+- 推断流程([链接](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/fluid/inference/tests/test_helper.h#L91))
+
+  - 1、 初始化设备
+  ```cpp
+  #include "paddle/fluid/framework/init.h"
+  paddle::framework::InitDevices(false);
+  ```
+
+  - 2、 定义place,executor,scope
+  ```cpp
+  auto place = paddle::platform::CPUPlace();
+  auto executor = paddle::framework::Executor(place);
+  auto* scope = new paddle::framework::Scope();
+  ```
+
+  - 3、 加载模型
+  ```cpp
+  #include "paddle/fluid/inference/io.h"
+  auto inference_program = paddle::inference::Load(executor, *scope, dirname);
+  // or
+  auto inference_program = paddle::inference::Load(executor,
+                                                   *scope,
+                                                   dirname + "/" + model_filename,
+                                                   dirname + "/" + params_filename);
+  ```
+
+  - 4、 获取`feed_target_names`和`fetch_target_names`
+  ```cpp
+  const std::vector<std::string>& feed_target_names = inference_program->GetFeedTargetNames();
+  const std::vector<std::string>& fetch_target_names = inference_program->GetFetchTargetNames();
+  ```
+
+  - 5、 准备`feed`数据
+  ```cpp
+  #include "paddle/fluid/framework/lod_tensor.h"
+  std::vector<paddle::framework::LoDTensor*> cpu_feeds;
+  ...
+  std::map<std::string, const paddle::framework::LoDTensor*> feed_targets;
+  for (size_t i = 0; i < feed_target_names.size(); ++i) {
+    // Please make sure that cpu_feeds[i] is right for feed_target_names[i]
+    feed_targets[feed_target_names[i]] = cpu_feeds[i];
+  }
+  ```
+
+  - 6、 定义`Tensor`来`fetch`结果
+  ```cpp
+  std::vector<paddle::framework::LoDTensor*> cpu_fetchs;
+  std::map<std::string, paddle::framework::LoDTensor*> fetch_targets;
+  for (size_t i = 0; i < fetch_target_names.size(); ++i) {
+    fetch_targets[fetch_target_names[i]] = cpu_fetchs[i];
+  }
+  ```
+
+  - 7、 执行`inference_program`
+  ```cpp
+  executor.Run(*inference_program, scope, feed_targets, fetch_targets);
+  ```
+
+  - 8、 使用`fetch`数据
+  ```cpp
+  for (size_t i = 0; i < cpu_fetchs.size(); ++i) {
+    std::cout << "lod_i: " << cpu_fetchs[i]->lod();
+    std::cout << "dims_i: " << cpu_fetchs[i]->dims();
+    std::cout << "result:";
+    float* output_ptr = cpu_fetchs[i]->data<float>();
+    for (int j = 0; j < cpu_fetchs[i]->numel(); ++j) {
+      std::cout << " " << output_ptr[j];
+    }
+    std::cout << std::endl;
+  }
+  ```
+  针对不同的数据,第4步
至第8步可执行多次。
+
+  - 9、 释放内存
+  ```cpp
+  delete scope;
+  ```
+  (本节末尾“基本概念”之后附有一个将上述步骤串联起来的示意程序。)
+
+
+- 接口说明
+
+  ```cpp
+  void Run(const ProgramDesc& program, Scope* scope,
+           std::map<std::string, const LoDTensor*>& feed_targets,
+           std::map<std::string, LoDTensor*>& fetch_targets,
+           bool create_vars = true,
+           const std::string& feed_holder_name = "feed",
+           const std::string& fetch_holder_name = "fetch");
+  ```
+  - 使用Python API `save_inference_model`保存的`program`里面包含了`feed_op`和`fetch_op`,用户提供的`feed_targets`、`fetch_targets`必须和`inference_program`中的`feed_op`、`fetch_op`保持一致。
+  - 用户提供的`feed_holder_name`和`fetch_holder_name`也必须和`inference_program`中`feed_op`、`fetch_op`保持一致,可使用`SetFeedHolderName`和`SetFetchHolderName`接口重新设置`inference_program`
+  - 默认情况下,除了`persistable`属性设置为`True`的`Variable`之外,每次执行`executor.Run`会创建一个局部`Scope`,并且在这个局部`Scope`中创建和销毁所有的`Variable`,以最小化空闲时的内存占用。
+  - `persistable`属性为`True`的`Variable`有:
+    - Operators的参数`w`、`b`等
+    - `feed_op`的输入变量
+    - `fetch_op`的输出变量
+
+
+- **不在每次执行时创建和销毁变量
+  ([PR](https://github.com/PaddlePaddle/Paddle/pull/9301))**
+  - 执行`inference_program`
+  ```cpp
+  // Call once
+  executor.CreateVariables(*inference_program, scope, 0);
+  // Call as many times as you like
+  executor.Run(
+      *inference_program, scope, feed_targets, fetch_targets, false);
+  ```
+  - **优点**
+    - 节省了频繁创建、销毁变量的时间(约占每次`Run`总时间的1% ~ 12%)
+    - 执行结束后可获取所有Operators的计算结果
+  - **缺点**
+    - 空闲时也会占用大量的内存
+    - 在同一个`Scope`中,相同的变量名是公用同一块内存的,容易引起意想不到的错误
+
+
+- **不在每次执行时创建Op([PR](https://github.com/PaddlePaddle/Paddle/pull/9630))**
+  - 执行`inference_program`
+  ```cpp
+  // Call once
+  auto ctx = executor.Prepare(*inference_program, 0);
+  // Call as many times as you like if you have no need to change the inference_program
+  executor.RunPreparedContext(ctx.get(), scope, feed_targets, fetch_targets);
+  ```
+  - **优点**
+    - 节省了频繁创建、销毁Op的时间
+  - **缺点**
+    - 一旦修改了`inference_program`,则需要重新创建`ctx`
+
+
+- **多线程共享Parameters([链接](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/fluid/inference/tests/test_multi_thread_helper.h))**
+  - 主线程
+    - 1、 初始化设备
+    - 2、 定义`place`,`executor`,`scope`
+    - 3、 加载模型,得到`inference_program`
+  - 从线程
+    - **复制`inference_program`得到`copy_program`,修改`copy_program`的`feed_holder_name`和`fetch_holder_name`**
+    ```cpp
+    auto copy_program = std::unique_ptr<paddle::framework::ProgramDesc>(
+        new paddle::framework::ProgramDesc(*inference_program));
+    std::string feed_holder_name = "feed_" + paddle::string::to_string(thread_id);
+    std::string fetch_holder_name = "fetch_" + paddle::string::to_string(thread_id);
+    copy_program->SetFeedHolderName(feed_holder_name);
+    copy_program->SetFetchHolderName(fetch_holder_name);
+    ```
+    - 4、 获取`copy_program`的`feed_target_names`和`fetch_target_names`
+    - 5、 准备feed数据,定义Tensor来fetch结果
+    - 6、 执行`copy_program`
+    ```cpp
+    executor->Run(*copy_program, scope, feed_targets, fetch_targets, true, feed_holder_name, fetch_holder_name);
+    ```
+    - 7、 使用fetch数据
+  - 主线程
+    - 8、 释放资源
+
+
+- 基本概念
+  - 数据相关:
+    - [Tensor](https://github.com/PaddlePaddle/Paddle/blob/develop/doc/fluid/design/concepts/tensor.md),一个N维数组,数据可以是任意类型(int,float,double等)
+    - [LoDTensor](https://github.com/PaddlePaddle/Paddle/blob/develop/doc/fluid/design/concepts/lod_tensor.md),带LoD(Level-of-Detail)即序列信息的Tensor
+    - [Scope](https://github.com/PaddlePaddle/Paddle/blob/develop/doc/design/scope.md),记录了变量Variable
+  - 执行相关:
+    - [Executor](https://github.com/PaddlePaddle/Paddle/blob/develop/doc/fluid/design/concepts/executor.md),无状态执行器,只跟设备相关
+    - Place
+      - CPUPlace,CPU设备
+      - CUDAPlace,CUDA GPU设备
+  - 神经网络表示:
+    - [Program](https://github.com/PaddlePaddle/Paddle/blob/develop/doc/fluid/design/concepts/program.md).
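+
+- 完整示意程序
+
+  下面是把上述第1~8步串联起来的一个最小示意程序(仅为示意,非官方完整示例:输入数据的构造被简化,实际使用时需按模型要求对`input`调用`Resize`和`mutable_data`填充数据;模型目录`dirname`为假设):
+
+  ```cpp
+  #include <iostream>
+  #include <map>
+  #include <string>
+  #include <vector>
+  #include "paddle/fluid/framework/init.h"
+  #include "paddle/fluid/framework/lod_tensor.h"
+  #include "paddle/fluid/inference/io.h"
+
+  int main(int argc, char** argv) {
+    std::string dirname = argv[1];  // 模型保存目录(假设由命令行传入)
+    paddle::framework::InitDevices(false);
+
+    auto place = paddle::platform::CPUPlace();
+    auto executor = paddle::framework::Executor(place);
+    auto* scope = new paddle::framework::Scope();
+
+    // 加载模型(参数保存在各个独立文件中的情形)
+    auto inference_program = paddle::inference::Load(executor, *scope, dirname);
+
+    const std::vector<std::string>& feed_target_names =
+        inference_program->GetFeedTargetNames();
+    const std::vector<std::string>& fetch_target_names =
+        inference_program->GetFetchTargetNames();
+
+    // 以单输入、单输出为例;input需按模型的实际shape填充数据
+    paddle::framework::LoDTensor input, output;
+    std::map<std::string, const paddle::framework::LoDTensor*> feed_targets;
+    std::map<std::string, paddle::framework::LoDTensor*> fetch_targets;
+    feed_targets[feed_target_names[0]] = &input;
+    fetch_targets[fetch_target_names[0]] = &output;
+
+    executor.Run(*inference_program, scope, feed_targets, fetch_targets);
+
+    std::cout << "output dims: " << output.dims() << std::endl;
+    delete scope;
+    return 0;
+  }
+  ```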
+ + 详细介绍请参考[**Paddle Fluid开发者指南**](https://github.com/lcy-seso/learning_notes/blob/master/Fluid/developer's_guid_for_Fluid/Developer's_Guide_to_Paddle_Fluid.md) + + + +## Inference实例 + + 1. fit a line: [Python](https://github.com/PaddlePaddle/Paddle/blob/develop/python/paddle/fluid/tests/book/test_fit_a_line.py), [C++](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/fluid/inference/tests/book/test_inference_fit_a_line.cc) + 1. image classification: [Python](https://github.com/PaddlePaddle/Paddle/blob/develop/python/paddle/fluid/tests/book/test_image_classification.py), [C++](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/fluid/inference/tests/book/test_inference_image_classification.cc) + 1. label semantic roles: [Python](https://github.com/PaddlePaddle/Paddle/blob/develop/python/paddle/fluid/tests/book/test_label_semantic_roles.py), [C++](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/fluid/inference/tests/book/test_inference_label_semantic_roles.cc) + 1. recognize digits: [Python](https://github.com/PaddlePaddle/Paddle/blob/develop/python/paddle/fluid/tests/book/test_recognize_digits.py), [C++](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/fluid/inference/tests/book/test_inference_recognize_digits.cc) + 1. recommender system: [Python](https://github.com/PaddlePaddle/Paddle/blob/develop/python/paddle/fluid/tests/book/test_recommender_system.py), [C++](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/fluid/inference/tests/book/test_inference_recommender_system.cc) + 1. understand sentiment: [Python](https://github.com/PaddlePaddle/Paddle/blob/develop/python/paddle/fluid/tests/book/test_understand_sentiment.py), [C++](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/fluid/inference/tests/book/test_inference_understand_sentiment.cc) + 1. word2vec: [Python](https://github.com/PaddlePaddle/Paddle/blob/develop/python/paddle/fluid/tests/book/test_word2vec.py), [C++](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/fluid/inference/tests/book/test_inference_word2vec.cc) + + +## Inference计算优化 +- 使用Python推理优化工具([inference_transpiler](https://github.com/PaddlePaddle/Paddle/blob/develop/python/paddle/fluid/inference_transpiler.py)) + ```python + class InferenceTranspiler: + def transpile(self, program, place, scope=None): + ... + if scope is None: + scope = global_scope() + ... + ``` + - 使用`InferenceTranspiler`将会直接修改`program`。 + - 使用`InferenceTranspiler`会修改参数的值,请确保`program`的参数在`scope`内。 +- 支持的优化 + - 融合batch_norm op的计算 +- 使用示例([链接](https://github.com/Xreki/Xreki.github.io/blob/master/fluid/inference/inference_transpiler.py)) + ```python + import paddle.fluid as fluid + # NOTE: Applying the inference transpiler will change the inference_program. 
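+  # (示意)假设 inference_program、place、inference_scope 已事先准备好,例如:
+  #   exe = fluid.Executor(place)
+  #   inference_scope = fluid.core.Scope()
+  #   [inference_program, feed_names, fetch_targets] = \
+  #       fluid.io.load_inference_model(dirname, exe)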
+  t = fluid.InferenceTranspiler()
+  t.transpile(inference_program, place, inference_scope)
+  ```
+
+
+
+
+## 内存使用优化
+- 使用Python内存优化工具([memory_optimization_transpiler](https://github.com/PaddlePaddle/Paddle/blob/develop/python/paddle/fluid/memory_optimization_transpiler.py))
+  ```python
+  fluid.memory_optimize(inference_program)
+  ```
diff --git a/doc/fluid/howto/optimization/benchmark/README.md b/doc/fluid/howto/optimization/benchmark/README.md
deleted file mode 120000
index db30af7f53..0000000000
--- a/doc/fluid/howto/optimization/benchmark/README.md
+++ /dev/null
@@ -1 +0,0 @@
-../../../../../benchmark/cluster/README.md
\ No newline at end of file
diff --git a/doc/fluid/howto/optimization/benchmark/vgg16/README.md b/doc/fluid/howto/optimization/benchmark/vgg16/README.md
deleted file mode 120000
index ca963ef5f0..0000000000
--- a/doc/fluid/howto/optimization/benchmark/vgg16/README.md
+++ /dev/null
@@ -1 +0,0 @@
-../../../../../../benchmark/cluster/vgg16/README.md
\ No newline at end of file
diff --git a/doc/fluid/howto/optimization/cpu_profiling_cn.md b/doc/fluid/howto/optimization/cpu_profiling_cn.md
index 8266dec3c6..198a05a79e 100644
--- a/doc/fluid/howto/optimization/cpu_profiling_cn.md
+++ b/doc/fluid/howto/optimization/cpu_profiling_cn.md
@@ -1,3 +1,5 @@
+# CPU性能调优
+
 此教程会介绍如何使用Python的cProfile包、Python库yep、Google perftools来进行性能分析 (profiling) 与调优(performance tuning)。

 Profling 指发现性能瓶颈。系统中的瓶颈可能和程序员开发过程中想象的瓶颈相去甚远。Tuning 指消除瓶颈。性能优化的过程通常是不断重复地 profiling 和 tuning。

@@ -8,7 +10,7 @@ PaddlePaddle 用户一般通过调用 Python API 编写深度学习程序。大

 * Python 与 C++ 混合代码的性能分析

-# Python代码的性能分析
+## Python代码的性能分析

 ### 生成性能分析文件
diff --git a/doc/fluid/howto/optimization/cpu_profiling_en.md b/doc/fluid/howto/optimization/cpu_profiling_en.md
index e95556dd60..216694965b 100644
--- a/doc/fluid/howto/optimization/cpu_profiling_en.md
+++ b/doc/fluid/howto/optimization/cpu_profiling_en.md
@@ -1,3 +1,5 @@
+# Tune CPU performance
+
 This tutorial introduces techniques we use to profile and tune the
 CPU performance of PaddlePaddle.  We will use Python packages
 `cProfile` and `yep`, and Google's `perftools`.
@@ -14,7 +16,7 @@ the profiling and tuning of

 1. the Python code and
 1. the mixture of Python and C++ code.
-# Profiling the Python Code
+## Profiling the Python Code

 ### Generate the Performance Profiling File
diff --git a/doc/fluid/howto/optimization/host_memory_profiling_cn.md b/doc/fluid/howto/optimization/host_memory_profiling_cn.md
new file mode 100644
index 0000000000..7fb0883dd9
--- /dev/null
+++ b/doc/fluid/howto/optimization/host_memory_profiling_cn.md
@@ -0,0 +1,89 @@
+# 堆内存分析和优化
+
+计算机程序都可能有内存泄漏的风险。**内存泄漏**一般是由于程序在堆(heap)上分配了内存而没有释放,随着程序的运行占用的内存越来越大,一方面会影响程序的稳定性,可能让运行速度越来越慢,或者造成OOM,甚至会影响运行程序的机器的稳定性,造成宕机。
+
+
+目前有很多内存泄漏分析工具,比较经典的有[valgrind](http://valgrind.org/docs/manual/quick-start.html#quick-start.intro), [gperftools](https://gperftools.github.io/gperftools/)。
+
+因为Fluid是用Python驱动C++ core来运行,valgrind直接分析非常困难,需要自己编译debug版本的、带valgrind支持的专用Python版本,而且输出的信息中大部分是Python自己的符号和调用信息,分析起来很困难,另外使用valgrind会让程序运行速度变得非常慢,所以不建议使用。
+
+本教程主要介绍[gperftools](https://gperftools.github.io/gperftools/)的使用。
+
+gperftools主要支持以下四个功能:
+
+- thread-caching malloc
+- heap-checking using tcmalloc
+- heap-profiling using tcmalloc
+- CPU profiler
+
+Paddle也提供了基于gperftools的[CPU性能分析教程](https://github.com/PaddlePaddle/Paddle/blob/develop/doc/fluid/howto/optimization/cpu_profiling_cn.md)。
+
+对于堆内存的分析,主要用到thread-caching malloc和heap-profiling using tcmalloc。
+
+## 环境
+
+本教程基于paddle提供的Docker开发环境paddlepaddle/paddle:latest-dev,该环境基于Ubuntu 16.04.4 LTS。
+
+## 使用流程
+
+- 安装google-perftools
+
+```
+apt-get install libunwind-dev
+apt-get install google-perftools
+```
+
+- 安装pprof
+
+```
+go get -u github.com/google/pprof
+```
+
+- 设置运行环境
+
+```
+export PPROF_PATH=/root/gopath/bin/pprof
+export PPROF_BINARY_PATH=/root/gopath/bin/pprof
+export LD_PRELOAD=/usr/lib/libtcmalloc.so.4
+```
+
+- 使用heap profile来运行python程序。本质上是周期性地对堆的分配情况做一次快照。
+
+```
+# HEAPPROFILE 设置生成的堆分析文件的目录和文件前缀
+# HEAP_PROFILE_ALLOCATION_INTERVAL 设置每分配多少存储就dump一次,默认1GB
+env HEAPPROFILE="./perf_log/test.log" HEAP_PROFILE_ALLOCATION_INTERVAL=209715200 python trainer.py
+```
+
+随着程序的运行,会在perf_log这个文件夹下生成很多文件,如下:
+
+```
+-rw-r--r-- 1 root root 1.0M Jun 1 15:00 test.log.0001.heap
+-rw-r--r-- 1 root root 1.0M Jun 1 15:00 test.log.0002.heap
+-rw-r--r-- 1 root root 1.0M Jun 1 15:00 test.log.0003.heap
+-rw-r--r-- 1 root root 1.0M Jun 1 15:00 test.log.0004.heap
+-rw-r--r-- 1 root root 1.0M Jun 1 15:00 test.log.0005.heap
+-rw-r--r-- 1 root root 1.0M Jun 1 15:00 test.log.0006.heap
+```
+
+- 使用pprof对heap文件进行分析。分析有两种模式:
+  - 完整模式。会对当前heap做一个分析,显示目前分配内存的一些调用路径。
+
+    ```
+    pprof --pdf python test.log.0012.heap
+    ```
+    上述命令会生成一个profile00x.pdf的文件,可以直接打开,例如:[memory_cpu_allocator](https://github.com/jacquesqiao/Paddle/blob/bd2ea0e1f84bb6522a66d44a072598153634cade/doc/fluid/howto/optimization/memory_cpu_allocator.pdf)。从下图可以看出,在CPU版本fluid的运行过程中,分配存储最多的模块是CPUAllocator,
而别的模块相对而言分配内存较少,所以被忽略了,这对于分析内存泄漏是很不方便的,因为泄漏是一个缓慢的过程,在这种图中是无法看到的。
+
+    ![result](https://user-images.githubusercontent.com/3048612/40964027-a54033e4-68dc-11e8-836a-144910c4bb8c.png)
+
+  - Diff模式。可以对两个时刻的heap做diff,把一些内存分配没有发生变化的模块去掉,而把增量部分显示出来。
+    ```
+    pprof --pdf --base test.log.0010.heap python test.log.1045.heap
+    ```
+    生成的结果为:[`memory_leak_protobuf`](https://github.com/jacquesqiao/Paddle/blob/bd2ea0e1f84bb6522a66d44a072598153634cade/doc/fluid/howto/optimization/memory_leak_protobuf.pdf)
+
+    从图中可以看出:ProgramDesc这个结构,在两个版本之间增长了200MB+,所以这里有很大的内存泄漏的可能性,最终结果也确实证明是这里造成了泄漏。
+
+    ![result](https://user-images.githubusercontent.com/3048612/40964057-b434d5e4-68dc-11e8-894b-8ab62bcf26c2.png)
+    ![result](https://user-images.githubusercontent.com/3048612/40964063-b7dbee44-68dc-11e8-9719-da279f86477f.png)
+
diff --git a/doc/fluid/howto/optimization/timeline.md b/doc/fluid/howto/optimization/timeline.md
deleted file mode 100644
index 96481ae2a6..0000000000
--- a/doc/fluid/howto/optimization/timeline.md
+++ /dev/null
@@ -1,27 +0,0 @@
-# how to use timeline tool to do profile
-
-1. Add `with profiler.profiler(...)` to the main training loop. After run, the code will generate a profile record file `/tmp/profile`. **Warning**: Please do not run too many batches when use profiler to record timeline information, for the profile record will grow with the batch number.
-
-	```python
-	with profiler.profiler('All', 'total', '/tmp/profile') as prof:
-	    for pass_id in range(pass_num):
-	        for batch_id, data in enumerate(train_reader()):
-	            exe.run(fluid.default_main_program(),
-	                    feed=feeder.feed(data),
-	                    fetch_list=[],
-	                    use_program_cache=True)
-	            ...
-	```
-
-1. Run `python paddle/tools/timeline.py` to process `/tmp/profile`, it will generate another
-file `/tmp/timeline` by default. You can change the path by cmd parameter, please take a look at
-[timeline.py](https://github.com/PaddlePaddle/Paddle/blob/develop/tools/timeline.py) for details.
-
-1. Open chrome and visit , use `load` button to load the generated `timeline` file.
-
-	![chrome tracing](./tracing.jpeg)
-
-1. The resulting timeline should be like:
-
-
-	![chrome timeline](./timeline.jpeg)
diff --git a/doc/fluid/howto/optimization/timeline_cn.md b/doc/fluid/howto/optimization/timeline_cn.md
new file mode 100644
index 0000000000..faf39f276d
--- /dev/null
+++ b/doc/fluid/howto/optimization/timeline_cn.md
@@ -0,0 +1,32 @@
+# 如何使用timeline工具做性能分析
+
+1. 在训练的主循环外加上`profiler.start_profiler(...)`和`profiler.stop_profiler(...)`。运行之后,代码会在`/tmp/profile`目录下生成一个profile的记录文件。
+
+	**提示:**
+	请不要在timeline记录信息时运行太多次迭代,因为timeline中的记录数量和迭代次数是成正比的。
+
+	```python
+	for pass_id in range(pass_num):
+	    for batch_id, data in enumerate(train_reader()):
+	        if pass_id == 0 and batch_id == 5:
+	            profiler.start_profiler("All")
+	        elif pass_id == 0 and batch_id == 10:
+	            profiler.stop_profiler("total", "/tmp/profile")
+	        exe.run(fluid.default_main_program(),
+	                feed=feeder.feed(data),
+	                fetch_list=[])
+	        ...
+	```
+
+1. 运行`python paddle/tools/timeline.py`来处理`/tmp/profile`,这个程序默认会生成一个`/tmp/timeline`文件,你也可以用命令行参数来修改这个路径,请参考[timeline.py](https://github.com/PaddlePaddle/Paddle/blob/develop/tools/timeline.py)。
+
+```bash
+python Paddle/tools/timeline.py --profile_path=/tmp/profile --timeline_path=timeline
+```
+
+1. 打开chrome浏览器,访问 chrome://tracing/ ,用`load`按钮来加载生成的`timeline`文件。
+
+	![chrome tracing](./tracing.jpeg)
+
+1.
结果如下图所示,可以放大查看timeline的细节信息。
+
+	![chrome timeline](./timeline.jpeg)
diff --git a/doc/fluid/howto/optimization/timeline_en.md b/doc/fluid/howto/optimization/timeline_en.md
new file mode 100644
index 0000000000..6f963c6b4d
--- /dev/null
+++ b/doc/fluid/howto/optimization/timeline_en.md
@@ -0,0 +1,33 @@
+# how to use timeline tool to do profile
+
+1. Add `profiler.start_profiler(...)` and `profiler.stop_profiler(...)` to the main training loop. After run, the code will generate a profile record file `/tmp/profile`. **Warning**: Please do not run too many batches when use profiler to record timeline information, for the profile record will grow with the batch number.
+
+	```python
+	for pass_id in range(pass_num):
+	    for batch_id, data in enumerate(train_reader()):
+	        if pass_id == 0 and batch_id == 5:
+	            profiler.start_profiler("All")
+	        elif pass_id == 0 and batch_id == 10:
+	            profiler.stop_profiler("total", "/tmp/profile")
+	        exe.run(fluid.default_main_program(),
+	                feed=feeder.feed(data),
+	                fetch_list=[])
+	        ...
+	```
+
+1. Run `python paddle/tools/timeline.py` to process `/tmp/profile`, it will generate another
+file `/tmp/timeline` by default. You can change the path by cmd parameter, please take a look at
+[timeline.py](https://github.com/PaddlePaddle/Paddle/blob/develop/tools/timeline.py) for details.
+
+```bash
+python Paddle/tools/timeline.py --profile_path=/tmp/profile --timeline_path=timeline
+```
+
+1. Open chrome and visit chrome://tracing/ , use `load` button to load the generated `timeline` file.
+
+	![chrome tracing](./tracing.jpeg)
+
+1. The resulting timeline should be like:
+
+
+	![chrome timeline](./timeline.jpeg)
diff --git a/doc/fluid/howto/performance/error_clip.md b/doc/fluid/howto/performance/error_clip.md
index 58aa73b8cd..749cf7693c 100644
--- a/doc/fluid/howto/performance/error_clip.md
+++ b/doc/fluid/howto/performance/error_clip.md
@@ -78,7 +78,7 @@ def error_clip_callback(block, context):
     op_desc = block.desc.op(block.desc.op_size() - 1)
     for grad_n in filter(lambda n: grad_to_var.has_key(n),
                          op_desc.output_arg_names()):
-        fwd_var = block.var_recursive(grad_to_var[grad_n])
+        fwd_var = block.__var_recursive(grad_to_var[grad_n])
         error_clip = getattr(fwd_var, "error_clip", None)
         if not (error_clip is None or isinstance(error_clip,
                                                  BaseErrorClipAttr)):
diff --git a/doc/fluid/images/1.png b/doc/fluid/images/1.png
new file mode 100644
index 0000000000..67daf566f9
Binary files /dev/null and b/doc/fluid/images/1.png differ
diff --git a/doc/fluid/images/2.png b/doc/fluid/images/2.png
new file mode 100644
index 0000000000..43367777f4
Binary files /dev/null and b/doc/fluid/images/2.png differ
diff --git a/doc/fluid/images/3.png b/doc/fluid/images/3.png
new file mode 100644
index 0000000000..481021ef30
Binary files /dev/null and b/doc/fluid/images/3.png differ
diff --git a/doc/fluid/images/4.png b/doc/fluid/images/4.png
new file mode 100644
index 0000000000..4279f41e06
Binary files /dev/null and b/doc/fluid/images/4.png differ
diff --git a/doc/fluid/images/LoDTensor.png b/doc/fluid/images/LoDTensor.png
new file mode 100644
index 0000000000..75369f5378
Binary files /dev/null and b/doc/fluid/images/LoDTensor.png differ
diff --git a/doc/fluid/images/compile_run_time.png b/doc/fluid/images/compile_run_time.png
new file mode 100644
index 0000000000..0bc9b2fd0e
Binary files /dev/null and b/doc/fluid/images/compile_run_time.png differ
diff --git a/doc/fluid/images/executor.png b/doc/fluid/images/executor.png
new file mode 100644
index 0000000000..b29c0d779e
Binary files /dev/null and
b/doc/fluid/images/executor.png differ diff --git a/doc/fluid/images/fluid_examples.png b/doc/fluid/images/fluid_examples.png new file mode 100644 index 0000000000..aa99472c0f Binary files /dev/null and b/doc/fluid/images/fluid_examples.png differ diff --git a/doc/fluid/images/fluid_module_1.png b/doc/fluid/images/fluid_module_1.png new file mode 100644 index 0000000000..554782ba54 Binary files /dev/null and b/doc/fluid/images/fluid_module_1.png differ diff --git a/doc/fluid/images/fluid_module_2.png b/doc/fluid/images/fluid_module_2.png new file mode 100644 index 0000000000..4219efccbb Binary files /dev/null and b/doc/fluid/images/fluid_module_2.png differ diff --git a/doc/fluid/images/layer.png b/doc/fluid/images/layer.png new file mode 100644 index 0000000000..e46db4c9c6 Binary files /dev/null and b/doc/fluid/images/layer.png differ diff --git a/doc/fluid/images/operator1.png b/doc/fluid/images/operator1.png new file mode 100644 index 0000000000..3975b06f61 Binary files /dev/null and b/doc/fluid/images/operator1.png differ diff --git a/doc/fluid/images/operator2.png b/doc/fluid/images/operator2.png new file mode 100644 index 0000000000..b7bb1fae20 Binary files /dev/null and b/doc/fluid/images/operator2.png differ diff --git a/doc/fluid/images/place.png b/doc/fluid/images/place.png new file mode 100644 index 0000000000..14e77511d6 Binary files /dev/null and b/doc/fluid/images/place.png differ diff --git a/doc/fluid/images/print_fluid_program.png b/doc/fluid/images/print_fluid_program.png new file mode 100644 index 0000000000..e8e459e1b3 Binary files /dev/null and b/doc/fluid/images/print_fluid_program.png differ diff --git a/doc/fluid/images/program_desc1.png b/doc/fluid/images/program_desc1.png new file mode 100644 index 0000000000..0656336914 Binary files /dev/null and b/doc/fluid/images/program_desc1.png differ diff --git a/doc/fluid/images/program_desc2.png b/doc/fluid/images/program_desc2.png new file mode 100644 index 0000000000..db5bfa1231 Binary files /dev/null and b/doc/fluid/images/program_desc2.png differ diff --git a/doc/fluid/images/raw_input.png b/doc/fluid/images/raw_input.png new file mode 100644 index 0000000000..0725f92d2b Binary files /dev/null and b/doc/fluid/images/raw_input.png differ diff --git a/doc/fluid/images/scope_variable_tensor.png b/doc/fluid/images/scope_variable_tensor.png new file mode 100644 index 0000000000..59b0de6fb3 Binary files /dev/null and b/doc/fluid/images/scope_variable_tensor.png differ diff --git a/doc/fluid/images/sorted_input.png b/doc/fluid/images/sorted_input.png new file mode 100644 index 0000000000..ff60112836 Binary files /dev/null and b/doc/fluid/images/sorted_input.png differ diff --git a/doc/fluid/images/transpiler.png b/doc/fluid/images/transpiler.png new file mode 100644 index 0000000000..422973c0dc Binary files /dev/null and b/doc/fluid/images/transpiler.png differ diff --git a/doc/fluid/images/user_interface.png b/doc/fluid/images/user_interface.png new file mode 100644 index 0000000000..ffc94e3d89 Binary files /dev/null and b/doc/fluid/images/user_interface.png differ diff --git a/doc/fluid/index_cn.rst b/doc/fluid/index_cn.rst index d878d192ca..6b1ef3ceed 100644 --- a/doc/fluid/index_cn.rst +++ b/doc/fluid/index_cn.rst @@ -1,12 +1,16 @@ - PaddlePaddle Fluid -========================== +.. PaddlePaddle Fluid documentation master file, created by + sphinx-quickstart on Thu Jun 7 17:04:53 2018. + You can adapt this file completely to your liking, but it should at least + contain the root `toctree` directive. 
+ +############## +欢迎使用 Fluid +############## .. toctree:: - :maxdepth: 1 + :maxdepth: 1 - getstarted/index_cn.rst - build_and_install/index_cn.rst - design/index_cn.rst - howto/index_cn.rst - dev/index_cn.rst - faq/index_cn.rst + new_docs/beginners_guide/index.rst + new_docs/user_guides/index.rst + new_docs/advanced_usage/index.rst + new_docs/faq/index_cn.rst diff --git a/doc/fluid/new_docs/advanced_usage/benchmark.rst b/doc/fluid/new_docs/advanced_usage/benchmark.rst new file mode 100644 index 0000000000..7854263bf8 --- /dev/null +++ b/doc/fluid/new_docs/advanced_usage/benchmark.rst @@ -0,0 +1,120 @@ +################# +如何进行基准测试 +################# + +本文介绍如何给深度学习框架做基准测试。基准测试主要包含验证模型的精度和性能两方面,下文包含搭建测试环境,选择基准测试模型,验证测试结果等几方面内容。 + +验证深度学习框架,可分为训练和测试两个阶段, 验证指标略有不同,本文只介绍训练阶段的指标验证。训练阶段关注的是模型训练集上的精度,训练集是完备的,因此关注大batch\_size下的训练速度,关注吞吐量,例如图像模型常用的batch\_size=128, 多卡情况下会加大;预测阶段关注的是在测试集上的精度,线上服务测试数据不能提前收集,因此关注小batch\_size下的预测速度,关注延迟,例如预测服务常用的batch\_size=1, 4等。 + +`Fluid `__ 是PaddlePaddle从0.11.0版本开始引入的设计,本文的基准测试在该版本上完成。 + + +环境搭建 +"""""""""""" + +基准测试中模型精度和硬件、框架无关,由模型结构和数据共同决定;性能方面由测试硬件和框架性能决定。框架基准测试为了对比框架之间的差异,控制硬件环境,系统库等版本一致。下文中的对比实验都在相同的硬件条件和系统环境条件下进行. + + +不同架构的GPU卡性能差异巨大,在验证模型在GPU上训练性能时,可使用NVIDIA提供的工具:code `nvidia-smi` 检验当前使用的GPU型号,如果测试多卡训练性能,需确认硬件连接是 `nvlink `__ 或 `PCIe `__ 。 同样地,CPU型号会极大影响模型在CPU上的训练性能。可读取`/proc/cpuinfo`中的参数,确认当前正在使用的CPU型号。 + +下载GPU对应的Cuda Tool Kit和 Cudnn,或者使用NVIDIA官方发布的nvidia-docker镜像 `nvidia-docker `__, 镜像内包含了Cuda和Cudnn,本文采用这种方式。 Cuda Tool Kit包含了GPU代码使用到的基础库,影响在此基础上编译出的Fluid二进制运行性能。 + +准备好Cuda环境后,从github上的下载Paddle并源码编译,会生成对应的最适合当前GPU的sm\_arch二进制\ `sm\_arch `__\ 。另外,cudnn对卷积类任务影响巨大,在基准测试中需要小版本一致,例如Cudnn7.0.2与Cudnn7.1.4在Resnet上有5%以上差异。 + + +选择基准模型 +"""""""""""" + +对框架做基准测试,需要覆盖不同训练任务和不同大小的模型,本文中选取了图像和NLP的最为常用的5个模型。 + +============ ============ ================= ============ +任务种类 模型名称 网络结构 数据集 +============ ============ ================= ============ +图像分类 mnist Lenet mnist +图像分类 VGG VGG-16 Flowers102 +图像分类 Resnet Resnet-50 Flowers102 +文本分类 Stacked-LSTM Stacked-LSTM IMDB +机器翻译 seq-seq Stacked-LSTM wmt14 +============ ============ ================= ============ + +其中mnist, VGG, Resnet属于CNN模型, stacked-lstm, seq2seq代表RNN模型。 +`benchmark `__ +基准模型测试脚本中,均跳过了前几个batch的训练过程,原因是加载数据和分配显存受系统当前运行情况影响,会导致统计性能不准确。运行完若干个轮次后,统计对应指标。 + + +基准模型的数据的选择方面,数据量大且验证效果多的公开数据集为首选。图像模型VGG和resnet, 本文选择了 `flowers102 `__ ,图像大小预处理为和Imagenet相同大小,因此性能可直接对比 +NLP模型的公开且影响力大数据集较少,seq2seq模型选择了wmt14数据,stacked-lstm模型中选择了 `imdb `__ 数据。 + + +注意,图像模型每条样本大小相同,图像经过变换后大小一致,因此经过的计算路径基本相同,计算速度和显存占用波动较小,可以从若干个batch的数据中采样得到当前的训练性能数据。而NLP模型由于样本长度不定,计算路径和显存占用也不相同,因此只能完整运行若干个轮次后,统计速度和显存消耗。 +显存分配是特别耗时的操作,因此Fluid默认会占用所有可用显存空间形成显存池,用以加速计算过程中的显存分配。如果需要统计模型真实显存消耗,可设置环境变量`FLAGS_fraction_of_gpu_memory_to_use=0.0`,观察最大显存开销。 + + +测试过程 +"""""""""""" + +- CPU 单机单线程测试 + +测试CPU上单线程的性能,先设置CUDA的环境变量为空,``CUDA_VISIBLE_DEVICES=``,并通过环境变量关闭OpenMP和MKL的多线程 ``OMP_NUM_THREADS=1``, ``MKL_NUM_THREADS=1;``。 +然后代码中设置为使用CPUPlace,如果使用Paddle代码库中的脚本,只需要命令行参数传入 use_gpu=False即可。 + +.. code-block:: python + + >>> import paddle.fluid as fluid + >>> place = fluid.CPUPlace() + +.. code:: bash + + docker run -it --name CASE_NAME --security-opt seccomp=unconfined -v $PWD/benchmark:/benchmark paddlepaddle/paddle:latest-dev /bin/bash + + +- GPU 单机单卡测试 + +本教程使用了Cuda8, Cudnn7.0.1。来源为:code `nvidia/cuda:8.0-cudnn7-devel-ubuntu16.04` + +.. 
code:: bash + + nvidia-docker run -it --name CASE_NAME --security-opt seccomp=unconfined -v $PWD/benchmark:/benchmark -v /usr/lib/x86_64-linux-gnu:/usr/lib/x86_64-linux-gnu paddlepaddle/paddle:latest-dev /bin/bash +在单卡上测试,设置CUDA的环境变量使用一块GPU,``CUDA_VISIBLE_DEVICES=0`` +然后代码中设置为使用CUDAPlace,如果使用Paddle代码库中的脚本,只需要命令行参数传入 use_gpu=True即可。 + +.. code-block:: python + + >>> import paddle.fluid as fluid + >>> place = fluid.CUDAPlace(0) // 0 指第0块GPU + + +测试结果 +"""""""""""" + +本教程对比相同环境下的Fluid0.12.0和TensorFlow1.4.0的性能表现。 +硬件环境为 CPU: Intel(R) Xeon(R) CPU E5-2660 v4 @ 2.00GHz, GPU: TITAN X(Pascal) 12G x 1, Nvidia-Driver 384.90。 +系统环境为Ubuntu 16.04.3 LTS, 本文中采用了docker环境,系统版本为nvidia-docker17.05.0-ce。 +测试的Fluid版本为\ `v.0.12.0 `__ 。 +TensorFlow版本为\ `v.1.4.0-rc1 `__ 。 +使用的脚本和配置见\ `benchmark `__ 。 +图表中统计单位为samples/秒。 + +- CPU 单机单线程测试结果 + + ================ ==================== =================== + Speed Fluid CPU TensorFlow CPU + ================ ==================== =================== + mnist 1298.75 samples/s 637.57 samples/s + VGG-16 0.4147 images/s 0.1229 images/s + Resnet-50 1.6935 images/s 0.3657 images/s + Stacked-LSTM 472.3225 words/s 48.2293words/s + Seq2Seq 217.1655 words/s 28.6164 words/s + ================ ==================== =================== + +- GPU 单机单卡测试结果 + + =============== ===================== ================= + Speed Fluid GPU TensorFlow GPU + =============== ===================== ================= + mnist 19710.90 samples/s 15576.3 samples/s + VGG-16 59.83327 images/s 40.9967 images/s + Resnet-50 105.84412 97.8923 images/s + Stacked-LSTM 1319.99315 1608.2526 words/s + Seq2Seq 7147.89081 6845.1161 words/s + =============== ===================== ================= diff --git a/doc/fluid/new_docs/advanced_usage/deploy/anakin_arm_benchmark.md b/doc/fluid/new_docs/advanced_usage/deploy/anakin_arm_benchmark.md new file mode 100644 index 0000000000..08ea379f81 --- /dev/null +++ b/doc/fluid/new_docs/advanced_usage/deploy/anakin_arm_benchmark.md @@ -0,0 +1,56 @@ +# Anakin ARM 性能测试 + +## 测试环境和参数: ++ 测试模型Mobilenetv1, mobilenetv2, mobilenet-ssd ++ 采用android ndk交叉编译,gcc 4.9,enable neon, ABI: armveabi-v7a with neon -mfloat-abi=softfp ++ 测试平台 + - 荣耀v9(root): 处理器:麒麟960, 4 big cores in 2.36GHz, 4 little cores in 1.8GHz + - nubia z17:处理器:高通835, 4 big cores in 2.36GHz, 4 little cores in 1.9GHz + - 360 N5:处理器:高通653, 4 big cores in 1.8GHz, 4 little cores in 1.4GHz ++ 多线程:openmp ++ 时间:warmup10次,运行10次取均值 ++ ncnn版本:来源于github的master branch中commits ID:307a77f04be29875f40d337cfff6df747df09de6(msg:convert LogisticRegressionOutput)版本 ++ TFlite版本:来源于github的master branch中commits ID:65c05bc2ac19f51f7027e66350bc71652662125c(msg:Removed unneeded file copy that was causing failure in Pi builds)版本 + +在BenchMark中本文将使用**`ncnn`**、**`TFlite`**和**`Anakin`**进行性能对比分析 + +## BenchMark model + +> 注意在性能测试之前,请先将测试model通过[External Converter](#10003)转换为Anakin model +> 对这些model,本文在ARM上进行多线程的单batch size测试。 + +- [Mobilenet v1](#11) *caffe model 可以在[这儿](https://github.com/shicai/MobileNet-Caffe)下载* +- [Mobilenet v2](#22) *caffe model 可以在[这儿](https://github.com/shicai/MobileNet-Caffe)下载* +- [mobilenet-ssd](#33) *caffe model 可以在[这儿](https://github.com/chuanqi305/MobileNet-SSD)下载* + +### mobilenetv1 + + |platform | Anakin (1) | Anakin (2) | Anakin (4) | ncnn (1) | ncnn (2) | ncnn (4) | TFlite (1) | TFlite (2) | TFlite (4)| + |:---: | :---: | :---: | :---:| :---:| :---:| :---:| :---:| :---:| :---:| + |麒麟960|107.7ms|61.1ms|38.2ms|152.8ms|85.2ms|51.9ms|152.6ms|nan|nan| + 
|高通835|105.7ms|63.1ms|~~46.8ms~~|152.7ms|87.0ms|~~92.7ms~~|146.9ms|nan|nan|
+ |高通653|120.3ms|64.2ms|46.6ms|202.5ms|117.6ms|84.8ms|158.6ms|nan|nan|
+
+### mobilenetv2
+
+ |platform | Anakin (1) | Anakin (2) | Anakin (4) | ncnn (1) | ncnn (2) | ncnn (4) | TFlite (1) | TFlite (2) | TFlite (4)|
+ |:---: | :---: | :---: | :---:| :---:| :---:| :---:| :---:| :---:| :---:|
+ |麒麟960|93.1ms|53.9ms|34.8ms|144.4ms|84.3ms|55.3ms|100.6ms|nan|nan|
+ |高通835|93.0ms|55.6ms|41.1ms|139.1ms|88.4ms|58.1ms|95.2ms|nan|nan|
+ |高通653|106.6ms|64.2ms|48.0ms|199.9ms|125.1ms|98.9ms|108.5ms|nan|nan|
+
+### mobilenet-ssd
+
+ |platform | Anakin (1) | Anakin (2) | Anakin (4) | ncnn (1) | ncnn (2) | ncnn (4) | TFlite (1) | TFlite (2) | TFlite (4)|
+ |:---: | :---: | :---: | :---:| :---:| :---:| :---:| :---:| :---:| :---:|
+ |麒麟960|213.9ms|120.5ms|74.5ms|307.9ms|166.5ms|104.2ms|nan|nan|nan|
+ |高通835|213.0ms|125.7ms|~~98.4ms~~|292.9ms|177.9ms|~~167.8ms~~|nan|nan|nan|
+ |高通653|236.0ms|129.6ms|96.0ms|377.7ms|228.9ms|165.0ms|nan|nan|nan
+
+## How to run those Benchmark models?
+
+1. 首先, 使用[External Converter](../docs/Manual/Converter_en.md)对caffe model 进行转换
+2. 然后将转换后的Anakin model和编译好的benchmark_arm 二进制文件通过'adb push'命令上传至测试机
+3. 接着在测试机含有Anakin model的目录中运行'./benchmark_arm ./ anakin_model.anakin.bin 1 10 10 1' 命令
+4. 最后,终端显示器上将会打印该模型的运行时间
+5. 其中运行命令的参数个数和含义可以通过运行'./benchmark_arm'看到
diff --git a/doc/fluid/new_docs/advanced_usage/deploy/anakin_example.md b/doc/fluid/new_docs/advanced_usage/deploy/anakin_example.md
new file mode 100644
index 0000000000..e6b9e18fe2
--- /dev/null
+++ b/doc/fluid/new_docs/advanced_usage/deploy/anakin_example.md
@@ -0,0 +1,28 @@
+# Example
+Anakin目前只支持NCHW的格式
+示例文件在test/framework/net下
+
+## 在NV的GPU上运行CNN模型
+示例文件为example_nv_cnn_net.cpp,打开后整体流程如下:
+- 将模型的path设置为anakin模型的路径,初始化NV平台的图对象。 anakin模型可以通过转换器转化caffe或fluid的模型得到
+- 根据模型设置网络图的输入尺寸,进行图优化
+- 根据优化后的网络图初始化网络执行器
+- 取出网络的输入tensor,将数据拷贝到输入tensor
+- 运行推导
+- 取出网络的输出tensor
+
+以NV平台为例演示Anakin框架的使用方法,注意编译时需要打开GPU编译开关
+
+## 在X86上运行RNN模型
+示例文件为example_x86_rnn_net.cpp
+整体流程与在NV的GPU上运行CNN模型相似,不同之处如下:
+- 使用X86标识初始化图对象和网络执行器对象
+- rnn模型的输入尺寸是可变的,初始化图时的输入维度是维度的最大值,输入维度N代表总的词的个数。还需要设置输入tensor的seq_offset来标示这些词是如何划分为句子的,如{0,5,12}表示共有12个词,其中第0到第4个词是第一句话,第5到第11个词是第二句话
+
+以X86平台为例演示Anakin框架的使用方法,注意编译时需要打开X86编译开关
+
+## 在NV的GPU上使用Anakin的线程池运行CNN模型
+示例文件为example_nv_cnn_net_multi_thread.cpp ,示例使用worker的同步预测接口
+整体流程与在NV的GPU上运行CNN模型相似,不同之处如下:
+- 用模型地址和线程池大小初始化worker对象
+- 将输入tensor注入任务队列,获得输出tensor
diff --git a/doc/fluid/new_docs/advanced_usage/deploy/anakin_gpu_benchmark.md b/doc/fluid/new_docs/advanced_usage/deploy/anakin_gpu_benchmark.md
new file mode 100644
index 0000000000..667f9396f1
--- /dev/null
+++ b/doc/fluid/new_docs/advanced_usage/deploy/anakin_gpu_benchmark.md
@@ -0,0 +1,170 @@
+# Anakin GPU Benchmark
+
+## Machine:
+
+>  CPU: `12-core Intel(R) Xeon(R) CPU E5-2620 v2 @2.10GHz`
+>  GPU: `Tesla P4`
+>  cuDNN: `v7`
+
+
+## Counterpart of anakin :
+
+The counterpart of **`Anakin`** is the acknowledged high-performance inference engine **`NVIDIA TensorRT 3`**. For the models that TensorRT 3 does not support, we use custom plugins.
+
+## Benchmark Model
+
+The following convolutional neural networks are tested with both `Anakin` and `TensorRT 3`.
+ You can use a pretrained caffe model or a model trained by yourself.
+ +> Please note that you should transform caffe model or others into anakin model with the help of [`external converter ->`](../docs/Manual/Converter_en.md) + + +- [Vgg16](#1) *caffe model can be found [here->](https://gist.github.com/jimmie33/27c1c0a7736ba66c2395)* +- [Yolo](#2) *caffe model can be found [here->](https://github.com/hojel/caffe-yolo-model)* +- [Resnet50](#3) *caffe model can be found [here->](https://github.com/KaimingHe/deep-residual-networks#models)* +- [Resnet101](#4) *caffe model can be found [here->](https://github.com/KaimingHe/deep-residual-networks#models)* +- [Mobilenet v1](#5) *caffe model can be found [here->](https://github.com/shicai/MobileNet-Caffe)* +- [Mobilenet v2](#6) *caffe model can be found [here->](https://github.com/shicai/MobileNet-Caffe)* +- [RNN](#7) *not support yet* + +We tested them on single-GPU with single-thread. + +### VGG16 + +- Latency (`ms`) of different batch + +| BatchSize | TensorRT | Anakin | +| --- | --- | --- | +| 1 | 8.8690 | 8.2815 | +| 2 | 15.5344 | 13.9116 | +| 4 | 26.6000 | 21.8747 | +| 8 | 49.8279 | 40.4076 | +| 32 | 188.6270 | 163.7660 | + +- GPU Memory Used (`MB`) + +| BatchSize | TensorRT | Anakin | +| --- | --- | --- | +| 1 | 963 | 997 | +| 2 | 965 | 1039 | +| 4 | 991 | 1115 | +| 8 | 1067 | 1269 | +| 32 | 1715 | 2193 | + + +### Yolo + +- Latency (`ms`) of different batch + +| BatchSize | TensorRT | Anakin | +| --- | --- | --- | +| 1 | 16.4596| 15.2124 | +| 2 | 26.6347| 25.0442 | +| 4 | 43.3695| 43.5017 | +| 8 | 80.9139 | 80.9880 | +| 32 | 293.8080| 310.8810 | + +- GPU Memory Used (`MB`) + +| BatchSize | TensorRT | Anakin | +| --- | --- | --- | +| 1 | 1569 | 1775 | +| 2 | 1649 | 1815 | +| 4 | 1709 | 1887 | +| 8 | 1731 | 2031 | +| 32 | 2253 | 2907 | + +### Resnet50 + +- Latency (`ms`) of different batch + +| BatchSize | TensorRT | Anakin | +| --- | --- | --- | +| 1 | 4.2459 | 4.1061 | +| 2 | 6.2627 | 6.5159 | +| 4 | 10.1277 | 11.3327 | +| 8 | 17.8209 | 20.6680 | +| 32 | 65.8582 | 77.8858 | + +- GPU Memory Used (`MB`) + +| BatchSize | TensorRT | Anakin | +| --- | --- | --- | +| 1 | 531 | 503 | +| 2 | 543 | 517 | +| 4 | 583 | 541 | +| 8 | 611 | 589 | +| 32 | 809 | 879 | + +### Resnet101 + +- Latency (`ms`) of different batch + +| BatchSize | TensorRT | Anakin | +| --- | --- | --- | +| 1 | 7.5562 | 7.0837 | +| 2 | 11.6023 | 11.4079 | +| 4 | 18.3650 | 20.0493 | +| 8 | 32.7632 | 36.0648 | +| 32 | 123.2550 | 135.4880 | + +- GPU Memory Used (`MB)` + +| BatchSize | TensorRT | Anakin | +| --- | --- | --- | +| 1 | 701 | 683 | +| 2 | 713 | 697 | +| 4 | 793 | 721 | +| 8 | 819 | 769 | +| 32 | 1043 | 1059 | + +### MobileNet V1 + +- Latency (`ms`) of different batch + +| BatchSize | TensorRT | Anakin | +| --- | --- | --- | +| 1 | 45.5156 | 1.3947 | +| 2 | 46.5585 | 2.5483 | +| 4 | 48.4242 | 4.3404 | +| 8 | 52.7957 | 8.1513 | +| 32 | 83.2519 | 31.3178 | + +- GPU Memory Used (`MB`) + +| BatchSize | TensorRT | Anakin | +| --- | --- | --- | +| 1 | 329 | 283 | +| 2 | 345 | 289 | +| 4 | 371 | 299 | +| 8 | 393 | 319 | +| 32 | 531 | 433 | + +### MobileNet V2 + +- Latency (`ms`) of different batch + +| BatchSize | TensorRT | Anakin | +| --- | --- | --- | +| 1 | 65.6861 | 2.9842 | +| 2 | 66.6814 | 4.7472 | +| 4 | 69.7114 | 7.4163 | +| 8 | 76.1092 | 12.8779 | +| 32 | 124.9810 | 47.2142 | + +- GPU Memory Used (`MB`) + +| BatchSize | TensorRT | Anakin | +| --- | --- | --- | +| 1 | 341 | 293 | +| 2 | 353 | 301 | +| 4 | 385 | 319 | +| 8 | 421 | 351 | +| 32 | 637 | 551 | + +## How to run those Benchmark models? + +> 1. 
At first, you should parse the caffe model with [`external converter`](https://github.com/PaddlePaddle/Anakin/blob/b95f31e19993a192e7428b4fcf852b9fe9860e5f/docs/Manual/Converter_en.md).
+> 2. Switch to the *source_root/benchmark/CNN* directory. Use 'mkdir ./models' to create ./models and put anakin models into this directory.
+> 3. Run the command 'sh run.sh'; files will be created under logs to save the model logs with different batch sizes. Finally, the model latency summary will be displayed on the screen.
+> 4. If you want to get more detailed information about op time, you can modify CMakeLists.txt, setting `ENABLE_OP_TIMER` to `YES`, then recompile and run. You will find detailed information in the model log file.
diff --git a/doc/fluid/new_docs/advanced_usage/deploy/anakin_tutorial.md b/doc/fluid/new_docs/advanced_usage/deploy/anakin_tutorial.md
new file mode 100644
index 0000000000..5efbc89abd
--- /dev/null
+++ b/doc/fluid/new_docs/advanced_usage/deploy/anakin_tutorial.md
@@ -0,0 +1,639 @@
+# Anakin 使用教程 ##
+
+本教程将会简略地介绍Anakin的工作原理,一些基本的Anakin API,以及如何调用这些API。
+
+## 内容 ###
+
+- [Anakin的工作原理](#principle)
+- [Anakin APIs](#api)
+- [示例代码](#example)
+
+## Anakin的工作原理 ###
+
+![Anakin_principle](../pics/anakin_fm_ch.png)
+
+用Anakin来进行前向计算主要分为三个步骤:
+
+- 将外部模型通过[Anakin Parser](Converter_ch.md)解析为Anakin模型
+  在使用Anakin之前,用户必须将所有其他模型转换成Anakin模型,我们提供了转换脚本,用户可通过[Anakin Parser](Converter_ch.md)进行模型转换。
+- 生成Anakin计算图
+  加载Anakin模型生成原始计算图,然后需要对原始计算图进行优化。你只需要调用相应的API优化即可。
+- 执行计算图
+  Anakin会选择不同硬件平台执行计算图。
+
+
+## Anakin APIs ###
+### Tensor ####
+
+`Tensor`提供基础的数据操作和管理,为ops提供统一的数据接口。`Tensor`包含以下几个属性:
+
+- Buffer
+  数据存储区
+- Shape
+  数据的维度信息
+- Event
+  用于异步计算的同步
+
+ `Tensor` 类包含三个`Shape`对象, 分别是`_shape`, `_valid_shape`和 `_offset`。 `_shape`为`tensor`真正空间信息,`_valid_shape`表示当前`tensor`使用的空间信息, `_offset`表示当前`tensor`数据指针相对于真正数据空间的信息。 `Tensor`的不同维度分别与数学中的向量、矩阵等相对应,如下表所示。
+
+
+Dimensions | Math entity |
+ :----: | :----:
+1 | vector
+2 | matrix
+3 | 3-tensor
+n | n-tensor
+
+#### 声明tensor对象
+
+`Tensor`接受三个模板参数:
+
+
+```c++
+ template<typename TargetType, DataType datatype, typename LayOutType = NCHW>
+ class Tensor .../* Inherit other class */{
+  //some implements
+  ...
+ };
+```
+
+TargetType是平台类型,如X86,GPU等等,在Anakin内部有相应的标识与之对应;datatype是普通的数据类型,在Anakin内部也有相应的标志与之对应;[LayOutType](#layout)是数据分布类型,如batch x channel x height x width [NxCxHxW], 在Anakin内部用一个struct来标识。 Anakin中数据类型与基本数据类型的对应如下:
+
+1. TargetType
+
+ Anakin TargetType | platform
+  :----: | :----:|
+ NV | NVIDIA GPU
+ ARM | ARM
+ AMD | AMD GPU
+ X86 | X86
+ NVHX86 | NVIDIA GPU with Pinned Memory
+
+2. DataType
+
+Anakin DataType | C++ | Description
+:---: | :---: | :---: |
+AK_HALF | short | fp16
+AK_FLOAT | float | fp32
+AK_DOUBLE | double | fp64
+AK_INT8 | char | int8
+AK_INT16 | short | int16
+AK_INT32 | int | int32
+AK_INT64 | long | int64
+AK_UINT8 | unsigned char | uint8
+AK_UINT16 | unsigned short | uint16
+AK_UINT32 | unsigned int | uint32
+AK_STRING | std::string | /
+AK_BOOL | bool | /
+AK_SHAPE | / | Anakin Shape
+AK_TENSOR | / | Anakin Tensor
+
+
+3. LayOutType
+
+Anakin LayOutType ( Tensor LayOut ) | Tensor Dimension | Tensor Support | Op Support
+:---: | :---: | :---: | :---: |
+W | 1-D | YES | NO
+HW | 2-D | YES | NO
+WH | 2-D | YES | NO
+NW | 2-D | YES | YES
+NHW | 3-D | YES |YES
+NCHW ( default ) | 4-D | YES | YES
+NHWC | 4-D | YES | NO
+NCHW_C4 | 5-D | YES | YES
+
+
+理论上,Anakin支持声明1维以上的tensor,但是对于Anakin中的Op来说,只支持NW、NHW、NCHW、NCHW_C4这四种LayOut,其中NCHW是默认的LayOutType,NCHW_C4是专门针对于int8这种数据类型的。
+
+
+例子
+
+> 下面的代码将展示如何使用tensor, 我们建议先看看这些示例。
+
+> 要想获得更多关于tensor的信息, 请参考 *soure_path/core/tensor.h*
+
+> 1.
使用shape对象初始化tensor +``` c++ + //create a null tensor. A null tensor holds for nothing. + //tensor's buffer is resident at CPU and its datatype is AK_FLOAT. + //tensor's Layout is NCHW(default) + Tensor mytensor; + + //1. using shape object to create a tensor. + Shape shape1(NUM); //1-D shape. NUM is the number of dimention. + Tensor mytensor1(shape1); //1-D tensor. + + // A 4-D shape + Shape shape2(N, C, H, W); // batch x channel x height x width +``` + +>`注意:Shape的维度必须和tensor的`[LayoutType](#layout)`相同,比如Shape(N,C,H,W), 那么Tensor的 LayoutType必须是NCHW,否则会出错。如下列代码所示` + + +```c++ + // A 4-D tensor. + Tensor mytensor2(shape2); //right + + //A 4-D tensor which is resident at GPU and its datatype is AK_INT8 + Tensor mytensor3(shape2); //right + + Tensor mytensor4(shape2); //wrong!! shape's dimetion must be equal to tensor's Layout. + Tensor mytensor5(shape2); //wrong!!!! + +``` + +> 2. 使用现有的数据和shape初始化tensor + +```c++ + + /** + * A construtor of Tensor. + * data_ptr is a pointer to any data type of data + * TargetType is type of a platform [Anakin TargetType] + * id : device id + * shape: a Anakin shape + */ + Tensor(Dtype* data_ptr, TargetType_t target, int id, Shape shape); + + //using existing data feed to a tensor + Tensor mytensor(data_ptr, TargetType, device_id, shape); //shape must has dimention (N, C, H, W). + +``` + +> 3. 使用tensor初始化tensor + +```c++ + Tensor tensor(exist_tensor); +``` + + +> 提示: 你可以用` typedef Tensor Tensor4d_X86 `方便定义tensor + + +#### 填充tensor数据区 + + +填充数据区得看你申明tensor的方式, 下面展示了如何填充tensor的数据区。 + +```c++ +首先来看看tensor的四种声明方式: + +1. Tensor mytensor; +2. Tensor mytensor1(shape1); +3. Tensor mytensor(data_ptr, TargetType, device_id, shape); +4. Tensor tensor(exist_tensor); + + +相关的声明方式的数据填充方法如下: + +1:声明一个空的tensor,此时没有为其分配内存,所以,我们需要手动的为其分配内存。 + + //parama shape + mytensor.re_alloc(Shape shape); + + //Get writable pointer to mytensor. + //parama index (int): where you start to write. + //Dtype is your data type such int, float or double. + Dtype *p = mytensor.mutable_data(index/*=0*/); + //write data to mytensor + for(int i = 0; i < mytensor.size(); i++){ + p[i] = 1.0f; + } + //do something ... + +2: 这种声明方式会自动分配内存 + + //Get writable pointer to mytensor. + //parama index (int): where you start to write. + //Dtype is your data type such int, float or double. + Dtype *p = mytensor1.mutable_data(index/*=0*/); + //write data to mytensor + for(int i = 0; i < mytensor.size(); i++){ + p[i] = 1.0f; + } + //do something ... + + +3:在该种声明方式中,我们仍不需要手动为其分配内存。但在构造函数内部是否为其分配内存,得依情况而定。如果data_ptr和申明的 +tensor都在都一个目标平台上,那么该tensor就会与data_ptr共享内存空间,相反,如果他们不在同一个平台上(如data_ptr在X86上,而 +tensor在GPU上),那么此时tensor就会开辟一个新的内存空间,并将data_ptr所指向的数据拷贝到tensor的buffer中。 + + //Get writable pointer to mytensor. + //parama index (int): where you start to write. + //Dtype is your data type such int, float or double. + Dtype *p = mytensor.mutable_data(index/*=0*/); + //write data to mytensor + for(int i = 0; i < mytensor.size(); i++){ + p[i] = 1.0f; + } + //do something ... + +4:该种方式仍不需要手动分配内存 + + //Get writable pointer to mytensor. + //parama index (int): where you start to write. + //Dtype is your data type such int, float or double. + Dtype *p = mytensor.mutable_data(index/*=0*/); + //write data to mytensor + for(int i = 0; i < mytensor.size(); i++){ + p[i] = 1.0f; + } + //do something ... + + +另外,你还可以获取一个tensor的可读指针,示例如下: + //Get read-only pointer to mytensor. + //parama index (int): where you start to read. + //Dtype is your data type such int, float or double. + Dtype *p = mytensor.data(index/*=0*/); + //do something ... 
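+
+//(示意)若tensor位于GPU,通常不直接读device端指针,而是先声明一个host端tensor,
+//用copy_from将数据拷回host后再读取;copy_from的用法可参考下文Net一节中的示例。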
+
+如果想更详细地了解tensor,请查阅 *source_root/saber/core/tensor.h*。
+
+#### 获取tensor的shape
+
+```c++
+//some declarations
+// ...
+Shape shape = mytensor.shape();
+
+//Get the first dimension size of tensor, if it has.
+int d1 = shape[0];
+
+//Get the second dimension size of tensor, if it has.
+int d2 = shape[1];
+
+...
+
+//Get the n-th dimension size of tensor, if it has.
+int dn = shape[n-1];
+
+
+//Get a tensor's dimension
+int dims = mytensor.dims();
+
+//Get the size of tensor.
+//size = d1 x d2 x ... x dn.
+int size = mytensor.size();
+
+//Get the size of tensor at interval [Di, Dj),
+//from i-th dimension to j-th dimension, but not including the j-th dimension,
+//which means di x (di+1) x ... x (dj - 1).
+int interval_size = mytensor.count(start, end);
+```
+
+#### 设置tensor的shape
+
+我们可以用tensor的成员函数set_shape来设置tensor的shape。下面是set_shape的定义:
+
+```c++
+/**
+ * \brief set a tensor's shape
+ * \param valid_shape [a Shape object]
+ * \param shape [a Shape object]
+ * \param offset [a Shape object]
+ * \return the status of this operation, that means whether it succeeds or not.
+ */
+SaberStatus set_shape(Shape valid_shape, Shape shape = Shape::zero(TensorAPI::layout_dims::value), Shape offset = Shape::minusone(TensorAPI::layout_dims::value));
+```
+
+这个成员函数只设置tensor的shape。这些shape对象(valid_shape, shape, offset)的[LayOutType](#layout)必须和当前tensor的相应三个shape对象的LayOutType相同;如果不同,就会出错并返回SaberInvalidValue;如果相同,则成功设置tensor的shape。
+
+```c++
+
+// some declarations
+// ...
+//valid_shape, shape, offset are Shape objects;
+//All these Shape objects' LayOutType must be equal to mytensor's.
+mytensor.set_shape(valid_shape, shape, offset);
+
+```
+
+#### 重置 tensor的shape
+
+```c++
+//some declarations
+Shape shape, valid_shape, offset;
+
+//do some initializations
+...
+mytensor.reshape(valid_shape, shape, offset);
+```
+
+注意: Reshape操作仍然要求shape的[LayOutType](#layout)与tensor的相同。
+
+
+### Graph ###
+
+`Graph`类负责加载Anakin模型生成计算图、对图进行优化、存储模型等操作。
+
+#### 图的声明
+
+与`Tensor`一样,graph也接受三个模板参数。
+
+```c++
+
+template<typename TargetType, DataType Dtype, Precision Ptype>
+class Graph ... /* inherit other class*/{
+
+  //some implements
+  ...
+
+};
+```
+
+前面已经介绍过[TargetType](#target)和[DataType](#datatype)是Anakin内部自定义数据类型。[TargetType](#target)表示平台类型(如NV、X86),[DataType](#datatype)是Anakin基本数据类型,与C++/C中的基本数据类型相对应。[Precision](#precision)为op所支持的精度类型,稍后我们再介绍它。
+
+
+```c++
+
+//Create an empty graph object.
+Graph<NV, AK_FLOAT, Precision::FP32> graph;
+
+//Create a pointer to an empty graph.
+Graph<NV, AK_FLOAT, Precision::FP32> *graph = new Graph<NV, AK_FLOAT, Precision::FP32>();
+
+//Create a pointer to an empty graph.
+auto graph = new Graph<NV, AK_FLOAT, Precision::FP32>();
+
+```
+
+#### 加载 Anakin 模型
+
+```c++
+//some declarations
+...
+auto graph = new Graph<NV, AK_FLOAT, Precision::FP32>();
+std::string model_path = "the/path/to/where/your/models/are";
+const char *model_path1 = "the/path/to/where/your/models/are";
+
+//Loading Anakin model to generate a compute graph.
+auto status = graph->load(model_path);
+
+//Or this way.
+status = graph->load(model_path1);
+//Check whether load operation succeeds.
+if(!status){
+    std::cout << "error" << std::endl;
+    //do something...
+}
+
+```
+
+#### 优化计算图
+
+```c++
+//some declarations
+...
+//Load graph.
+...
+//According to the ops of loaded graph, optimize compute graph.
+graph->Optimize();
+
+```
+
+> 注意: 第一次加载原始图,必须要优化。
+
+#### 保存模型
+
+你可以在任何时候保存模型。特别地,你可以保存一个优化过的模型,这样,下次再加载模型时,就不必进行优化操作。
+
+```c++
+//some declarations
+...
+//Load graph.
+...
+// save a model
+//save_model_path: the path to where your model is.
+auto status = graph->save(save_model_path);
+
+//Checking
+if(!status){
+    std::cout << "error" << std::endl;
+    //do something...
+}
+```
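+
+借助保存下来的优化模型,下一次加载时就可以跳过优化这一步。下面是一个示意性片段(沿用上文的接口,save_model_path 即上文保存模型时使用的路径):
+
+```c++
+//Load the optimized model that was saved before.
+auto graph = new Graph<NV, AK_FLOAT, Precision::FP32>();
+auto status = graph->load(save_model_path);
+if(!status){
+    std::cout << "error" << std::endl;
+}
+//该模型在保存前已经优化过,这里可以不再调用 graph->Optimize()。
+```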
+
+#### 重新设置计算图里的tensor的shape
+
+```c++
+//some declarations
+...
+//Load graph.
+...
+std::vector<int> shape{10, 256, 256, 10};
+//input_name : std::string.
+//Reshape a tensor named input_name.
+graph->Reshape(input_name, shape); //Note: shape is a vector, not a Shape object.
+```
+
+#### 设置 batch size
+
+`Graph` 支持重新设置batch size的大小。
+
+```c++
+//some declarations
+...
+//Load graph.
+...
+//input_name : std::string.
+//Reset batch size of the input named input_name.
+int new_batch_size = 4;
+graph->ResetBatchSize(input_name, new_batch_size);
+```
+
+### Net ###
+
+
+`Net` 是计算图的执行器,你可以通过Net对象获得输入和输出。
+
+#### Creating a graph executor
+
+`Net`接受四个模板参数。
+
+
+```c++
+template<typename TargetType, DataType Dtype, Precision PType, OpRunType RunType = OpRunType::ASYNC>
+class Net{
+  //some implements
+  ...
+
+};
+```
+
+由于有些Op可能支持多种精度,我们可以通过Precision来指定。OpRunType表示同步或异步类型,异步是默认类型:OpRunType::SYNC表示同步,在GPU上只有单个流;OpRunType::ASYNC表示异步,在GPU上有多个流并以异步方式执行。实际上,Precision和OpRunType都是enum class,详细设计请参考 *source_root/framework/core/types.h*。
+
+
+1. Precision
+
+Precision | Op support
+:---: | :---:
+Precision::INT4 | NO
+Precision::INT8 | NO
+Precision::FP16 | NO
+Precision::FP32 | YES
+Precision::FP64 | NO
+
+现在Op的精度只支持FP32,但在将来我们会支持剩下的Precision。
+
+
+
+2. OpRunType
+
+OpRunType | Sync/Async | Description
+:---: | :---: | :---:
+OpRunType::SYNC | Synchronous | single-stream on GPU
+OpRunType::ASYNC | Asynchronous | multi-stream on GPU
+
+用graph对象创建一个执行器。
+
+```c++
+//some declarations
+...
+//Create a pointer to a graph.
+auto graph = new Graph<NV, AK_FLOAT, Precision::FP32>();
+//do something...
+...
+
+//create an executor
+Net<NV, AK_FLOAT, Precision::FP32> executor(*graph);
+
+```
+
+#### 获取输入输出tensor
+
+
+获取输入输出tensor,并填充输入tensor的buffer。如果想要获取输入和输出tensor,那么必须指定输入的名字,如"input_0", "input_1", "input_2", ...,必须传入如上字符串才能够获得输入tensor。另外,如果想知道input_i对应哪个输入,你需要去dash board查看;如何使用dash board请看[Anakin Parser](Converter_ch.md)。请看如下示例代码:
+
+```c++
+//some declarations
+...
+
+//create an executor
+//TargetType is NV [NVIDIA GPU]
+Net<NV, AK_FLOAT, Precision::FP32> executor(*graph);
+
+//Get the first input tensor.
+//The following tensors(tensor_in0, tensor_in1 ...) are resident at GPU.
+//Note: Member function get_in returns a pointer to tensor.
+Tensor<NV, AK_FLOAT>* tensor_in0 = executor.get_in("input_0");
+
+//If you have multiple input tensors,
+//you just type the code below.
+Tensor<NV, AK_FLOAT>* tensor_in1 = executor.get_in("input_1");
+...
+auto tensor_inn = executor.get_in("input_n");
+```
+
+当得到输入tensor之后,就可以填充它的数据区了。
+
+```c++
+//This tensor is resident at GPU.
+auto tensor_d_in = executor.get_in("input_0");
+
+//If we want to feed above tensor, we must feed the tensor which is resident at host. And then copy the host tensor to the device's one.
+
+//using Tensor4d = Tensor<X86, AK_FLOAT, NCHW>;
+Tensor4d tensor_h_in; //host tensor;
+//Tensor<X86, AK_FLOAT> tensor_h_in;
+
+//Allocate memory for host tensor.
+tensor_h_in.re_alloc(tensor_d_in->valid_shape());
+//Get a writable pointer to tensor.
+float *h_data = tensor_h_in.mutable_data();
+
+//Feed your tensor.
+/** example
+for(int i = 0; i < tensor_h_in.size(); i++){
+  h_data[i] = 1.0f;
+}
+*/
+//Copy host tensor's data to device tensor.
+tensor_d_in->copy_from(tensor_h_in);
+
+// And then do inference.
+```
+
+
+类似地,我们可以利用成员函数get_out来获得输出tensor。但与获得输入tensor不同的是,我们需要指定输出tensor结点的名字,这个可以从dash board中看到,请从[Anakin Parser](Converter_ch.md)中查看dash board的使用方法。假如有个输出结点叫pred_out,那么我们可以通过如下代码获得相应的输出tensor:
+
+```c++
+//Note: this tensor is resident at GPU.
+Tensor<NV, AK_FLOAT>* tensor_out_d = executor.get_out("pred_out");
+
+```
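+
+拿到输出tensor的指针后,通常还要把GPU上的结果拷回host端再读取。下面是一个示意性片段(仅为示例:沿用上文的 valid_shape/re_alloc/copy_from/data 接口,一般在预测执行完成之后进行):
+
+```c++
+//Copy the result from the device tensor to a host tensor (after prediction).
+Tensor<X86, AK_FLOAT> tensor_out_h;
+tensor_out_h.re_alloc(tensor_out_d->valid_shape());
+tensor_out_h.copy_from(*tensor_out_d);
+
+//Read the result through a read-only pointer.
+float* out_data = tensor_out_h.data();
+```
+
+
+#### Executing graph
+
+
+当一切准备就绪后,我们就可以执行真正的计算了!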
+```c++
+executor.prediction();
+```
+
+## 示例代码 ##
+
+下面的例子展示了如何调用Anakin。
+
+在此之前,请确保你已经有了Anakin模型。如果还没有,请使用[Anakin Parser](Converter_ch.md)转换你的模型。
+
+### Single-thread
+
+单线程例子在 *source_root/test/framework/net/net_exec_test.cpp* 中。
+
+```c++
+
+std::string model_path = "your_Anakin_models/xxxxx.anakin.bin";
+// Create an empty graph object.
+auto graph = new Graph<NV, AK_FLOAT, Precision::FP32>();
+// Load Anakin model.
+auto status = graph->load(model_path);
+if(!status ) {
+    LOG(FATAL) << " [ERROR] " << status.info();
+}
+// Reshape
+graph->Reshape("input_0", {10, 384, 960, 10});
+// You must optimize graph for the first time.
+graph->Optimize();
+// Create an executor.
+Net<NV, AK_FLOAT, Precision::FP32> net_executer(*graph);
+
+//Get your input tensors through some specific string such as "input_0", "input_1", and
+//so on.
+//And then, feed the input tensor.
+//If you don't know which input these specific strings ("input_0", "input_1") correspond with, you can launch dash board to find out.
+auto d_tensor_in_p = net_executer.get_in("input_0");
+Tensor4d<X86, AK_FLOAT> h_tensor_in;
+auto valid_shape_in = d_tensor_in_p->valid_shape();
+for (int i = 0; i < valid_shape_in.size(); i++) {
+    LOG(INFO) << "detect input dims[" << i << "]: " << valid_shape_in[i];
+}
+h_tensor_in.re_alloc(valid_shape_in);
+float* h_data = h_tensor_in.mutable_data();
+for (int i = 0; i < h_tensor_in.size(); i++) {
+    h_data[i] = 1.0f;
+}
+d_tensor_in_p->copy_from(h_tensor_in);
+
+//Do inference.
+net_executer.prediction();
+
+//Get result tensor through the name of output node.
+//And also, you need to see the dash board again to find out how many output nodes there are and remember their names.
+
+//For example, you've got an output node named obj_pred_out.
+//Then, you can get an output tensor.
+auto d_tensor_out_0_p = net_executer.get_out("obj_pred_out"); //get_out returns a pointer to output tensor.
+auto d_tensor_out_1_p = net_executer.get_out("lc_pred_out"); //get_out returns a pointer to output tensor.
+//......
+// do something else ...
+//...
+//save model.
+//You might not optimize the graph when you load the saved model again.
+std::string save_model_path = model_path + std::string(".saved");
+status = graph->save(save_model_path);
+if (!status ) {
+    LOG(FATAL) << " [ERROR] " << status.info();
+}
+
+```
diff --git a/doc/fluid/new_docs/advanced_usage/deploy/build_and_install_lib_cn.rst b/doc/fluid/new_docs/advanced_usage/deploy/build_and_install_lib_cn.rst
new file mode 100644
index 0000000000..3884284ea0
--- /dev/null
+++ b/doc/fluid/new_docs/advanced_usage/deploy/build_and_install_lib_cn.rst
@@ -0,0 +1,99 @@
+.. _install_or_build_cpp_inference_lib:
+
+安装与编译C++预测库
+===========================
+
+直接下载安装
+-------------
+
+====================== ========================================
+版本说明 C++预测库
+====================== ========================================
+cpu_avx_mkl `fluid.tgz `_
+cpu_avx_openblas `fluid.tgz `_
+cpu_noavx_openblas `fluid.tgz `_
+cuda7.5_cudnn5_avx_mkl `fluid.tgz `_
+cuda8.0_cudnn5_avx_mkl `fluid.tgz `_
+cuda8.0_cudnn7_avx_mkl `fluid.tgz `_
+cuda9.0_cudnn7_avx_mkl `fluid.tgz `_
+====================== ========================================
+
+从源码编译
+----------
+用户也可以从 PaddlePaddle 核心代码编译C++预测库,只需在编译时配置下面这些编译选项:
+
+================= =========
+选项 值
+================= =========
+CMAKE_BUILD_TYPE Release
+FLUID_INSTALL_DIR 安装路径
+WITH_FLUID_ONLY ON(推荐)
+WITH_SWIG_PY OFF(推荐)
+WITH_PYTHON OFF(推荐)
+WITH_GPU ON/OFF
+WITH_MKL ON/OFF
+================= =========
+
+建议按照推荐值设置,以避免链接不必要的库。其它可选编译选项按需进行设定。
+
+下面的代码片段从github拉取最新代码,配置编译选项(需要将PADDLE_ROOT替换为PaddlePaddle预测库的安装路径):
+
+  ..
code-block:: bash + + pip install paddlepaddle-gpu + PADDLE_ROOT=/path/of/capi + git clone https://github.com/PaddlePaddle/Paddle.git + cd Paddle + mkdir build + cd build + cmake -DFLUID_INSTALL_DIR=$PADDLE_ROOT \ + -DCMAKE_BUILD_TYPE=Release \ + -DWITH_FLUID_ONLY=ON \ + -DWITH_SWIG_PY=OFF \ + -DWITH_PYTHON=OFF \ + -DWITH_MKL=OFF \ + -DWITH_GPU=OFF \ + .. + make + make inference_lib_dist + +成功编译后,使用C++预测库所需的依赖(包括:(1)编译出的PaddlePaddle预测库和头文件;(2)第三方链接库和头文件;(3)版本信息与编译选项信息) +均会存放于PADDLE_ROOT目录中。目录结构如下: + + .. code-block:: text + + PaddleRoot/ + ├── CMakeCache.txt + ├── paddle + │   └── fluid + │   ├── framework + │   ├── inference + │   ├── memory + │   ├── platform + │   ├── pybind + │   └── string + ├── third_party + │   ├── boost + │   │   └── boost + │   ├── eigen3 + │   │   ├── Eigen + │   │   └── unsupported + │   └── install + │   ├── gflags + │   ├── glog + │   ├── mklml + │   ├── protobuf + │   ├── snappy + │   ├── snappystream + │   └── zlib + └── version.txt + +version.txt 中记录了该预测库的版本信息,包括Git Commit ID、使用OpenBlas或MKL数学库、CUDA/CUDNN版本号,如: + + .. code-block:: text + + GIT COMMIT ID: c95cd4742f02bb009e651a00b07b21c979637dc8 + WITH_MKL: ON + WITH_GPU: ON + CUDA version: 8.0 + CUDNN version: v5 diff --git a/doc/fluid/new_docs/advanced_usage/deploy/convert_paddle_to_anakin.md b/doc/fluid/new_docs/advanced_usage/deploy/convert_paddle_to_anakin.md new file mode 100644 index 0000000000..56ca582b2b --- /dev/null +++ b/doc/fluid/new_docs/advanced_usage/deploy/convert_paddle_to_anakin.md @@ -0,0 +1,73 @@ +# 模型转换指南 + +Anakin 支持不同框架的模型预测。但由于格式的差别,Anakin 需要您预先转换模型。本文档介绍如何转换模型。 + +## 简介 + +Anakin 模型转换器输入支持 Caffe 和 Fluid 两种格式的预测模型,模型包含网络结构(model 或 prototxt)和权重参数(param 或 caffemodel)。 + +模型转换的输出是一个 bin 文件,它作为 Anakin 框架的 graph 参数导入。 + +您还可以使用模型转换器的 launch board 功能生成网络结构的 HTML 预览。 + + +## 系统要求 + +- python 2.7+ +- pyyaml +- flask +- protobuf 3.5+ + + +## 用法 + +### 1、环境 +转换器所需的依赖标注于 *系统要求* 一节。 + +### 2、配置 +您需要对 *config.yaml* 文件进行修改以告知您的需求。工程中给出了 *config.yaml* 示例,下面作进一步说明。 + +#### config.yaml +```bash +OPTIONS: + Framework: CAFFE # 依框架类型填写 CAFFE 或 FLUID + SavePath: ./output # 转换结束后模型的保存位置 + ResultName: googlenet # 输出模型的名字 + Config: + LaunchBoard: ON # 是否生成网络结构预览页面 + Server: + ip: 0.0.0.0 + port: 8888 # 从一个可用端口访问预览页面 + OptimizedGraph: # 当您使用了 Anakin 框架的 Optimized 功能时,才应该打开此项 + enable: OFF + path: /path/to/anakin_optimized_anakin_model/googlenet.anakin.bin.saved + LOGGER: + LogToPath: ./log/ # 生成日志的路径 + WithColor: ON + +TARGET: + CAFFE: + # 当 Framework 为 CAFFE 时需填写 + ProtoPaths: + - /path/to/caffe/src/caffe/proto/caffe.proto + PrototxtPath: /path/to/your/googlenet.prototxt + ModelPath: /path/to/your/googlenet.caffemodel + + FLUID: + # 当 Framework 为 FLUID 时需填写 + Debug: NULL + ProtoPaths: + - / + PrototxtPath: /path/to/fluid/inference_model + ModelPath: /path/to/fluid/inference_model + # ... 
+```
+
+### 3、转换
+在完成配置文件的修改后,您只需执行 ```python converter.py``` 就可以进行模型转换了。
+
+
+### 4、预览
+最后一步,就是在浏览器中查看令人振奋的转换结果!网址是在 *config.yaml* 中配置的,例如 http://0.0.0.0:8888 。
+
+> 注意:若您使用了默认的 IP 地址 0.0.0.0,请在预览时使用真实的服务器地址 real_ip:port 替代它。
diff --git a/doc/fluid/new_docs/advanced_usage/deploy/how_to_add_anakin_op.md b/doc/fluid/new_docs/advanced_usage/deploy/how_to_add_anakin_op.md
new file mode 100644
index 0000000000..f2783eb9f5
--- /dev/null
+++ b/doc/fluid/new_docs/advanced_usage/deploy/how_to_add_anakin_op.md
@@ -0,0 +1,405 @@
+# 如何增加新的Operator
+
+## 基本概念
+
+简单介绍一下几个与Operator相关的基本概念,详情请参考设计文档。
+
+```framework```: 上层的逻辑代码,负责从parser中获取参数及weights,添加op时主要修改framework/operator目录下的内容。
+
+```saber```: 底层的实现代码,Anakin通过saber封装了不同的backends,不同的实现(impl)分别特化出自己的实现,外层framework通过不同的template进入各自的impl完成调用。各个op的parameter放在saber/saber_funcs_param.h文件中,增加op主要修改saber/funcs下的内容。
+
+saber的文件结构:
+* saber/funcs下的是各个funcs的外部接口,这一层的op与具体的设备实现无关,只与各op完成的功能有关。由于跟实现(impl)无关,本层文件名均不带impl。
+* saber/funcs/impl下是各个op的impl声明,特定设备需要完成该层声明的特化版本,如saber/funcs/impl/x86实现了上一层impl声明的x86特化版本,saber/funcs/impl/cuda实现了上一层impl声明的NV特化版本。当增加新的backends时需要特化出新的实现。本层代码同实现相关,均带有```impl_```前缀。
+* saber/funcs/impl/cuda/base/cuda_c内有cuda```.cu```扩展名的文件,添加cuda的kernel需要在该文件目录下添加。
+* saber/funcs/impl/cuda/base/sass 内有不同架构的汇编代码编译的静态库。
+
+### 涉及到的基类及各个类之间的关系
+
+简单介绍相关的基类:
+
+* ```anakin::Operator```: framework的operator基类,位于framework/core/operator/operator.h
+
+* ```anakin::saber::BaseFunc```: saber对外的op接口基类,提供统一的对外接口,位于saber/funcs/base.h。BaseFunc的```compute_output_shape```接口只根据input的shape和param的参数计算输出的shape,并通过```tensor```的```set_shape```接口(只设置shape,不分配空间)设置到output中。```operator()```接口为各个op的计算接口。
+
+* ```anakin::saber::ImplBase```: saber设备实现的op的接口,所有设备相关实现的基类。位于saber/funcs/impl/impl_base.h。实现版本中这里分为两类:一类以```vender_```为前缀,带有```vender_```的代码意为使用第三方库来实现该op,如cudnn的conv,或mkl的conv等等,这类op的性能我们难以调优,因此单独列为一类;另一类是带有源码的saber实现,这些实现都以```saber_```为前缀,此类实现带有源码,能够通过后续优化不断提升性能,实现起名时需要注意这一点。
+
+## 添加operator
+
+添加一个新的op需要以下几步:
+
+1. 添加saber的param
+2. 定义saber的Operator类
+3. 定义新的impl声明
+4. 完成新的impl实现
+5. 增加framework的实现或特化
+
+接下来就针对这几步,以一个简单例子介绍实现。
+
+例如我们要添加新的Mul op,计算公式如下:$$Out = \alpha \cdot X \cdot Y$$
+
+### 为operator增加param
+
+涉及到的文件:```saber/saber_funcs_param.h```。如果之前已经存在需要添加的op的param,这一步可以跳过。
+这里```XXXParam```是一个```struct```,包含一个无参数的构造函数、含参数的构造函数、复制构造函数、```operator=()```及```operator==()```。
+```
+template <typename opTensor> // 通过tensor类型能够获得target, datatype, layout
+struct MulParam{
+  MulParam()
+    : alpha(0)
+  {}
+  MulParam(float alpha_in)
+    : alpha(alpha_in)
+  {}
+  MulParam(const MulParam& right)
+    : alpha(right.alpha)
+  {}
+  MulParam &operator=(const MulParam &right) {
+    alpha = right.alpha;
+    return *this;
+  }
+  bool operator==(const MulParam &right) {
+    return alpha == right.alpha;
+  }
+  float alpha;
+};
+```
+
+### 定义Operator类
+涉及到的文件:```saber/funcs/mul.h```。如果之前定义过该op的类,这里需要修改输入的impl定义头文件。
+下面给出一个相对完整的定义结构供参考。
+```
+//不同的设备需要包含对应的operator实现.[详见](#impl)
+#ifdef NVIDIA_GPU
+#include "saber/funcs/impl/cuda/saber_mul.h"
+#include "saber/funcs/impl/cuda/vender_mul.h"
+#endif
+//如果一个设备现在还没有对应的operator实现,需要包含声明。[详见](#declare)
+#ifdef USE_X86_PLACE
+#include "saber/funcs/impl/impl_mul.h"
+#endif
+namespace anakin {
+namespace saber {
+template<typename TargetType,
+        DataType OpDtype,
+        DataType inDtype = AK_FLOAT,
+        DataType outDtype = AK_FLOAT,
+        typename LayOutType_op = NCHW,
+        typename LayOutType_in = NCHW,
+        typename LayOutType_out = NCHW>
+class Mul : public BaseFunc<
+    Tensor<TargetType, inDtype, LayOutType_in>,
+    Tensor<TargetType, outDtype, LayOutType_out>,
+    Tensor<TargetType, OpDtype, LayOutType_op>,
+    ImplBase, MulParam> {
+public:
+    using BaseFunc<
+        Tensor<TargetType, inDtype, LayOutType_in>,
+        Tensor<TargetType, outDtype, LayOutType_out>,
+        Tensor<TargetType, OpDtype, LayOutType_op>,
+        ImplBase, MulParam>::BaseFunc;
+    Mul() = default;
+    typedef Tensor<TargetType, inDtype, LayOutType_in> InDataTensor;
+    typedef Tensor<TargetType, outDtype, LayOutType_out> OutDataTensor;
+    typedef Tensor<TargetType, OpDtype, LayOutType_op> OpTensor;
+    typedef MulParam<OpTensor> Param_t;
+    typedef std::vector<InDataTensor*> Input_v;
+    typedef std::vector<OutDataTensor*> Output_v;
+    typedef std::vector<Shape> Shape_v;
+
+    virtual SaberStatus compute_output_shape(const Input_v &input,
+                                             Output_v &output, Param_t &param) override {
+        //计算输出的shape
+        Shape output_shape = (input[0]->valid_shape());
+        /* code */
+        return output[0]->set_shape(output_shape);
+    }
+    virtual SaberStatus init_impl(ImplEnum implenum) override {
+        // 不同设备均使用此init_impl, 此接口创建对应impl的实现。
+        switch (implenum) {
+            case VENDER_IMPL:
+                this->_impl.push_back(new VenderMul<TargetType, OpDtype, inDtype, outDtype,
+                                      LayOutType_op, LayOutType_in, LayOutType_out>);
+                return SaberSuccess;
+            case SABER_IMPL:
+                this->_impl.push_back(new SaberMul<TargetType, OpDtype, inDtype, outDtype,
+                                      LayOutType_op, LayOutType_in, LayOutType_out>);
+                return SaberSuccess;
+            default:
+                return SaberUnImplError;
+        }
+    }
+private:
+    virtual void pick_best_static() override {
+        if (true) // some condition?
+ this->_best_impl = this->_impl[0]; + } + virtual void pick_best_specify(ImplEnum implenum) override { + this->_best_impl = this->_impl[0]; + } +}; +} // namespace saber +} // namespace anakin +``` + +### 为operator增加新的impl声明 + +涉及的文件:```saber/funcs/impl/impl_mul.h```。不同的设备都特化同一个声明,特化版本放在对应的文件夹下,这里的声明就是给出所有设备的统一声明。下面给出一个参考。 +``` +#include "saber/funcs/impl/impl_macro.h" +namespace anakin{ +namespace saber{ +DEFINE_OP_CLASS(Mul, MulParam); // 第一个参数是op的名字,第二个是对应param的名字 +} +} +``` + +### 完成新的operator特定后端实现 + +涉及的文件:```saber/funcs/impl/xxx/vender_mul.h```或```saber/funcs/impl/xxx/saber_mul.h``` +这里```xxx```指代特定的一种设备。```vender```是指的使用第三方库实现的op,```saber```指的源码实现的op。这里以cuda的vender实现为例,简单介绍一下特化出的函数的几个基本接口。 + +``` +// include 对应的声明 +#include "saber/funcs/impl/impl_mul.h" + +namespace anakin{ +namespace saber{ +template +class VenderMul : + public ImplBase< + Tensor, + Tensor, + Tensor, + MulParam > > +{ +public: + typedef Tensor DataTensor_in; + typedef Tensor DataTensor_out; + typedef Tensor OpTensor; + typedef typename DataTensor_in::Dtype InDataType; + typedef typename DataTensor_out::Dtype OutDataType; + typedef typename OpTensor::Dtype OpDataType; + VenderMul(){} + ~VenderMul() {} + + virtual SaberStatus init(const std::vector& inputs, + std::vector& outputs, + MulParam& param, Context& ctx) { + this->_ctx = ctx; + create(inputs, outputs, param, ctx); + } + + virtual SaberStatus create(const std::vector& inputs, + std::vector& outputs, + MulParam& param, Context& ctx) { + // set内部参数 + } + + virtual SaberStatus dispatch(const std::vector& inputs, + std::vector& outputs, + MulParam& param) { + // dispatch kernel. + } + +private: +}; +} +} +``` +```init```和```create```的区别:```init```接口是第一次初始化op的时候进入的接口,此函数只在第一次初始化op时调用,这个接口一般放一些只需要执行一次的代码,如malloc或者create之类的函数。```create```函数除了第一次init执行外,在输入发生变化或者param发生变化时会再次触发,create一般放置set函数,设置内部变量,当input发生变化时这里执行一些同input或weights直接相关的代码。但create因为触发位置在网络内,如果```create```函数执行了一些严重耗时的操作,这里会拖慢整个op的执行时间,需要慎重选择操作放置的位置。 +### 添加framework的特化 + +涉及的文件:```framework/operators/mul.h```和```framework/operators/mul.cpp```。 +这里简单介绍下如果添加或修改framework内的operator + +``` +#include "framework/core/base.h" +#include "framework/core/data_types.h" +#include "framework/core/operator/operator.h" +#include "utils/logger/logger.h" +#include "saber/funcs/mul.h" // 需要包对应的saber头文件 +namespace anakin { +namespace ops { +template +class MulHelper; + +template +class Mul : public Operator { +public: + Mul() {} + /// forward impl + virtual void operator() (OpContext &ctx, + const std::vector >& ins, + std::vector >& outs) { + LOG(ERROR) << "Not Impl Yet Operator power::type>().type_info()<<">"; + } + friend class MulHelper; +}; +template +class MulHelper : public OperatorHelper { +public: + MulHelper() = default; + ~MulHelper(); + Status InitParam() override; + + Status Init(OpContext &ctx, + const std::vector >& ins, + std::vector >& outs) override; + Status InferShape(const std::vector >& ins, + std::vector >& outs) override; + +public: + saber::MulParam> _param_mul; + saber::Mul _funcs_mul; +}; +} +} /* namespace anakin */ +``` +对应的```.cpp```文件如下: +``` +#include "framework/operators/mul.h" + +namespace anakin { +namespace ops { + +#ifdef USE_CUDA +template<> +void Mul::operator()( + OpContext& ctx, + const std::vector >& ins, + std::vector >& outs) { + auto* impl = + static_cast*>(this->_helper); + auto& param = + static_cast*>(this->_helper)->_param_mul; + impl->_funcs_mul(ins, outs, param, ctx); +} +#endif + +template +Status MulHelper::InitParam() { + auto alpha = GET_PARAMETER(float, alpha); + 
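+    // 补充说明:GET_PARAMETER 读取的是 parser 解析出的名为 alpha 的属性,
+    // 其名字需与文末 ANAKIN_REGISTER_OP 中 .Args("alpha", ...) 注册的名字一致。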
MulParam> param_mul(alpha); + _param_mul = param_mul; + return Status::OK(); +} + +template +Status MulHelper::Init(OpContext& ctx, + const std::vector >& ins, + std::vector >& outs) { + + SABER_CHECK(_funcs_mul.init(ins, outs, _param_mul, SPECIFY, VENDER_IMPL, ctx)); + return Status::OK(); +} + +template +Status MulHelper::InferShape(const + std::vector >& ins, + std::vector >& outs) { + SABER_CHECK(_funcs_mul.compute_output_shape(ins, outs, _param_mul)); + return Status::OK(); +} + +#ifdef USE_CUDA +template class MulHelper; +#endif +#ifdef USE_ARM_PLACE +template class MulHelper; +#endif +// register helper +#ifdef USE_CUDA +ANAKIN_REGISTER_OP_HELPER(Mul, MulHelper, NV, AK_FLOAT, Precision::FP32); +#endif +#ifdef USE_ARM_PLACE +ANAKIN_REGISTER_OP_HELPER(Mul, MulHelper, ARM, AK_FLOAT, Precision::FP32); +#endif +//! register op +ANAKIN_REGISTER_OP(Mul) +.Doc("Mul operator") +#ifdef USE_CUDA +.__alias__("mul") +#endif +#ifdef USE_ARM_PLACE +.__alias__("mul") +#endif +.num_in(1) +.num_out(1) +.Args("alpha", " alpha of Mul "); //注册 + +} /* namespace ops */ + +} /* namespace anakin */ +``` + +## 实现单元测试 +涉及的文件:```test/saber/xxx/test_saber_funcs_mul_xxx.cpp``` +在对应的test下需要添加新的单元测试 + +``` +TEST(TestSaberFuncNV, test_depthwise_conv) { + + // init tensors and some param. + + // start Reshape & doInfer + Context ctx1(0, 1, 1); + + // create param + MulParam > param(alpha); + + std::vector*> input; + std::vector*> output; + + // create saber op + Mul mul; + + // compute output shape + mul.compute_output_shape(input, output, param); + + // re_alloc output tensors memory based on output shape + output[0]->re_alloc(output[0]->shape()); + + // init saber op(calling init and create) + mul.init(input, output, param, SPECIFY, VENDER_IMPL, ctx1); + + // call operator() + mul(input, output, param, ctx1); + + // cuda specified, record events + cudaStream_t cuda_stream = ctx1.get_compute_stream(); + output[0]->record_event(cuda_stream); + output_dev.sync(); + + // param changed + param.alpha = 2.0; + // auto calling saber op(create and dispatch) + mul(input, output, param, ctx1); + + cudaDeviceSynchronize(); + CUDA_CHECK(cudaPeekAtLastError()); +} + +int main(int argc, const char** argv){ + anakin::saber::Env::env_init(); + + // initial logger + //logger::init(argv[0]); + InitTest(); + RUN_ALL_TESTS(argv[0]); + return 0; +} + +``` +## 调试及注意事项 + +一个op需要有对外的op接口和内部实现,由于存在saber/funcs/impl的非特化版本声明,当有op在某种设备下没有对应实现时,也能够编译,但此时是没有任何实现的空实现, diff --git a/doc/fluid/new_docs/advanced_usage/deploy/how_to_support_new_device_in_anakin.md b/doc/fluid/new_docs/advanced_usage/deploy/how_to_support_new_device_in_anakin.md new file mode 100644 index 0000000000..a1f75f5e95 --- /dev/null +++ b/doc/fluid/new_docs/advanced_usage/deploy/how_to_support_new_device_in_anakin.md @@ -0,0 +1,459 @@ +# 如何支持一个新的设备 + +## 概览 + +添加一个新的设备需要以下3个步骤: + +* [在`CMakeList`中添加设备的支持](#0001) +* [在`saber`中添加设备的实现](#0002) +* [在`framework`中添加设备的具体化或实例化](#0003) + +假设新设备的名称为`TNEW`, 以下将以这个设备名称进行演示。 + +## 在`CMakeList`中添加设备的支持 ## + +* 修改根目录`CMakeList.txt` +```cmake +#select the plantform to build +anakin_option(USE_GPU_PLACE "Select the build mode for GPU place." NO) +anakin_option(USE_X86_PLACE "Select the build mode for X86 place." NO) +anakin_option(USE_ARM_PLACE "Select the build mode for ARM place." NO) +anakin_option(USE_TNEW_PLACE "Select the build mode for ARM place." 
YES) +``` + +* 修改`saber/CMakeList.txt` + +根据新增设备的目录完善`saber`目录下的`CMakeList.txt`。 +```cmake +if(USE_TNEW_PLACE) + anakin_fetch_files_with_suffix(${ANAKIN_SABER}/core/impl/tnew "cpp" ANAKIN_SABER_BASE_SRC) + anakin_fetch_files_with_suffix(${ANAKIN_SABER}/funcs/impl/tnew "cpp" ANAKIN_SABER_BASE_SRC) +endif() +``` + +* 修改`test/CMakeList.txt` + +新增设备的单测文件放在`test/saber/tnew`目录下,修改`test`目录下的`CMakeList.txt`。 +```cmake +if(USE_TNEW_PLACE) + anakin_fetch_files_with_suffix(${ANAKIN_UNIT_TEST}/saber/tnew "cpp" ANAKIN_TEST_CASE_SRC) +endif() +``` + +* 修改`cmake/anakin_config.h.in` +```c++ +// plantform to use +#cmakedefine USE_GPU_PLACE + +#cmakedefine USE_X86_PLACE + +#cmakedefine USE_ARM_PLACE + +#cmakedefine USE_TNEW_PLACE +``` + +* 其他依赖和编译选项 +修改`cmake`目录下的`compiler_options.cmake`和`find_modules.cmake` + + +## 在`saber`中添加设备的实现 ## +`saber`是`Anakin`的基础计算库,对外提供设备无关的统一的API,设备相关的实现都会封装到`TargetWrapper`中。 + +### 在`saber/saber_types.h`中添加设备 + +```c++ +enum TargetTypeEnum { + eINVALID = -1, + eNV = 1, + eAMD = 2, + eARM = 3, + eX86 = 4, + eNVHX86 = 5, + eTNEW = 6 +}; + +typedef TargetType NV; +typedef TargetType ARM; +typedef TargetType AMD; +typedef TargetType X86; +typedef TargetType TNEW; + +``` + +### 在`saber/core`中添加设备的实现 + +1. 在`target_traits.h`中添加新设备 + +* 增加设备类型 +```c++ +struct __cuda_device{}; +struct __arm_device{}; +struct __amd_device{}; +struct __x86_device{}; +struct __tnew_device{}; +``` + +* `TargetTypeTraits`模板具体化 +```c++ +template <> +struct TargetTypeTraits { + typedef __xxx_target target_category;//根据实际设备是host端还是device端进行选择 + typedef __tnew_device target_type; +}; +``` + +2. 在`data_traits.h`中特化`DataTrait`模板类 + +如果设备需要特殊的数据类型,则特化出设备的`DataTrait`类的实现,例如opencl数据类型的实现如下: +```c++ +#ifdef USE_OPENCL +struct ClMem{ + ClMem(){ + dmem = nullptr; + offset = 0; + } + + ClMem(cl_mem* mem_in, int offset_in = 0) { + dmem = mem_in; + offset = offset_in; + } + + ClMem(ClMem& right) { + dmem = right.dmem; + offset = right.offset; + } + + ClMem& operator=(ClMem& right) { + this->dmem = right.dmem; + this->offset = right.offset; + return *this; + } + + ClMem& operator+(int offset_in) { + this->offset += offset_in; + return *this; + } + + int offset{0}; + cl_mem* dmem; +}; + +template <> +struct DataTrait { + typedef ClMem Dtype; + typedef float dtype; +}; + +template <> +struct DataTrait { + typedef ClMem Dtype; + typedef double dtype; +}; + +template <> +struct DataTrait { + typedef ClMem Dtype; + typedef char dtype; +}; +#endif //use_opencl +``` + +3. 
在`target_wrapper.h`中特化`TargetWrapper`模板类 + +特化`TargetWrapper`模板类,在`target_wrapper.h`中声明函数,具体如下: +```c++ +template <> +struct TargetWrapper { //根据TNEW的具体类型修改__xxx_target,__host_target或者__device_target + + typedef xxx_event event_t; //根据设备实现xxx_event + typedef xxx_stream stream_t; //根据设备实现xxx_stream + + static void get_device_count(int& count); + + static void set_device(int id); + + //We should add strategy to avoid malloc directly + static void mem_alloc(void** ptr, size_t n); + + static void mem_free(void* ptr); + + static void mem_set(void* ptr, int value, size_t n); + + static void create_event(event_t& event, bool flag = false); + + static void create_stream(stream_t& stream); + + static void create_stream_with_flag(stream_t& stream, unsigned int flag); + + static void create_stream_with_priority(stream_t& stream, unsigned int flag, int priority); + + static void destroy_stream(stream_t& stream); + + static void destroy_event(event_t& event); + + static void record_event(event_t& event, stream_t stream); + + static void query_event(event_t& event); + + static void sync_event(event_t& event); + + static void sync_stream(event_t& event, stream_t& stream); + + static void sync_memcpy(void* dst, int dst_id, const void* src, int src_id, \ + size_t count, __DtoD); + + static void async_memcpy(void* dst, int dst_id, const void* src, int src_id, \ + size_t count, stream_t& stream, __DtoD); + + static void sync_memcpy(void* dst, int dst_id, const void* src, int src_id, \ + size_t count, __HtoD); + + static void async_memcpy(void* dst, int dst_id, const void* src, int src_id, \ + size_t count, stream_t& stream, __HtoD); + + static void sync_memcpy(void* dst, int dst_id, const void* src, int src_id, \ + size_t count, __DtoH); + + static void async_memcpy(void* dst, int dst_id, const void* src, int src_id, \ + size_t count, stream_t& stream, __DtoH); + + static void sync_memcpy_p2p(void* dst, int dst_dev, const void* src, \ + int src_dev, size_t count); + + static void async_memcpy_p2p(void* dst, int dst_dev, const void* src, \ + int src_dev, size_t count, stream_t& stream); + + static int get_device_id(); +}; + +``` + +4. 在`impl/`目录下添加设备目录和实现 + +在`saber/core/impl`目录下添加设备目录`tnew`。 +* 实现`TargetWrapper`结构体中各函数的定义。 +如果`TargetWrapper`的实现与默认的模板类一致,则不用特化出该类。 + +```c++ +typedef TargetWrapper TNEW_API; +void TNEW_API::get_device_count(int &count) { + // add implementation +} + +void TNEW_API::set_device(int id){ + // add implementation +} + +void TNEW_API::mem_alloc(void** ptr, size_t n){ + // add implementation +} + +void TNEW_API::mem_free(void* ptr){ + if(ptr != nullptr){ + // add implementation + } +} +... 
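+
+// 其余接口依此类推,按照前面 TargetWrapper 声明中的函数逐一给出定义即可。
+// 例如(仅为示意,具体实现取决于设备的 runtime API):
+void TNEW_API::mem_set(void* ptr, int value, size_t n){
+    // add implementation
+}
+
+void TNEW_API::create_stream(stream_t& stream){
+    // add implementation
+}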
+ +``` + +* 特化实现`device.h`中的`Device` + +```c++ +template <> +void Device::create_stream() { + // add implementation +} + +template <> +void Device::get_info() { + + // add implementation +} + +``` + +### 在`saber/funcs`中实现设备相关的op + +参考[如何增加新的Operator](addCustomOp.md) + + +## 在`framework`中添加设备的具体化或实例化 ## + +### `framework/core` + +* `net.cpp`中添加实例化 + +```c++ +#ifdef USE_TNEW_PLACE +template class Net; +template class Net; +#endif +``` + +* `operator_func.cpp`中添加实例化 + +```c++ +#ifdef USE_TNEW_PLACE +template class OperatorFunc; +#endif +``` + +* `worker.cpp`中添加实例化 + +```c++ +#ifdef USE_TNEW_PLACE +template class Worker; +template class Worker; +#endif +``` + +* `operator_attr.cpp`中添加实例化 + +```c++ +template +OpAttrWarpper& OpAttrWarpper::__alias__(const std::string& op_name); +template +OpAttrWarpper& OpAttrWarpper::__alias__(const std::string& op_name); +template +OpAttrWarpper& OpAttrWarpper::__alias__(const std::string& op_name); +``` + +* `parameter.h`中添加设备的实现 + +```c++ +#ifdef USE_TNEW_PLACE +template +class PBlock { +public: + typedef Tensor4d::type> type; + + PBlock() { + _inner_tensor = std::make_shared(); + } + ... +} +#endif //TNEW +``` + +* `type_traits_extend.h`中添加设备的实现 + +```c++ +template<> +struct target_host { + typedef saber::X86 type; //根据TNEW选择正确的host type +}; +``` + +### `framework/graph` + +* `graph.cpp`中添加实例化 + +```c++ + #ifdef USE_TNEW_PLACE + template class Graph; + template class Graph; + template class Graph; + #endif +``` + +### `framework/model_parser` + +* `parser.cpp`中添加实例化 + +```c++ + #ifdef USE_TNEW_PLACE + template + Status load(graph::Graph* graph, + const char* model_path); + template + Status load(graph::Graph* graph, + const char* model_path); + template + Status load(graph::Graph* graph, + const char* model_path); + + template + Status save(graph::Graph* graph, + std::string& model_path); + template + Status save(graph::Graph* graph, + std::string& model_path); + template + Status save(graph::Graph* graph, + std::string& model_path); + + template + Status load(graph::Graph* graph, + std::string& model_path); + template + Status load(graph::Graph* graph, + std::string& model_path); + template + Status load(graph::Graph* graph, + std::string& model_path); + + template + Status save(graph::Graph* graph, + const char* model_path); + template + Status save(graph::Graph* graph, + const char* model_path); + template + Status save(graph::Graph* graph, + const char* model_path); + #endif +``` + +* `model_io.cpp`中添加实例化 + +```c++ +#ifdef USE_TNEW_PLACE +template class NodeIO; +template class NodeIO; +template class NodeIO; +#endif +``` + +### `framework/operators` + +为`framework/operators`目录下所有op添加实例化或具体化 +以`activation.cpp`为例,实例化如下: + +```c++ +#ifdef USE_TNEW_PLACE +INSTANCE_ACTIVATION(TNEW, AK_FLOAT, Precision::FP32); +INSTANCE_ACTIVATION(TNEW, AK_FLOAT, Precision::FP16); +INSTANCE_ACTIVATION(TNEW, AK_FLOAT, Precision::INT8); +template class ActivationHelper; +ANAKIN_REGISTER_OP_HELPER(Activation, ActivationHelper, TNEW, AK_FLOAT, Precision::FP32); +#endif +``` + +如果TNEW设备函数的实现与现有模板实现不一致,可以特化实现如下(以init()为例): +```c++ +#ifdef USE_TNEW_PLACE +INSTANCE_ACTIVATION(TNEW, AK_FLOAT, Precision::FP32); +INSTANCE_ACTIVATION(TNEW, AK_FLOAT, Precision::FP16); +INSTANCE_ACTIVATION(TNEW, AK_FLOAT, Precision::INT8); +template <> +Status ActivationHelper::Init(OpContext &ctx,\ + const std::vector >& ins, \ + std::vector >& outs) { + SABER_CHECK(_funcs_activation.init(ins, outs, _param_activation, SPECIFY, SABER_IMPL, ctx)); //在这里选择实现方式 + return Status::OK(); +} 
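+
+// 补充说明:特化版本定义之后,仍需保留下面的 ANAKIN_REGISTER_OP_HELPER 注册,
+// 否则该 TNEW 实现不会被框架发现。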
+ANAKIN_REGISTER_OP_HELPER(Activation, ActivationHelper, TNEW, AK_FLOAT, Precision::FP32);
+#endif
+```
+
+在`ANAKIN_REGISTER_OP(Activation)`中添加TNEW的注册
+
+```c++
+#ifdef USE_TNEW_PLACE
+.__alias__("activation")
+#endif
+```
+
+## 注意事项
+不要修改`Tensor`/`Buffer`/`Env`/`Context`这些类函数的接口和实现
diff --git a/doc/fluid/new_docs/advanced_usage/deploy/index_anakin.rst b/doc/fluid/new_docs/advanced_usage/deploy/index_anakin.rst
new file mode 100644
index 0000000000..b782242a66
--- /dev/null
+++ b/doc/fluid/new_docs/advanced_usage/deploy/index_anakin.rst
@@ -0,0 +1,26 @@
+服务器端部署 - Anakin
+#####################
+
+
+使用文档
+~~~~~~~
+
+.. toctree::
+   :maxdepth: 1
+
+   install_anakin.md
+   convert_paddle_to_anakin.md
+   run_anakin_on_arm.md
+   anakin_tutorial.md
+   anakin_example.md
+   anakin_gpu_benchmark.md
+   anakin_arm_benchmark.md
+
+开发文档
+~~~~~~~
+
+.. toctree::
+   :maxdepth: 1
+
+   how_to_add_anakin_op.md
+   how_to_support_new_device_in_anakin.md
diff --git a/doc/fluid/new_docs/advanced_usage/deploy/index_mobile.rst b/doc/fluid/new_docs/advanced_usage/deploy/index_mobile.rst
new file mode 100644
index 0000000000..47df6392c1
--- /dev/null
+++ b/doc/fluid/new_docs/advanced_usage/deploy/index_mobile.rst
@@ -0,0 +1,9 @@
+移动端部署
+##########
+
+.. toctree::
+   :maxdepth: 2
+
+   mobile_build.md
+   mobile_dev.md
+
diff --git a/doc/fluid/new_docs/advanced_usage/deploy/index_native.rst b/doc/fluid/new_docs/advanced_usage/deploy/index_native.rst
new file mode 100644
index 0000000000..a5209e8560
--- /dev/null
+++ b/doc/fluid/new_docs/advanced_usage/deploy/index_native.rst
@@ -0,0 +1,8 @@
+服务器端部署 - 原生引擎
+#######################
+
+.. toctree::
+   :maxdepth: 2
+
+   build_and_install_lib_cn.rst
+   native_infer.rst
diff --git a/doc/fluid/new_docs/advanced_usage/deploy/install_anakin.md b/doc/fluid/new_docs/advanced_usage/deploy/install_anakin.md
new file mode 100644
index 0000000000..bb7c195030
--- /dev/null
+++ b/doc/fluid/new_docs/advanced_usage/deploy/install_anakin.md
@@ -0,0 +1,69 @@
+## 从源码编译安装Anakin ##
+
+我们已经在CentOS 7.3上成功地安装和测试了Anakin,对于其他操作系统,我们将很快支持。
+
+### 安装概览 ###
+
+* [在CentOS上安装 Anakin]()
+* [在Ubuntu上安装 Anakin]()
+* [在ARM上安装 Anakin](run_on_arm_ch.md)
+* [验证安装]()
+
+
+### 在CentOS上安装 Anakin ###
+#### 1. 系统要求 ####
+
+* make 3.82+
+* cmake 2.8.12+
+* gcc 4.8.2+
+* g++ 4.8.2+
+* 其他需要补充的。。。
+
+#### 2. 编译CPU版Anakin ####
+
+暂时不支持
+
+#### 3. 编译支持NVIDIA GPU的Anakin ####
+
+- 3.1. 安装依赖
+  - 3.1.1 protobuf
+    >$ git clone https://github.com/google/protobuf
+    >$ cd protobuf
+    >$ git submodule update --init --recursive
+    >$ ./autogen.sh
+    >$ ./configure --prefix=/path/to/your/install_dir
+    >$ make
+    >$ make check
+    >$ make install
+    >$ sudo ldconfig
+
+
+    如安装protobuf遇到任何问题,请访问[这里](https://github.com/google/protobuf/blob/master/src/README.md)
+
+- 3.2 CUDA Toolkit
+  - [CUDA 8.0](https://developer.nvidia.com/cuda-zone) or higher. 具体信息参见[NVIDIA's documentation](https://docs.nvidia.com/cuda/cuda-installation-guide-linux/).
+  - [cuDNN v7](https://developer.nvidia.com/cudnn). 具体信息参见[NVIDIA's documentation](https://docs.nvidia.com/cuda/cuda-installation-guide-linux/).
+- 3.3 编译Anakin
+  >$ git clone https://xxxxx
+  >$ cd anakin
+  >$ mkdir build
+  >$ cmake ..
+  >$ make
+
+
+#### 4. 编译支持AMD GPU的Anakin ####
+
+暂时还不支持
+
+
+### 在Ubuntu上安装 Anakin ###
+
+暂时还不支持
+
+
+### 在ARM上安装 Anakin ###
+
+暂时还不支持
+
+### 验证安装 ###
+we are coming soon...
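+
+在官方验证流程完善之前,可以先用一个最小的 C++ 程序粗略检查头文件与库能否正常使用(以下仅为示意:头文件路径与命名空间均为假设,请以实际代码为准):
+
+```c++
+// 最小链接验证(示意):能编译、链接并创建一个空的计算图,即说明安装基本可用。
+// 假设:Graph 定义在 framework/graph/graph.h,模板参数含义见 Anakin 使用教程。
+#include "framework/graph/graph.h"
+
+int main() {
+    auto graph = new anakin::graph::Graph<anakin::saber::NV, anakin::saber::AK_FLOAT,
+                                          anakin::Precision::FP32>();
+    delete graph;
+    return 0;
+}
+```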
diff --git a/doc/fluid/new_docs/advanced_usage/deploy/mobile_build.md b/doc/fluid/new_docs/advanced_usage/deploy/mobile_build.md
new file mode 100644
index 0000000000..e515931649
--- /dev/null
+++ b/doc/fluid/new_docs/advanced_usage/deploy/mobile_build.md
@@ -0,0 +1,59 @@
+# 环境搭建
+## 使用 docker
+### 1. 安装 docker
+安装 docker 的方式,参考官方文档 [https://docs.docker.com/install/](https://docs.docker.com/install/)
+### 2. 使用 docker 搭建构建环境
+首先进入 paddle-mobile 的目录下,执行 `docker build`
+以 Linux/Mac 为例 (Windows 建议在 'Docker Quickstart Terminal' 中执行)
+```
+$ docker build -t paddle-mobile:dev - < Dockerfile
+```
+使用 `docker images` 可以看到我们新建的 image
+```
+$ docker images
+REPOSITORY    TAG    IMAGE ID    CREATED    SIZE
+paddle-mobile    dev    33b146787711    45 hours ago    372MB
+```
+### 3. 使用 docker 构建
+进入 paddle-mobile 目录,执行 docker run
+```
+$ docker run -it --mount type=bind,source=$PWD,target=/paddle-mobile paddle-mobile:dev
+root@5affd29d4fc5:/ # cd /paddle-mobile
+# 生成构建 android 产出的 Makefile
+root@5affd29d4fc5:/ # rm CMakeCache.txt
+root@5affd29d4fc5:/ # cmake -DCMAKE_TOOLCHAIN_FILE=tools/toolchains/arm-android-neon.cmake
+# 生成构建 linux 产出的 Makefile
+root@5affd29d4fc5:/ # rm CMakeCache.txt
+root@5affd29d4fc5:/ # cmake -DCMAKE_TOOLCHAIN_FILE=tools/toolchains/arm-linux-gnueabi.cmake
+```
+### 4. 设置编译选项
+可以通过 ccmake 设置编译选项
+```
+root@5affd29d4fc5:/ # ccmake .
+                                                     Page 1 of 1
+ CMAKE_ASM_FLAGS
+ CMAKE_ASM_FLAGS_DEBUG
+ CMAKE_ASM_FLAGS_RELEASE
+ CMAKE_BUILD_TYPE
+ CMAKE_INSTALL_PREFIX             /usr/local
+ CMAKE_TOOLCHAIN_FILE             /paddle-mobile/tools/toolchains/arm-android-neon.cmake
+ CPU                              ON
+ DEBUGING                         ON
+ FPGA                             OFF
+ LOG_PROFILE                      ON
+ MALI_GPU                         OFF
+ NET                              googlenet
+ USE_EXCEPTION                    ON
+ USE_OPENMP                       OFF
+```
+修改选项后,按 `c`, `g` 更新 Makefile
+### 5. 构建
+使用 make 命令进行构建
+```
+root@5affd29d4fc5:/ # make
+```
+### 6. 查看构建产出
+构建产出可以在 host 机器上查看,在 paddle-mobile 的目录下,build 以及 test/build 下,可以使用 adb 指令或者 scp 传输到 device 上执行
+
+## 不使用 docker
+不使用 docker 的方法,可以直接用 cmake 生成 makefile 后构建。使用 ndk 构建 android 应用需要正确设置 NDK_ROOT。构建 linux 应用需要安装 arm-linux-gnueabi-gcc 或者类似的交叉编译工具,可能需要设置 CC,CXX 环境变量,或者在 tools/toolchains/ 中修改 arm-linux-gnueabi.cmake,或者增加自己需要的 toolchain file。
diff --git a/doc/fluid/new_docs/advanced_usage/deploy/mobile_dev.md b/doc/fluid/new_docs/advanced_usage/deploy/mobile_dev.md
new file mode 100644
index 0000000000..474380f9db
--- /dev/null
+++ b/doc/fluid/new_docs/advanced_usage/deploy/mobile_dev.md
@@ -0,0 +1,72 @@
+# iOS开发文档
+
+## 编译
+
+### 一. 使用 build.sh 编译
+
+```sh
+sh build.sh ios
+
+# 如果只想编译某个特定模型的 op, 则需执行以下命令
+sh build.sh ios googlenet
+
+# 在这个文件夹下, 你可以拿到生成的 .a 库
+cd ../build/release/ios/build
+
+```
+
+### 二. 使用 xcode 编译
+
+我们提供了 ios 开发者更为熟悉的 xcode 编译环境:
+在 ios/ 目录下打开 PaddleMobile.xcworkspace 即可编译 PaddleMobile 或者 运行 Demo
+
+### 三. 集成
+
+#### 如使用 c++ 接口
+将
+
+```
+libpaddle-mobile.a
+io.h
+program.h
+types.h
+lod_tensor.h
+tensor.h
+```
+拖入工程, io.h 为接口文件, 可在 [github](https://github.com/PaddlePaddle/paddle-mobile/blob/develop/src/io/io.h)上查看接口注释
+
+#### 如使用 oc 接口
+将在xcode 编译生成的
+```
+libPaddleMobile.a
+PaddleMobile.h
+```
+拖入工程, 接口如下:
+
+```
+/*
+ 创建单例对象
+*/
++ (instancetype)sharedInstance;
+
+/*
+ load 模型, 开辟内存
+*/
+- (BOOL)load:(NSString *)modelPath andWeightsPath:(NSString *)weightsPath;
+
+/*
+ 进行预测, means 和 scale 为训练模型时的预处理参数, 如训练时没有做这些预处理则直接使用 predict
+*/
+- (NSArray *)predict:(CGImageRef)image means:(NSArray *)means scale:(float)scale;
+
+/*
+ 进行预测
+*/
+- (NSArray *)predict:(CGImageRef)image;
+
+/*
+ 清理内存
+*/
+- (void)clear;
+
+```
diff --git a/doc/fluid/new_docs/advanced_usage/deploy/native_infer.rst b/doc/fluid/new_docs/advanced_usage/deploy/native_infer.rst
new file mode 100644
index 0000000000..e1eee3f818
--- /dev/null
+++ b/doc/fluid/new_docs/advanced_usage/deploy/native_infer.rst
@@ -0,0 +1,108 @@
+Paddle 预测 API
+===============
+
+为了更简单方便地进行预测部署,Fluid 提供了一套高层 API
+用来隐藏底层不同的优化实现。
+
+`预测库相关代码 `__
+包括
+
+- 头文件 ``paddle_inference_api.h`` 定义了所有的接口
+- 库文件 ``libpaddle_fluid.so`` 或 ``libpaddle_fluid.a``
+- 库文件 ``libpaddle_inference_api.so`` 或
+  ``libpaddle_inference_api.a``
+
+编译和依赖可以参考 :ref:`install_or_build_cpp_inference_lib` 。
+
+下面是一些 API 概念的介绍
+
+PaddleTensor
+------------
+
+PaddleTensor 定义了预测最基本的输入输出的数据格式,其定义是
+
+.. code:: cpp
+
+   struct PaddleTensor {
+      std::string name;  // variable name.
+      std::vector<int> shape;
+      PaddleBuf data;  // blob of data.
+      PaddleDType dtype;
+   };
+
+- ``name`` 用于指定输入数据对应的 模型中variable 的名字
+  (暂时没有用,但会在后续支持任意 target 时启用)
+- ``shape`` 表示一个 Tensor 的 shape
+- ``data`` 数据以连续内存的方式存储在 ``PaddleBuf``
+  中, ``PaddleBuf``
+  可以接收外面的数据或者独立 ``malloc`` 内存,详细可以参考头文件中相关定义。
+- ``dtype`` 表示 Tensor 的数据类型
+
+engine
+------
+
+高层 API 底层有多种优化实现,我们称之为 engine,目前有三种 engine
+
+- 原生 engine,由 paddle 原生的 forward operator
+  组成,可以天然支持所有 paddle 训练出的模型,
+- Anakin engine,封装了
+  `Anakin `__
+  ,在某些模型上性能不错,但只能接受自带模型格式,无法支持所有 paddle
+  模型,
+- TensorRT mixed engine,用子图的方式支持了
+  `TensorRT `__ ,支持所有 paddle
+  模型,并自动切割部分计算子图到 TensorRT 上加速(WIP)
+
+其实现为
+
+.. code:: cpp
+
+   enum class PaddleEngineKind {
+      kNative = 0,        // Use the native Fluid facility.
+      kAnakin,            // Use Anakin for inference.
+      kAutoMixedTensorRT  // Automatically mixing TensorRT with the Fluid ops.
+   };
+
+预测部署过程
+------------
+
+总体上分为以下步骤
+
+1. 用合适的配置创建 ``PaddlePredictor``
2. 创建输入用的 ``PaddleTensor`` ,传入到 ``PaddlePredictor`` 中
+3. 获取输出的 ``PaddleTensor`` ,将结果取出
+
+下面完整演示一个简单的模型,部分细节代码隐去
+
+.. code:: cpp
+
+   #include "paddle_inference_api.h"
+
+   // 创建一个 config,并修改相关设置
+   paddle::NativeConfig config;
+   config.model_dir = "xxx";
+   config.use_gpu = false;
+   // 创建一个原生的 PaddlePredictor
+   auto predictor =
+         paddle::CreatePaddlePredictor<paddle::NativeConfig>(config);
+   // 创建输入 tensor
+   int64_t data[4] = {1, 2, 3, 4};
+   paddle::PaddleTensor tensor{.name = "",
+                               .shape = std::vector<int>({4, 1}),
+                               .data = PaddleBuf(data, sizeof(data)),
+                               .dtype = PaddleDType::INT64};
+   // 创建输出 tensor,输出 tensor 的内存可以复用
+   std::vector<paddle::PaddleTensor> outputs;
+   // 执行预测(输入可以有多个 tensor,故以 vector 传入)
+   std::vector<paddle::PaddleTensor> slots = {tensor};
+   CHECK(predictor->Run(slots, &outputs));
+   // 获取 outputs ...
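+   // 补充示意(非原始文档内容):输出同样是 PaddleTensor,可按其 dtype 解析数据区。
+   // 例如,假设第 0 个输出为 float 类型:
+   //   float* out_data = static_cast<float*>(outputs[0].data.data());
+   //   size_t out_num = outputs[0].data.length() / sizeof(float);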
+ +编译时,联编 ``libpaddle_fluid.a/.so`` 和 +``libpaddle_inference_api.a/.so`` 便可。 + +详细代码参考 +------------ + +- `inference + demos `__ +- `复杂单线程/多线程例子 `__ diff --git a/doc/fluid/new_docs/advanced_usage/deploy/run_anakin_on_arm.md b/doc/fluid/new_docs/advanced_usage/deploy/run_anakin_on_arm.md new file mode 100644 index 0000000000..ebeb38f534 --- /dev/null +++ b/doc/fluid/new_docs/advanced_usage/deploy/run_anakin_on_arm.md @@ -0,0 +1,151 @@ +## 源码编译 Anakin ## + +目前Anakin支持ARM Android平台,采用Android NDK交叉编译工具链,已在mac os和centos上编译和测试通过。 + +### 安装概览 ### + +* [系统需求](#0001) +* [安装第三方依赖](#0002) +* [Anakin源码编译](#0003) +* [验证安装](#0004) + + +### 1. 系统需求 ### + +* 宿主机: linux, mac +* cmake 3.8.2+ +* Android NDK r14, Linux 版本[从这里下载](https://dl.google.com/android/repository/android-ndk-r14b-linux-x86_64.zip) + +### 2. 安装第三方依赖 ### + +- 2.1 protobuf3.4.0 + 源码从这里[下载](https://github.com/google/protobuf/releases/tag/v3.4.0) + - 2.1.1 为宿主机编译protobuf + ```bash + $ tar -xzf protobuf-3.4.0.tar.gz + $ cd protobuf-3.4.0 + $ ./autogen.sh + $ ./configure + $ make + $ make check + $ make install + ``` + 上述 $make install 执行后,可在 /usr/local/include/google 找到 libprotobuf 所需的头文件,将整个google文件夹拷贝至Anakin/third-party/arm-android/protobuf/下, + 如有问题,请点[这里](https://github.com/google/protobuf/blob/v3.4.0/src/README.md)。 + 然后将已经生成文件清除。 + ```bash + $ make distclean + ``` + - 2.1.1 交叉编译Android`armeabi-v7a`的protobuf,注意设置ANDROID_NDK的路径,以及ARCH_ABI、HOSTOSN的值, + ```bash + + $ export ANDROID_NDK=your_ndk_path + $ ARCH_ABI="arm-linux-androideabi-4.9" + $ HOSTOSN="darwin-x86_64" + $ export SYSROOT=$ANDROID_NDK/platforms/android-9/arch-arm + $ export PREBUILT=$ANDROID_NDK/toolchains/$ARCH_ABI + $ export LDFLAGS="--sysroot=$SYSROOT" + $ export LD="$ANDROID_NDK/toolchains/$ARCH_ABI/prebuilt/$HOSTOSN/arm-linux-androideabi/bin/ld $LDFLAGS" + $ export LIBS="-llog $ANDROID_NDK/sources/cxx-stl/gnu-libstdc++/4.9/libs/armeabi-v7a/libgnustl_static.a" + $ export CPPFLAGS="" + $ export INCLUDES="-I$ANDROID_NDK/sources/cxx-stl/gnu-libstdc++/4.9/include/ -I$ANDROID_NDK/platforms/android-9/arch-arm/usr/include/ -I$ANDROID_NDK/sources/cxx-stl/gnu-libstdc++/4.9/libs/armeabi-v7a/include/" + $ export CXXFLAGS="-march=armv7-a -mfloat-abi=softfp -DGOOGLE_PROTOBUF_NO_RTTI --sysroot=$SYSROOT" + $ export CCFLAGS="$CXXFLAGS" + $ export CXX="$PREBUILT/prebuilt/$HOSTOSN/bin/arm-linux-androideabi-g++ $CXXFLAGS" + $ export CC="$CXX" + $ export RANLIB="$ANDROID_NDK/toolchains/$ARCH_ABI/prebuilt/$HOSTOSN/bin/arm-linux-androideabi-ranlib" + $ ./autogen.sh + $ ./configure --host=arm-linux-androideabi --with-sysroot=$SYSROOT --enable-cross-compile --with-protoc=protoc --disable-shared CXX="$CXX" CC="$CC" LD="$LD" + $ make + ``` + + 编译生成 *.a 静态库,若希望编译*.so 动态链接库 ,请在./configure参数中改--disable-shared为--disable-static --enable-shared。 + 生成文件在src/.libs/下,将生成的文件拷贝至Anakin/third-party/arm-android/protobuf/lib下。 + 在[cmake](../../cmake/find_modules.cmake)中更新`ARM_RPOTO_ROOT`的路径。 + ```cmake + set(ARM_RPOTO_ROOT "${CMAKE_SOURCE_DIR}/third-party/arm-android/protobuf") + ``` + +- 2.2 opencv 2.4.3+(optional) + Anakin只在examples示例中使用opencv + Android系统的opencv从[这里下载](https://opencv.org/releases.html) + 解压后将 `3rdparty/libs/armeabi-v7a`中的库文件拷贝到`libs/armeabi-v7a` + 在[cmake](../../cmake/find_modules.cmake)中搜索`anakin_find_opencv`, + 并设置 `include_directories` 和 `LINK_DIRECTORIES`为自己安装的库的路径。 + ```cmake + include_directories(${CMAKE_SOURCE_DIR}/third-party/arm-android/opencv/sdk/native/jni/include/) + LINK_DIRECTORIES(${CMAKE_SOURCE_DIR}/third-party/arm-android/opencv/sdk/native/libs/armeabi-v7a/) + ``` +### 3. 
Anakin源码编译 ###
+
+#### 编译Android版本
+
+  克隆[源码](https://github.com/PaddlePaddle/Anakin/tree/arm)
+```bash
+  cd your_dir
+  git clone https://github.com/PaddlePaddle/Anakin.git
+  cd Anakin
+  git fetch origin arm
+  git checkout arm
+```
+  修改`android_build.sh`
+- 修改NDK路径
+  ```bash
+  #modify "your_ndk_path" to your NDK path
+  export ANDROID_NDK=your_ndk_path
+  ```
+- 修改ARM 处理器架构
+  对于32位ARM处理器, 将ANDROID_ABI 设置为 `armeabi-v7a with NEON`,
+  对于64位ARM处理器, 可以将ANDROID_ABI 设置为 `armeabi-v7a with NEON`或者`arm64-v8a`。
+  目前我们只支持 `armeabi-v7a with NEON`;`arm64-v8a` 还在开发中。
+  ```bash
+  -DANDROID_ABI="armeabi-v7a with NEON"
+  ```
+- 设置Android API
+  根据Android系统的版本设置API level, 例如API Level 21 -> Android 5.0.1
+  ```bash
+  -DANDROID_NATIVE_API_LEVEL=21
+  ```
+
+- 选择编译静态库或动态库
+  设置`BUILD_SHARED=NO`编译静态库
+  设置`BUILD_SHARED=YES`编译动态库
+  ```bash
+  -DBUILD_SHARED=NO
+  ```
+- OpenMP多线程支持
+  设置`USE_OPENMP=YES`开启OpenMP多线程
+  ```bash
+  -DUSE_OPENMP=YES
+  ```
+
+- 编译单测文件
+  设置`BUILD_WITH_UNIT_TEST=YES`将会编译单测文件
+  ```bash
+  -DBUILD_WITH_UNIT_TEST=YES
+  ```
+
+- 编译示例文件
+  设置`BUILD_EXAMPLES=YES`将会编译示例文件
+  ```bash
+  -DBUILD_EXAMPLES=YES
+  ```
+
+- 开启opencv
+  如果使用opencv,设置`USE_OPENCV=YES`
+  ```bash
+  -DUSE_OPENCV=YES
+  ```
+
+- 开始编译
+  运行脚本 `android_build.sh` 将自动编译Anakin
+  ```bash
+  ./android_build.sh
+  ```
+
+### 4. 验证安装 ###
+  编译好的库会放在目录`${Anakin_root}/output`下;
+  编译好的单测文件会放在`${Anakin_root}/output/unit_test`目录下;
+  编译好的示例文件会放在`${Anakin_root}/output/examples`目录下。
+
+  对于Android系统,打开设备的调试模式,通过ADB可以访问的目录是`data/local/tmp`,通过ADB push将测试文件、模型和数据发送到设备目录, 运行测试文件。
diff --git a/doc/fluid/new_docs/advanced_usage/development/contribute_to_paddle.md b/doc/fluid/new_docs/advanced_usage/development/contribute_to_paddle.md
new file mode 120000
index 0000000000..1126df7a82
--- /dev/null
+++ b/doc/fluid/new_docs/advanced_usage/development/contribute_to_paddle.md
@@ -0,0 +1 @@
+../../../dev/contribute_to_paddle_cn.md
\ No newline at end of file
diff --git a/doc/fluid/new_docs/advanced_usage/development/cpu_profiling_cn.md b/doc/fluid/new_docs/advanced_usage/development/cpu_profiling_cn.md
new file mode 120000
index 0000000000..1381a3b05f
--- /dev/null
+++ b/doc/fluid/new_docs/advanced_usage/development/cpu_profiling_cn.md
@@ -0,0 +1 @@
+../../../howto/optimization/cpu_profiling_cn.md
\ No newline at end of file
diff --git a/doc/fluid/new_docs/advanced_usage/development/gpu_profiling_cn.rst b/doc/fluid/new_docs/advanced_usage/development/gpu_profiling_cn.rst
new file mode 100644
index 0000000000..f2396716bd
--- /dev/null
+++ b/doc/fluid/new_docs/advanced_usage/development/gpu_profiling_cn.rst
@@ -0,0 +1,242 @@
+============
+GPU性能调优
+============
+
+.. contents::
+
+此教程将向您分步介绍如何使用内置的定时工具、 **nvprof** 或 **nvvp** 来运行性能分析和调优。
+
+- 什么是性能分析?
+- 为什么需要性能分析?
+- 如何进行性能分析?
+- 性能分析工具介绍
+- 详细教程
+- 性能分析小技巧
+
+什么是性能分析?
+================
+在软件工程的范畴里,性能分析(Profiling)是一个动态程序分析的术语,它可以指测量一个程序的空间(内存)复杂度或时间复杂度,
+也可以说是某些特定指令的使用情况,或者是函数调用的频率和耗时等。通常情况下,分析得到的信息用于协助进行程序的优化。
+
+简单来说,性能分析工具是用于给应用程序的性能做定量分析的。如果想很好的理解程序的行为,那程序分析工具是必不可少的利器。简单的性能分析,可以告诉您某个操作到底花了多长时间?而更深入的分析,甚至能解释为什么某个操作花了很长时间?
+
+为什么需要性能分析?
+============================
+训练好一个深层神经网络通常要耗费非常长的时间,所以性能也就逐步变成了深度学习领域最重要的指标。
+而优化性能的首要任务,是需要了解哪些步骤拖慢了整体。
+如果某一块根本就不怎么耗时,那也就不需要急着优化性能啦!
+
+如何进行性能分析?
+========================
+为了达到性能最优,您可以采用下面五个步骤:
+
+- 对代码进行性能分析
+- 找到运行慢的部分
+- 找到运行慢的原因
+- 修改成更快的版本
+- 再次对代码进行性能分析
+
+通常情况下,处理器有两个关键性能限制:一个是浮点计算量,另一个是内存操作量。
+GPU则还需要高并行性,才能发挥其全部能力。这正是它们速度快的原因。
+
+性能分析工具介绍
+======================
+就通常的GPU性能分析来说,市面上已经有NVIDIA或第三方提供的众多工具。
+
+**nvprof** 是Nvidia性能分析工具, **nvvp** 则是带GUI的Nvidia可视化性能分析工具。
+在这个教程中,我们主要会介绍nvprof和nvvp。
+
+:code:`paddle/legacy/math/tests` 目录中的 :code:`test_GpuProfiler` 就是用于展示上述分析工具的用法。
+
+.. literalinclude:: ../../../../paddle/legacy/math/tests/test_GpuProfiler.cpp
+   :language: c++
+   :lines: 137-151
+   :linenos:
+
+上述的代码片段包含了两种方法,您可以任意使用一个或两个来对感兴趣的代码段做性能分析。
+
+1. :code:`REGISTER_TIMER_INFO` 是一个内置的定时器封装,可以用来计算CPU函数或cuda内核的时间消耗。
+
+2. :code:`REGISTER_GPU_PROFILER` 是一个封装对象,封装了 :code:`cudaProfilerStart` 和 :code:`cudaProfilerStop` 两个操作;同时其内部实现可以避免纯CPU版本PaddlePaddle在执行本语句时发生崩溃。
+
+您会在接下来的部分中获得更多的细节介绍。
+
+详细教程
+============
+
+内置定时器
+------------
+
+如果想要启用PaddlePaddle的内置定时器,您首先需要在相关代码段中加入 :code:`REGISTER_TIMER_INFO`。
+接下来就可以使用 :code:`printStatus` 或者 :code:`printAllStatus` 函数来将信息输出到界面中。
+下面举个简单的例子:
+
+1. 加入 :code:`REGISTER_TIMER_INFO` 和 :code:`printAllStatus` 函数(如高亮部分)。
+
+   .. literalinclude:: ../../../../paddle/legacy/math/tests/test_GpuProfiler.cpp
+      :language: c++
+      :lines: 137-151
+      :emphasize-lines: 8-12,14
+      :linenos:
+
+2. cmake配置中将 **WITH_TIMER** 打开,重新编译PaddlePaddle。
+
+   .. code-block:: bash
+
+      cmake .. -DWITH_TIMER=ON
+      make
+
+3. 执行您的代码,并观察结果(如高亮部分)。
+
+   .. code-block:: bash
+      :emphasize-lines: 1,12-15
+
+      > ./paddle/legacy/math/tests/test_GpuProfiler
+      I1117 11:13:42.313065 2522362816 Util.cpp:155] commandline: ./paddle/legacy/math/tests/test_GpuProfiler
+      I1117 11:13:42.845065 2522362816 Util.cpp:130] Calling runInitFunctions
+      I1117 11:13:42.845208 2522362816 Util.cpp:143] Call runInitFunctions done.
+      [==========] Running 1 test from 1 test case.
+      [----------] Global test environment set-up.
+      [----------] 1 test from Profiler
+      [ RUN      ] Profiler.BilinearFwdBwd
+      I1117 11:13:42.845310 2522362816 test_GpuProfiler.cpp:114] Enable GPU Profiler Stat: [testBilinearFwdBwd] "numSamples = 10, channels = 16, imgSizeX = 64, imgSizeY = 64"
+      I1117 11:13:42.850154 2522362816 ThreadLocal.cpp:37] thread use undeterministic rand seed:20659751
+      I1117 11:13:42.981501 2522362816 Stat.cpp:130] ======= StatSet: [GlobalStatInfo] status ======
+      I1117 11:13:42.981539 2522362816 Stat.cpp:133] Stat=testBilinearFwdBwd total=136.141 avg=136.141 max=136.141 min=136.141 count=1
+      I1117 11:13:42.981572 2522362816 Stat.cpp:141] ======= BarrierStatSet status ======
+      I1117 11:13:42.981575 2522362816 Stat.cpp:154] --------------------------------------------------
+      [       OK ] Profiler.BilinearFwdBwd (136 ms)
+      [----------] 1 test from Profiler (136 ms total)
+
+      [----------] Global test environment tear-down
+      [==========] 1 test from 1 test case ran. (136 ms total)
+      [  PASSED  ] 1 test.
+
+nvprof 工具
+----------------
+
+要使用命令行分析工具 **nvprof**,您按如下步骤操作即可:
+
+1. 将 :code:`REGISTER_GPU_PROFILER` 函数加到代码中(参考强调部分)。
+
+   .. literalinclude:: ../../../../paddle/legacy/math/tests/test_GpuProfiler.cpp
+      :language: c++
+      :lines: 137-151
+      :emphasize-lines: 6-7
+      :linenos:
+
+2. cmake中将 **WITH_PROFILER** 配置打开,重新编译PaddlePaddle。
+
+   .. code-block:: bash
+
+      cmake .. -DWITH_PROFILER=ON
+      make
+
+3. 使用 **nvprof** 来分析执行文件。
+
+   ..
code-block:: bash + + nvprof ./paddle/legacy/math/tests/test_GpuProfiler + +然后,您就能获得如下的分析结果: + +.. code-block:: bash + + ==78544== Profiling application: ./paddle/legacy/math/tests/test_GpuProfiler + ==78544== Profiling result: + Time(%) Time Calls Avg Min Max Name + 27.60% 9.6305ms 5 1.9261ms 3.4560us 6.4035ms [CUDA memcpy HtoD] + 26.07% 9.0957ms 1 9.0957ms 9.0957ms 9.0957ms KeBilinearInterpBw + 23.78% 8.2977ms 1 8.2977ms 8.2977ms 8.2977ms KeBilinearInterpFw + 22.55% 7.8661ms 2 3.9330ms 1.5798ms 6.2863ms [CUDA memcpy DtoH] + + ==78544== API calls: + Time(%) Time Calls Avg Min Max Name + 46.85% 682.28ms 8 85.285ms 12.639us 682.03ms cudaStreamCreateWithFlags + 39.83% 580.00ms 4 145.00ms 302ns 550.27ms cudaFree + 9.82% 143.03ms 9 15.892ms 8.7090us 142.78ms cudaStreamCreate + 1.23% 17.983ms 7 2.5690ms 23.210us 6.4563ms cudaMemcpy + 1.23% 17.849ms 2 8.9247ms 8.4726ms 9.3768ms cudaStreamSynchronize + 0.66% 9.5969ms 7 1.3710ms 288.43us 2.4279ms cudaHostAlloc + 0.13% 1.9530ms 11 177.54us 7.6810us 591.06us cudaMalloc + 0.07% 1.0424ms 8 130.30us 1.6970us 453.72us cudaGetDevice + 0.04% 527.90us 40 13.197us 525ns 253.99us cudaEventCreateWithFlags + 0.03% 435.73us 348 1.2520us 124ns 42.704us cuDeviceGetAttribute + 0.03% 419.36us 1 419.36us 419.36us 419.36us cudaGetDeviceCount + 0.02% 260.75us 2 130.38us 129.32us 131.43us cudaGetDeviceProperties + 0.02% 222.32us 2 111.16us 106.94us 115.39us cudaLaunch + 0.01% 214.06us 4 53.514us 28.586us 77.655us cuDeviceGetName + 0.01% 115.45us 4 28.861us 9.8250us 44.526us cuDeviceTotalMem + 0.01% 83.988us 4 20.997us 578ns 77.760us cudaSetDevice + 0.00% 38.918us 1 38.918us 38.918us 38.918us cudaEventCreate + 0.00% 34.573us 31 1.1150us 279ns 12.784us cudaDeviceGetAttribute + 0.00% 17.767us 1 17.767us 17.767us 17.767us cudaProfilerStart + 0.00% 15.228us 2 7.6140us 3.5460us 11.682us cudaConfigureCall + 0.00% 14.536us 2 7.2680us 1.1490us 13.387us cudaGetLastError + 0.00% 8.6080us 26 331ns 173ns 783ns cudaSetupArgument + 0.00% 5.5470us 6 924ns 215ns 2.6780us cuDeviceGet + 0.00% 5.4090us 6 901ns 328ns 3.3320us cuDeviceGetCount + 0.00% 4.1770us 3 1.3920us 1.0630us 1.8300us cuDriverGetVersion + 0.00% 3.4650us 3 1.1550us 1.0810us 1.2680us cuInit + 0.00% 830ns 1 830ns 830ns 830ns cudaRuntimeGetVersion + + +nvvp 工具 +-------------- + +如果想使用可视化的分析器 **nvvp**,您可以导入 :code:`nvprof -o ...` 的输出,或者从工具的界面里运行您的应用。 + +**备注: nvvp 也支持CPU的性能分析** (需在nvvp界面中选上才能开启) + +.. image:: nvvp1.png + :align: center + :scale: 33% + +从内核函数的角度, **nvvp** 可以精确说明一个长耗时操作的具体原因。 +同时,如下图所示, **nvvp** 的内核block使用情况、寄存器使用情况和共享内存使用情况能让我们对GPU的整体使用有更好的理解。 + + +.. image:: nvvp2.png + :align: center + :scale: 33% + +而从应用的角度, **nvvp** 可以帮您提供一些定位性能瓶颈的建议。 +例如,下图中就展示了一些关于内存数据迁徙和计算资源利用率的建议,为您做性能调优提供了方向。 + +.. image:: nvvp3.png + :align: center + :scale: 33% + +.. image:: nvvp4.png + :align: center + :scale: 33% + +性能分析小技巧 +================== + +- 开始阶段,从 **nvprof** 和 **nvvp** 的输出信息入手是个不错的选择。 +- 接下来可以考虑下时间线的分析。 +- 如果真想挖掘内核深处的某个秘密,您最好先确认:这一块的耗时比例真的太高,值得深入分析。 +- 可能的情况下,试着让输出的分析数据和理论值对应。 + + 1) 例如,如果我知道内核花了10ms来移动1GB数据,那我会期望分析工具统计到速度是100GB/s。 + 2) 若有不一致之处,很有可能实际应用就是没有按照您的预期情况运行。 +- 了解您的硬件:如果您的GPU理论可以达到6 TFLOPs(6万亿次浮点运算每秒),而当前已经有5.5 TFLOPs了,那估计这里的潜力就没啥好挖的了…… + +性能分析是性能优化的关键一步。有的时候简简单单的改变就能在性能上产生明显的优化效果! 
+当然,具体情况因人而异。 + +参考资料 +=========== +Jeremy Appleyard, `GPU Profiling for Deep Learning `_, 2015 diff --git a/doc/fluid/new_docs/advanced_usage/development/host_memory_profiling_cn.md b/doc/fluid/new_docs/advanced_usage/development/host_memory_profiling_cn.md new file mode 120000 index 0000000000..904968ba4a --- /dev/null +++ b/doc/fluid/new_docs/advanced_usage/development/host_memory_profiling_cn.md @@ -0,0 +1 @@ +../../../howto/optimization/host_memory_profiling_cn.md \ No newline at end of file diff --git a/doc/fluid/new_docs/advanced_usage/development/new_op.md b/doc/fluid/new_docs/advanced_usage/development/new_op.md new file mode 120000 index 0000000000..dce0348585 --- /dev/null +++ b/doc/fluid/new_docs/advanced_usage/development/new_op.md @@ -0,0 +1 @@ +../../../dev/new_op_cn.md \ No newline at end of file diff --git a/doc/fluid/new_docs/advanced_usage/development/nvvp1.png b/doc/fluid/new_docs/advanced_usage/development/nvvp1.png new file mode 100644 index 0000000000..1af23ac3c5 Binary files /dev/null and b/doc/fluid/new_docs/advanced_usage/development/nvvp1.png differ diff --git a/doc/fluid/new_docs/advanced_usage/development/nvvp2.png b/doc/fluid/new_docs/advanced_usage/development/nvvp2.png new file mode 100644 index 0000000000..177c9db708 Binary files /dev/null and b/doc/fluid/new_docs/advanced_usage/development/nvvp2.png differ diff --git a/doc/fluid/new_docs/advanced_usage/development/nvvp3.png b/doc/fluid/new_docs/advanced_usage/development/nvvp3.png new file mode 100644 index 0000000000..d8f393667d Binary files /dev/null and b/doc/fluid/new_docs/advanced_usage/development/nvvp3.png differ diff --git a/doc/fluid/new_docs/advanced_usage/development/nvvp4.png b/doc/fluid/new_docs/advanced_usage/development/nvvp4.png new file mode 100644 index 0000000000..51f2f3e183 Binary files /dev/null and b/doc/fluid/new_docs/advanced_usage/development/nvvp4.png differ diff --git a/doc/fluid/new_docs/advanced_usage/development/pprof_1.png b/doc/fluid/new_docs/advanced_usage/development/pprof_1.png new file mode 100644 index 0000000000..8e9edbf377 Binary files /dev/null and b/doc/fluid/new_docs/advanced_usage/development/pprof_1.png differ diff --git a/doc/fluid/new_docs/advanced_usage/development/pprof_2.png b/doc/fluid/new_docs/advanced_usage/development/pprof_2.png new file mode 100644 index 0000000000..172ba20399 Binary files /dev/null and b/doc/fluid/new_docs/advanced_usage/development/pprof_2.png differ diff --git a/doc/fluid/new_docs/advanced_usage/development/timeline.jpeg b/doc/fluid/new_docs/advanced_usage/development/timeline.jpeg new file mode 100644 index 0000000000..38ec3f80c9 Binary files /dev/null and b/doc/fluid/new_docs/advanced_usage/development/timeline.jpeg differ diff --git a/doc/fluid/new_docs/advanced_usage/development/timeline_cn.md b/doc/fluid/new_docs/advanced_usage/development/timeline_cn.md new file mode 120000 index 0000000000..a05540e82a --- /dev/null +++ b/doc/fluid/new_docs/advanced_usage/development/timeline_cn.md @@ -0,0 +1 @@ +../../../howto/optimization/timeline_cn.md \ No newline at end of file diff --git a/doc/fluid/new_docs/advanced_usage/development/tracing.jpeg b/doc/fluid/new_docs/advanced_usage/development/tracing.jpeg new file mode 100644 index 0000000000..3a49fc4f8a Binary files /dev/null and b/doc/fluid/new_docs/advanced_usage/development/tracing.jpeg differ diff --git a/doc/fluid/new_docs/advanced_usage/development/write_docs.rst b/doc/fluid/new_docs/advanced_usage/development/write_docs.rst new file mode 120000 index 0000000000..dc536c8bdd --- 
/dev/null +++ b/doc/fluid/new_docs/advanced_usage/development/write_docs.rst @@ -0,0 +1 @@ +../../../dev/write_docs_cn.rst \ No newline at end of file diff --git a/doc/fluid/new_docs/advanced_usage/index.rst b/doc/fluid/new_docs/advanced_usage/index.rst new file mode 100644 index 0000000000..dea7c23661 --- /dev/null +++ b/doc/fluid/new_docs/advanced_usage/index.rst @@ -0,0 +1,23 @@ +######## +进阶使用 +######## + + +.. todo:: + + Complete this guide + +.. toctree:: + :maxdepth: 2 + + deploy/index_native.rst + deploy/index_anakin.rst + deploy/index_mobile.rst + development/contribute_to_paddle.md + development/write_docs.rst + development/new_op.md + development/cpu_profiling_cn.md + development/gpu_profiling_cn.rst + development/host_memory_profiling_cn.md + development/timeline_cn.md + benchmark.rst diff --git a/doc/fluid/new_docs/advanced_usage/pics/anakin_fm_ch.png b/doc/fluid/new_docs/advanced_usage/pics/anakin_fm_ch.png new file mode 100644 index 0000000000..52d4992a22 Binary files /dev/null and b/doc/fluid/new_docs/advanced_usage/pics/anakin_fm_ch.png differ diff --git a/doc/fluid/new_docs/beginners_guide/basics/image_classification/.gitignore b/doc/fluid/new_docs/beginners_guide/basics/image_classification/.gitignore new file mode 100644 index 0000000000..dc7c62b062 --- /dev/null +++ b/doc/fluid/new_docs/beginners_guide/basics/image_classification/.gitignore @@ -0,0 +1,8 @@ +*.pyc +train.log +output +data/cifar-10-batches-py/ +data/cifar-10-python.tar.gz +data/*.txt +data/*.list +data/mean.meta diff --git a/doc/fluid/new_docs/beginners_guide/basics/image_classification/image/dog.png b/doc/fluid/new_docs/beginners_guide/basics/image_classification/image/dog.png new file mode 100644 index 0000000000..ca8f858a90 Binary files /dev/null and b/doc/fluid/new_docs/beginners_guide/basics/image_classification/image/dog.png differ diff --git a/doc/fluid/new_docs/beginners_guide/basics/image_classification/image/dog_cat.png b/doc/fluid/new_docs/beginners_guide/basics/image_classification/image/dog_cat.png new file mode 100644 index 0000000000..38b21f2160 Binary files /dev/null and b/doc/fluid/new_docs/beginners_guide/basics/image_classification/image/dog_cat.png differ diff --git a/doc/fluid/new_docs/beginners_guide/basics/image_classification/image/fea_conv0.png b/doc/fluid/new_docs/beginners_guide/basics/image_classification/image/fea_conv0.png new file mode 100644 index 0000000000..647c822e52 Binary files /dev/null and b/doc/fluid/new_docs/beginners_guide/basics/image_classification/image/fea_conv0.png differ diff --git a/doc/fluid/new_docs/beginners_guide/basics/image_classification/image/flowers.png b/doc/fluid/new_docs/beginners_guide/basics/image_classification/image/flowers.png new file mode 100644 index 0000000000..04245cef60 Binary files /dev/null and b/doc/fluid/new_docs/beginners_guide/basics/image_classification/image/flowers.png differ diff --git a/doc/fluid/new_docs/beginners_guide/basics/image_classification/image/googlenet.jpeg b/doc/fluid/new_docs/beginners_guide/basics/image_classification/image/googlenet.jpeg new file mode 100644 index 0000000000..249dbf96df Binary files /dev/null and b/doc/fluid/new_docs/beginners_guide/basics/image_classification/image/googlenet.jpeg differ diff --git a/doc/fluid/new_docs/beginners_guide/basics/image_classification/image/ilsvrc.png b/doc/fluid/new_docs/beginners_guide/basics/image_classification/image/ilsvrc.png new file mode 100644 index 0000000000..4660ac122e Binary files /dev/null and 
b/doc/fluid/new_docs/beginners_guide/basics/image_classification/image/ilsvrc.png differ diff --git a/doc/fluid/new_docs/beginners_guide/basics/image_classification/image/inception.png b/doc/fluid/new_docs/beginners_guide/basics/image_classification/image/inception.png new file mode 100644 index 0000000000..9591a0c1e8 Binary files /dev/null and b/doc/fluid/new_docs/beginners_guide/basics/image_classification/image/inception.png differ diff --git a/doc/fluid/new_docs/beginners_guide/basics/image_classification/image/lenet.png b/doc/fluid/new_docs/beginners_guide/basics/image_classification/image/lenet.png new file mode 100644 index 0000000000..77f785e03b Binary files /dev/null and b/doc/fluid/new_docs/beginners_guide/basics/image_classification/image/lenet.png differ diff --git a/doc/fluid/new_docs/beginners_guide/basics/image_classification/image/plot.png b/doc/fluid/new_docs/beginners_guide/basics/image_classification/image/plot.png new file mode 100644 index 0000000000..57e45cc0c2 Binary files /dev/null and b/doc/fluid/new_docs/beginners_guide/basics/image_classification/image/plot.png differ diff --git a/doc/fluid/new_docs/beginners_guide/basics/image_classification/image/resnet.png b/doc/fluid/new_docs/beginners_guide/basics/image_classification/image/resnet.png new file mode 100644 index 0000000000..0aeb4f2546 Binary files /dev/null and b/doc/fluid/new_docs/beginners_guide/basics/image_classification/image/resnet.png differ diff --git a/doc/fluid/new_docs/beginners_guide/basics/image_classification/image/resnet_block.jpg b/doc/fluid/new_docs/beginners_guide/basics/image_classification/image/resnet_block.jpg new file mode 100644 index 0000000000..c500eb01a9 Binary files /dev/null and b/doc/fluid/new_docs/beginners_guide/basics/image_classification/image/resnet_block.jpg differ diff --git a/doc/fluid/new_docs/beginners_guide/basics/image_classification/image/train_and_test.png b/doc/fluid/new_docs/beginners_guide/basics/image_classification/image/train_and_test.png new file mode 100644 index 0000000000..c6336a9a69 Binary files /dev/null and b/doc/fluid/new_docs/beginners_guide/basics/image_classification/image/train_and_test.png differ diff --git a/doc/fluid/new_docs/beginners_guide/basics/image_classification/image/vgg16.png b/doc/fluid/new_docs/beginners_guide/basics/image_classification/image/vgg16.png new file mode 100644 index 0000000000..6270eefcfd Binary files /dev/null and b/doc/fluid/new_docs/beginners_guide/basics/image_classification/image/vgg16.png differ diff --git a/doc/fluid/new_docs/beginners_guide/basics/image_classification/index.md b/doc/fluid/new_docs/beginners_guide/basics/image_classification/index.md new file mode 100644 index 0000000000..ce0d2bb1dc --- /dev/null +++ b/doc/fluid/new_docs/beginners_guide/basics/image_classification/index.md @@ -0,0 +1,559 @@ + +# 图像分类 + +本教程源代码目录在[book/image_classification](https://github.com/PaddlePaddle/book/tree/develop/03.image_classification), 初次使用请参考PaddlePaddle[安装教程](https://github.com/PaddlePaddle/book/blob/develop/README.cn.md#运行这本书)。 + +## 背景介绍 + +图像相比文字能够提供更加生动、容易理解及更具艺术感的信息,是人们转递与交换信息的重要来源。在本教程中,我们专注于图像识别领域的一个重要问题,即图像分类。 + +图像分类是根据图像的语义信息将不同类别图像区分开来,是计算机视觉中重要的基本问题,也是图像检测、图像分割、物体跟踪、行为分析等其他高层视觉任务的基础。图像分类在很多领域有广泛应用,包括安防领域的人脸识别和智能视频分析等,交通领域的交通场景识别,互联网领域基于内容的图像检索和相册自动归类,医学领域的图像识别等。 + + +一般来说,图像分类通过手工特征或特征学习方法对整个图像进行全部描述,然后使用分类器判别物体类别,因此如何提取图像的特征至关重要。在深度学习算法之前使用较多的是基于词袋(Bag of Words)模型的物体分类方法。词袋方法从自然语言处理中引入,即一句话可以用一个装了词的袋子表示其特征,袋子中的词为句子中的单词、短语或字。对于图像而言,词袋方法需要构建字典。最简单的词袋模型框架可以设计为**底层特征抽取**、**特征编码**、**分类器设计**三个过程。 + 
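+为帮助理解上述三个步骤,下面给出一个极简的词袋式图像分类示意片段(仅为示意性写法:局部特征用随机向量代替真实的SIFT描述子,视觉词典大小`n_words`、KMeans聚类等均为本文假设的取值与实现方式,并非唯一做法):
+
+```python
+import numpy as np
+from sklearn.cluster import KMeans
+from sklearn.svm import LinearSVC
+
+# 1) 底层特征抽取:每张图抽取若干个128维局部描述子(这里用随机数代替SIFT等真实特征)
+n_images, n_desc, dim, n_words = 100, 50, 128, 16
+rng = np.random.RandomState(0)
+descriptors = [rng.rand(n_desc, dim) for _ in range(n_images)]
+labels = rng.randint(0, 2, n_images)  # 两个类别的随机标签,仅为演示
+
+# 2) 特征编码:用KMeans在全部描述子上聚出"视觉词典",再把每张图编码成词频直方图
+codebook = KMeans(n_clusters=n_words, random_state=0).fit(np.vstack(descriptors))
+histograms = np.array([
+    np.bincount(codebook.predict(d), minlength=n_words) for d in descriptors
+], dtype=np.float32)
+
+# 3) 分类器设计:在直方图特征上训练一个线性SVM
+clf = LinearSVC().fit(histograms, labels)
+print(clf.predict(histograms[:5]))
+```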
+而基于深度学习的图像分类方法,可以通过有监督或无监督的方式**学习**层次化的特征描述,从而取代了手工设计或选择图像特征的工作。深度学习模型中的卷积神经网络(Convolution Neural Network, CNN)近年来在图像领域取得了惊人的成绩,CNN直接利用图像像素信息作为输入,最大程度上保留了输入图像的所有信息,通过卷积操作进行特征的提取和高层抽象,模型输出直接是图像识别的结果。这种基于"输入-输出"直接端到端的学习方法取得了非常好的效果,得到了广泛的应用。 + +本教程主要介绍图像分类的深度学习模型,以及如何使用PaddlePaddle训练CNN模型。 + +## 效果展示 + +图像分类包括通用图像分类、细粒度图像分类等。图1展示了通用图像分类效果,即模型可以正确识别图像上的主要物体。 + +![dogCatClassification](./image/dog_cat.png) +
+图1. 通用图像分类展示
+ + +图2展示了细粒度图像分类-花卉识别的效果,要求模型可以正确识别花的类别。 + +![flowersClassification](./image/flowers.png) +
+图2. 细粒度图像分类展示
+ + +一个好的模型既要对不同类别识别正确,同时也应该能够对不同视角、光照、背景、变形或部分遮挡的图像正确识别(这里我们统一称作图像扰动)。图3展示了一些图像的扰动,较好的模型会像聪明的人类一样能够正确识别。 + +![imageVariations](https://raw.githubusercontent.com/PaddlePaddle/book/develop/03.image_classification/image/variations.png) +
+图3. 扰动图片展示[22]
+ +## 模型概览 + +图像识别领域大量的研究成果都是建立在[PASCAL VOC](http://host.robots.ox.ac.uk/pascal/VOC/)、[ImageNet](http://image-net.org/)等公开的数据集上,很多图像识别算法通常在这些数据集上进行测试和比较。PASCAL VOC是2005年发起的一个视觉挑战赛,ImageNet是2010年发起的大规模视觉识别竞赛(ILSVRC)的数据集,在本章中我们基于这些竞赛的一些论文介绍图像分类模型。 + +在2012年之前的传统图像分类方法可以用背景描述中提到的三步完成,但通常完整建立图像识别模型一般包括底层特征学习、特征编码、空间约束、分类器设计、模型融合等几个阶段。 +1). **底层特征提取**: 通常从图像中按照固定步长、尺度提取大量局部特征描述。常用的局部特征包括SIFT(Scale-Invariant Feature Transform, 尺度不变特征转换) \[[1](#参考文献)\]、HOG(Histogram of Oriented Gradient, 方向梯度直方图) \[[2](#参考文献)\]、LBP(Local Bianray Pattern, 局部二值模式) \[[3](#参考文献)\] 等,一般也采用多种特征描述子,防止丢失过多的有用信息。 +2). **特征编码**: 底层特征中包含了大量冗余与噪声,为了提高特征表达的鲁棒性,需要使用一种特征变换算法对底层特征进行编码,称作特征编码。常用的特征编码包括向量量化编码 \[[4](#参考文献)\]、稀疏编码 \[[5](#参考文献)\]、局部线性约束编码 \[[6](#参考文献)\]、Fisher向量编码 \[[7](#参考文献)\] 等。 +3). **空间特征约束**: 特征编码之后一般会经过空间特征约束,也称作**特征汇聚**。特征汇聚是指在一个空间范围内,对每一维特征取最大值或者平均值,可以获得一定特征不变形的特征表达。金字塔特征匹配是一种常用的特征聚会方法,这种方法提出将图像均匀分块,在分块内做特征汇聚。 +4). **通过分类器分类**: 经过前面步骤之后一张图像可以用一个固定维度的向量进行描述,接下来就是经过分类器对图像进行分类。通常使用的分类器包括SVM(Support Vector Machine, 支持向量机)、随机森林等。而使用核方法的SVM是最为广泛的分类器,在传统图像分类任务上性能很好。 + +这种方法在PASCAL VOC竞赛中的图像分类算法中被广泛使用 \[[18](#参考文献)\]。[NEC实验室](http://www.nec-labs.com/)在ILSVRC2010中采用SIFT和LBP特征,两个非线性编码器以及SVM分类器获得图像分类的冠军 \[[8](#参考文献)\]。 + +Alex Krizhevsky在2012年ILSVRC提出的CNN模型 \[[9](#参考文献)\] 取得了历史性的突破,效果大幅度超越传统方法,获得了ILSVRC2012冠军,该模型被称作AlexNet。这也是首次将深度学习用于大规模图像分类中。从AlexNet之后,涌现了一系列CNN模型,不断地在ImageNet上刷新成绩,如图4展示。随着模型变得越来越深以及精妙的结构设计,Top-5的错误率也越来越低,降到了3.5%附近。而在同样的ImageNet数据集上,人眼的辨识错误率大概在5.1%,也就是目前的深度学习模型的识别能力已经超过了人眼。 + +![ilsvrc](./image/ilsvrc.png) +
+图4. ILSVRC图像分类Top-5错误率
+ +### CNN + +传统CNN包含卷积层、全连接层等组件,并采用softmax多类别分类器和多类交叉熵损失函数,一个典型的卷积神经网络如图5所示,我们先介绍用来构造CNN的常见组件。 + +![cnnStructure](./image/lenet.png) +
+图5. CNN网络示例[20]
+ +- 卷积层(convolution layer): 执行卷积操作提取底层到高层的特征,发掘出图片局部关联性质和空间不变性质。 +- 池化层(pooling layer): 执行降采样操作。通过取卷积输出特征图中局部区块的最大值(max-pooling)或者均值(avg-pooling)。降采样也是图像处理中常见的一种操作,可以过滤掉一些不重要的高频信息。 +- 全连接层(fully-connected layer,或者fc layer): 输入层到隐藏层的神经元是全部连接的。 +- 非线性变化: 卷积层、全连接层后面一般都会接非线性变化层,例如Sigmoid、Tanh、ReLu等来增强网络的表达能力,在CNN里最常使用的为ReLu激活函数。 +- Dropout \[[10](#参考文献)\] : 在模型训练阶段随机让一些隐层节点权重不工作,提高网络的泛化能力,一定程度上防止过拟合。 + +另外,在训练过程中由于每层参数不断更新,会导致下一次输入分布发生变化,这样导致训练过程需要精心设计超参数。如2015年Sergey Ioffe和Christian Szegedy提出了Batch Normalization (BN)算法 \[[14](#参考文献)\] 中,每个batch对网络中的每一层特征都做归一化,使得每层分布相对稳定。BN算法不仅起到一定的正则作用,而且弱化了一些超参数的设计。经过实验证明,BN算法加速了模型收敛过程,在后来较深的模型中被广泛使用。 + +接下来我们主要介绍VGG,GoogleNet和ResNet网络结构。 + +### VGG + +牛津大学VGG(Visual Geometry Group)组在2014年ILSVRC提出的模型被称作VGG模型 \[[11](#参考文献)\] 。该模型相比以往模型进一步加宽和加深了网络结构,它的核心是五组卷积操作,每两组之间做Max-Pooling空间降维。同一组内采用多次连续的3X3卷积,卷积核的数目由较浅组的64增多到最深组的512,同一组内的卷积核数目是一样的。卷积之后接两层全连接层,之后是分类层。由于每组内卷积层的不同,有11、13、16、19层这几种模型,下图展示一个16层的网络结构。VGG模型结构相对简洁,提出之后也有很多文章基于此模型进行研究,如在ImageNet上首次公开超过人眼识别的模型\[[19](#参考文献)\]就是借鉴VGG模型的结构。 + +![vgg16](./image/vgg16.png) +
+图6. 基于ImageNet的VGG16模型
+ +### GoogleNet + +GoogleNet \[[12](#参考文献)\] 在2014年ILSVRC的获得了冠军,在介绍该模型之前我们先来了解NIN(Network in Network)模型 \[[13](#参考文献)\] 和Inception模块,因为GoogleNet模型由多组Inception模块组成,模型设计借鉴了NIN的一些思想。 + +NIN模型主要有两个特点:1) 引入了多层感知卷积网络(Multi-Layer Perceptron Convolution, MLPconv)代替一层线性卷积网络。MLPconv是一个微小的多层卷积网络,即在线性卷积后面增加若干层1x1的卷积,这样可以提取出高度非线性特征。2) 传统的CNN最后几层一般都是全连接层,参数较多。而NIN模型设计最后一层卷积层包含类别维度大小的特征图,然后采用全局均值池化(Avg-Pooling)替代全连接层,得到类别维度大小的向量,再进行分类。这种替代全连接层的方式有利于减少参数。 + +Inception模块如下图7所示,图(a)是最简单的设计,输出是3个卷积层和一个池化层的特征拼接。这种设计的缺点是池化层不会改变特征通道数,拼接后会导致特征的通道数较大,经过几层这样的模块堆积后,通道数会越来越大,导致参数和计算量也随之增大。为了改善这个缺点,图(b)引入3个1x1卷积层进行降维,所谓的降维就是减少通道数,同时如NIN模型中提到的1x1卷积也可以修正线性特征。 + +![inception](./image/inception.png) +
+图7. Inception模块
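+结合图7(b),降维版的Inception模块可以用下面的Fluid片段示意(`inception_module`是本文为演示而假设的函数名,各路通道数仅为示例,并非GoogleNet论文或PaddlePaddle官方实现):
+
+```python
+import paddle.fluid as fluid
+
+def inception_module(x, c1, c3r, c3, c5r, c5, cpool):
+    # 路径1:1x1卷积
+    p1 = fluid.layers.conv2d(x, num_filters=c1, filter_size=1, act='relu')
+    # 路径2:先用1x1卷积降维,再做3x3卷积
+    p2 = fluid.layers.conv2d(x, num_filters=c3r, filter_size=1, act='relu')
+    p2 = fluid.layers.conv2d(p2, num_filters=c3, filter_size=3, padding=1, act='relu')
+    # 路径3:先用1x1卷积降维,再做5x5卷积
+    p3 = fluid.layers.conv2d(x, num_filters=c5r, filter_size=1, act='relu')
+    p3 = fluid.layers.conv2d(p3, num_filters=c5, filter_size=5, padding=2, act='relu')
+    # 路径4:3x3最大池化(不改变通道数)后接1x1卷积
+    p4 = fluid.layers.pool2d(x, pool_size=3, pool_stride=1, pool_padding=1, pool_type='max')
+    p4 = fluid.layers.conv2d(p4, num_filters=cpool, filter_size=1, act='relu')
+    # 四条路径的输出沿通道维拼接
+    return fluid.layers.concat([p1, p2, p3, p4], axis=1)
+```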
+ +GoogleNet由多组Inception模块堆积而成。另外,在网络最后也没有采用传统的多层全连接层,而是像NIN网络一样采用了均值池化层;但与NIN不同的是,池化层后面接了一层到类别数映射的全连接层。除了这两个特点之外,由于网络中间层特征也很有判别性,GoogleNet在中间层添加了两个辅助分类器,在后向传播中增强梯度并且增强正则化,而整个网络的损失函数是这个三个分类器的损失加权求和。 + +GoogleNet整体网络结构如图8所示,总共22层网络:开始由3层普通的卷积组成;接下来由三组子网络组成,第一组子网络包含2个Inception模块,第二组包含5个Inception模块,第三组包含2个Inception模块;然后接均值池化层、全连接层。 + +![googleNet](./image/googlenet.jpeg) +
+图8. GoogleNet[12]
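+沿用上面示意的`inception_module`,这一整体结构可以粗略示意如下(通道数等超参数为随意取值,辅助分类器从略,仅表达"3层卷积 + 三组Inception子网络 + 均值池化 + 全连接"的层次,并非论文原始配置):
+
+```python
+def googlenet_sketch(img, class_dim=1000):
+    # 开始的3层普通卷积(其间插入两次最大池化做降采样)
+    tmp = fluid.layers.conv2d(img, num_filters=64, filter_size=7, stride=2, padding=3, act='relu')
+    tmp = fluid.layers.pool2d(tmp, pool_size=3, pool_stride=2, pool_type='max')
+    tmp = fluid.layers.conv2d(tmp, num_filters=64, filter_size=1, act='relu')
+    tmp = fluid.layers.conv2d(tmp, num_filters=192, filter_size=3, padding=1, act='relu')
+    tmp = fluid.layers.pool2d(tmp, pool_size=3, pool_stride=2, pool_type='max')
+    # 三组子网络,分别堆叠2、5、2个Inception模块,组间降采样
+    for group_idx, n_blocks in enumerate([2, 5, 2]):
+        for _ in range(n_blocks):
+            tmp = inception_module(tmp, 64, 96, 128, 16, 32, 32)
+        if group_idx < 2:
+            tmp = fluid.layers.pool2d(tmp, pool_size=3, pool_stride=2, pool_type='max')
+    # 均值池化后接到类别数大小的全连接层
+    pool = fluid.layers.pool2d(tmp, pool_type='avg', global_pooling=True)
+    return fluid.layers.fc(input=pool, size=class_dim, act='softmax')
+```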
+ + +上面介绍的是GoogleNet第一版模型(称作GoogleNet-v1)。GoogleNet-v2 \[[14](#参考文献)\] 引入BN层;GoogleNet-v3 \[[16](#参考文献)\] 对一些卷积层做了分解,进一步提高网络非线性能力和加深网络;GoogleNet-v4 \[[17](#参考文献)\] 引入下面要讲的ResNet设计思路。从v1到v4每一版的改进都会带来准确度的提升,介于篇幅,这里不再详细介绍v2到v4的结构。 + + +### ResNet + +ResNet(Residual Network) \[[15](#参考文献)\] 是2015年ImageNet图像分类、图像物体定位和图像物体检测比赛的冠军。针对训练卷积神经网络时加深网络导致准确度下降的问题,ResNet提出了采用残差学习。在已有设计思路(BN, 小卷积核,全卷积网络)的基础上,引入了残差模块。每个残差模块包含两条路径,其中一条路径是输入特征的直连通路,另一条路径对该特征做两到三次卷积操作得到该特征的残差,最后再将两条路径上的特征相加。 + +残差模块如图9所示,左边是基本模块连接方式,由两个输出通道数相同的3x3卷积组成。右边是瓶颈模块(Bottleneck)连接方式,之所以称为瓶颈,是因为上面的1x1卷积用来降维(图示例即256->64),下面的1x1卷积用来升维(图示例即64->256),这样中间3x3卷积的输入和输出通道数都较小(图示例即64->64)。 + +![ResNetBlock](./image/resnet_block.jpg) +
+图9. 残差模块
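+以图9右侧的瓶颈模块为例,"1x1降维 → 3x3卷积 → 1x1升维"再与直连路径相加的结构,可以用下面的Fluid片段示意(`bottleneck_block`为本文假设的函数名,且假设输入x的通道数已经等于ch_out;与后文`resnet_cifar10`中实际使用的`basicblock`相对应):
+
+```python
+import paddle.fluid as fluid
+
+def bottleneck_block(x, ch_mid=64, ch_out=256):
+    # 1x1卷积降维(如 256 -> 64)
+    conv1 = fluid.layers.conv2d(x, num_filters=ch_mid, filter_size=1, act='relu')
+    # 3x3卷积,输入输出通道数都较小(64 -> 64)
+    conv2 = fluid.layers.conv2d(conv1, num_filters=ch_mid, filter_size=3, padding=1, act='relu')
+    # 1x1卷积升维(64 -> 256),相加之前不做激活
+    conv3 = fluid.layers.conv2d(conv2, num_filters=ch_out, filter_size=1, act=None)
+    # 残差与直连路径逐元素相加,再统一做ReLU
+    return fluid.layers.elementwise_add(x=conv3, y=x, act='relu')
+```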
+ +图10展示了50、101、152层网络连接示意图,使用的是瓶颈模块。这三个模型的区别在于每组中残差模块的重复次数不同(见图右上角)。ResNet训练收敛较快,成功的训练了上百乃至近千层的卷积神经网络。 + +![ResNet](./image/resnet.png) +
+图10. 基于ImageNet的ResNet模型
+ + +## 数据准备 + +通用图像分类公开的标准数据集常用的有[CIFAR](https://www.cs.toronto.edu/~kriz/cifar.html)、[ImageNet](http://image-net.org/)、[COCO](http://mscoco.org/)等,常用的细粒度图像分类数据集包括[CUB-200-2011](http://www.vision.caltech.edu/visipedia/CUB-200-2011.html)、[Stanford Dog](http://vision.stanford.edu/aditya86/ImageNetDogs/)、[Oxford-flowers](http://www.robots.ox.ac.uk/~vgg/data/flowers/)等。其中ImageNet数据集规模相对较大,如[模型概览](#模型概览)一章所讲,大量研究成果基于ImageNet。ImageNet数据从2010年来稍有变化,常用的是ImageNet-2012数据集,该数据集包含1000个类别:训练集包含1,281,167张图片,每个类别数据732至1300张不等,验证集包含50,000张图片,平均每个类别50张图片。 + +由于ImageNet数据集较大,下载和训练较慢,为了方便大家学习,我们使用[CIFAR10]()数据集。CIFAR10数据集包含60,000张32x32的彩色图片,10个类别,每个类包含6,000张。其中50,000张图片作为训练集,10000张作为测试集。图11从每个类别中随机抽取了10张图片,展示了所有的类别。 + +![CIFAR](https://raw.githubusercontent.com/PaddlePaddle/book/develop/03.image_classification/image/cifar.png) +
+图11. CIFAR10数据集[21]
+ +Paddle API提供了自动加载cifar数据集模块 `paddle.dataset.cifar`。 + +通过输入`python train.py`,就可以开始训练模型了,以下小节将详细介绍`train.py`的相关内容。 + +### 模型结构 + +#### Paddle 初始化 + +让我们从导入 Paddle Fluid API 和辅助模块开始。 + +```python +import paddle +import paddle.fluid as fluid +import numpy +import sys +``` + +本教程中我们提供了VGG和ResNet两个模型的配置。 + +#### VGG + +首先介绍VGG模型结构,由于CIFAR10图片大小和数量相比ImageNet数据小很多,因此这里的模型针对CIFAR10数据做了一定的适配。卷积部分引入了BN和Dropout操作。 +VGG核心模块的输入是数据层,`vgg_bn_drop` 定义了16层VGG结构,每层卷积后面引入BN层和Dropout层,详细的定义如下: + +```python +def vgg_bn_drop(input): +def conv_block(ipt, num_filter, groups, dropouts): +return fluid.nets.img_conv_group( +input=ipt, +pool_size=2, +pool_stride=2, +conv_num_filter=[num_filter] * groups, +conv_filter_size=3, +conv_act='relu', +conv_with_batchnorm=True, +conv_batchnorm_drop_rate=dropouts, +pool_type='max') + +conv1 = conv_block(input, 64, 2, [0.3, 0]) +conv2 = conv_block(conv1, 128, 2, [0.4, 0]) +conv3 = conv_block(conv2, 256, 3, [0.4, 0.4, 0]) +conv4 = conv_block(conv3, 512, 3, [0.4, 0.4, 0]) +conv5 = conv_block(conv4, 512, 3, [0.4, 0.4, 0]) + +drop = fluid.layers.dropout(x=conv5, dropout_prob=0.5) +fc1 = fluid.layers.fc(input=drop, size=512, act=None) +bn = fluid.layers.batch_norm(input=fc1, act='relu') +drop2 = fluid.layers.dropout(x=bn, dropout_prob=0.5) +fc2 = fluid.layers.fc(input=drop2, size=512, act=None) +predict = fluid.layers.fc(input=fc2, size=10, act='softmax') +return predict +``` + +1. 首先定义了一组卷积网络,即conv_block。卷积核大小为3x3,池化窗口大小为2x2,窗口滑动大小为2,groups决定每组VGG模块是几次连续的卷积操作,dropouts指定Dropout操作的概率。所使用的`img_conv_group`是在`paddle.networks`中预定义的模块,由若干组 Conv->BN->ReLu->Dropout 和 一组 Pooling 组成。 + +2. 五组卷积操作,即 5个conv_block。 第一、二组采用两次连续的卷积操作。第三、四、五组采用三次连续的卷积操作。每组最后一个卷积后面Dropout概率为0,即不使用Dropout操作。 + +3. 最后接两层512维的全连接。 + +4. 通过上面VGG网络提取高层特征,然后经过全连接层映射到类别维度大小的向量,再通过Softmax归一化得到每个类别的概率,也可称作分类器。 + +### ResNet + +ResNet模型的第1、3、4步和VGG模型相同,这里不再介绍。主要介绍第2步即CIFAR10数据集上ResNet核心模块。 + +先介绍`resnet_cifar10`中的一些基本函数,再介绍网络连接过程。 + +- `conv_bn_layer` : 带BN的卷积层。 +- `shortcut` : 残差模块的"直连"路径,"直连"实际分两种形式:残差模块输入和输出特征通道数不等时,采用1x1卷积的升维操作;残差模块输入和输出通道相等时,采用直连操作。 +- `basicblock` : 一个基础残差模块,即图9左边所示,由两组3x3卷积组成的路径和一条"直连"路径组成。 +- `bottleneck` : 一个瓶颈残差模块,即图9右边所示,由上下1x1卷积和中间3x3卷积组成的路径和一条"直连"路径组成。 +- `layer_warp` : 一组残差模块,由若干个残差模块堆积而成。每组中第一个残差模块滑动窗口大小与其他可以不同,以用来减少特征图在垂直和水平方向的大小。 + +```python +def conv_bn_layer(input, +ch_out, +filter_size, +stride, +padding, +act='relu', +bias_attr=False): +tmp = fluid.layers.conv2d( +input=input, +filter_size=filter_size, +num_filters=ch_out, +stride=stride, +padding=padding, +act=None, +bias_attr=bias_attr) +return fluid.layers.batch_norm(input=tmp, act=act) + + +def shortcut(input, ch_in, ch_out, stride): +if ch_in != ch_out: +return conv_bn_layer(input, ch_out, 1, stride, 0, None) +else: +return input + + +def basicblock(input, ch_in, ch_out, stride): +tmp = conv_bn_layer(input, ch_out, 3, stride, 1) +tmp = conv_bn_layer(tmp, ch_out, 3, 1, 1, act=None, bias_attr=True) +short = shortcut(input, ch_in, ch_out, stride) +return fluid.layers.elementwise_add(x=tmp, y=short, act='relu') + + +def layer_warp(block_func, input, ch_in, ch_out, count, stride): +tmp = block_func(input, ch_in, ch_out, stride) +for i in range(1, count): +tmp = block_func(tmp, ch_out, ch_out, 1) +return tmp +``` + +`resnet_cifar10` 的连接结构主要有以下几个过程。 + +1. 底层输入连接一层 `conv_bn_layer`,即带BN的卷积层。 +2. 然后连接3组残差模块即下面配置3组 `layer_warp` ,每组采用图 10 左边残差模块组成。 +3. 
最后对网络做均值池化并返回该层。 + +注意:除过第一层卷积层和最后一层全连接层之外,要求三组 `layer_warp` 总的含参层数能够被6整除,即 `resnet_cifar10` 的 depth 要满足 `$(depth - 2) % 6 == 0$` 。 + +```python +def resnet_cifar10(ipt, depth=32): +# depth should be one of 20, 32, 44, 56, 110, 1202 +assert (depth - 2) % 6 == 0 +n = (depth - 2) / 6 +nStages = {16, 64, 128} +conv1 = conv_bn_layer(ipt, ch_out=16, filter_size=3, stride=1, padding=1) +res1 = layer_warp(basicblock, conv1, 16, 16, n, 1) +res2 = layer_warp(basicblock, res1, 16, 32, n, 2) +res3 = layer_warp(basicblock, res2, 32, 64, n, 2) +pool = fluid.layers.pool2d( +input=res3, pool_size=8, pool_type='avg', pool_stride=1) +predict = fluid.layers.fc(input=pool, size=10, act='softmax') +return predict +``` + +## Infererence Program 配置 + +网络输入定义为 `data_layer` (数据层),在图像分类中即为图像像素信息。CIFRAR10是RGB 3通道32x32大小的彩色图,因此输入数据大小为3072(3x32x32)。 + +```python +def inference_program(): +# The image is 32 * 32 with RGB representation. +data_shape = [3, 32, 32] +images = fluid.layers.data(name='pixel', shape=data_shape, dtype='float32') + +predict = resnet_cifar10(images, 32) +# predict = vgg_bn_drop(images) # un-comment to use vgg net +return predict +``` + +## Train Program 配置 + +然后我们需要设置训练程序 `train_program`。它首先从推理程序中进行预测。 +在训练期间,它将从预测中计算 `avg_cost`。 +在有监督训练中需要输入图像对应的类别信息,同样通过`fluid.layers.data`来定义。训练中采用多类交叉熵作为损失函数,并作为网络的输出,预测阶段定义网络的输出为分类器得到的概率信息。 + +**注意:** 训练程序应该返回一个数组,第一个返回参数必须是 `avg_cost`。训练器使用它来计算梯度。 + +```python +def train_program(): +predict = inference_program() + +label = fluid.layers.data(name='label', shape=[1], dtype='int64') +cost = fluid.layers.cross_entropy(input=predict, label=label) +avg_cost = fluid.layers.mean(cost) +accuracy = fluid.layers.accuracy(input=predict, label=label) +return [avg_cost, accuracy] +``` + +## Optimizer Function 配置 + +在下面的 `Adam optimizer`,`learning_rate` 是训练的速度,与网络的训练收敛速度有关系。 + +```python +def optimizer_program(): +return fluid.optimizer.Adam(learning_rate=0.001) +``` + +## 训练模型 + +### Trainer 配置 + +现在,我们需要配置 `Trainer`。`Trainer` 需要接受训练程序 `train_program`, `place` 和优化器 `optimizer_func`。 + +```python +use_cuda = False +place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() +trainer = fluid.Trainer( +train_func=train_program, +optimizer_func=optimizer_program, +place=place) +``` + +### Data Feeders 配置 + +`cifar.train10()` 每次产生一条样本,在完成shuffle和batch之后,作为训练的输入。 + +```python +# Each batch will yield 128 images +BATCH_SIZE = 128 + +# Reader for training +train_reader = paddle.batch( +paddle.reader.shuffle(paddle.dataset.cifar.train10(), buf_size=50000), +batch_size=BATCH_SIZE) + +# Reader for testing. A separated data set for testing. 
+test_reader = paddle.batch( +paddle.dataset.cifar.test10(), batch_size=BATCH_SIZE) +``` + +### Event Handler + +可以使用`event_handler`回调函数来观察训练过程,或进行测试等, 该回调函数是`trainer.train`函数里设定。 + +`event_handler_plot`可以用来利用回调数据来打点画图: + +![png](./image/train_and_test.png) + +```python +params_dirname = "image_classification_resnet.inference.model" + +from paddle.v2.plot import Ploter + +train_title = "Train cost" +test_title = "Test cost" +cost_ploter = Ploter(train_title, test_title) + +step = 0 +def event_handler_plot(event): +global step +if isinstance(event, fluid.EndStepEvent): +if step % 1 == 0: +cost_ploter.append(train_title, step, event.metrics[0]) +cost_ploter.plot() +step += 1 +if isinstance(event, fluid.EndEpochEvent): +avg_cost, accuracy = trainer.test( +reader=test_reader, +feed_order=['pixel', 'label']) +cost_ploter.append(test_title, step, avg_cost) + +# save parameters +if params_dirname is not None: +trainer.save_params(params_dirname) +``` + +`event_handler` 用来在训练过程中输出文本日志 + +```python +params_dirname = "image_classification_resnet.inference.model" + +# event handler to track training and testing process +def event_handler(event): +if isinstance(event, fluid.EndStepEvent): +if event.step % 100 == 0: +print("\nPass %d, Batch %d, Cost %f, Acc %f" % +(event.step, event.epoch, event.metrics[0], +event.metrics[1])) +else: +sys.stdout.write('.') +sys.stdout.flush() + +if isinstance(event, fluid.EndEpochEvent): +# Test against with the test dataset to get accuracy. +avg_cost, accuracy = trainer.test( +reader=test_reader, feed_order=['pixel', 'label']) + +print('\nTest with Pass {0}, Loss {1:2.2}, Acc {2:2.2}'.format(event.epoch, avg_cost, accuracy)) + +# save parameters +if params_dirname is not None: +trainer.save_params(params_dirname) +``` + +### 训练 + +通过`trainer.train`函数训练: + +**注意:** CPU,每个 Epoch 将花费大约15~20分钟。这部分可能需要一段时间。请随意修改代码,在GPU上运行测试,以提高培训速度。 + +```python +trainer.train( +reader=train_reader, +num_epochs=2, +event_handler=event_handler, +feed_order=['pixel', 'label']) +``` + +一轮训练log示例如下所示,经过1个pass, 训练集上平均 Accuracy 为0.59 ,测试集上平均 Accuracy 为0.6 。 + +```text +Pass 0, Batch 0, Cost 3.869598, Acc 0.164062 +................................................................................................... +Pass 100, Batch 0, Cost 1.481038, Acc 0.460938 +................................................................................................... +Pass 200, Batch 0, Cost 1.340323, Acc 0.523438 +................................................................................................... +Pass 300, Batch 0, Cost 1.223424, Acc 0.593750 +.......................................................................................... +Test with Pass 0, Loss 1.1, Acc 0.6 +``` + +图12是训练的分类错误率曲线图,运行到第200个pass后基本收敛,最终得到测试集上分类错误率为8.54%。 + +![CIFARErrorRate](./image/plot.png) +
+图12. CIFAR10数据集上VGG模型的分类错误率
+ +## 应用模型 + +可以使用训练好的模型对图片进行分类,下面程序展示了如何使用 `fluid.Inferencer` 接口进行推断,可以打开注释,更改加载的模型。 + +### 生成预测输入数据 + +`dog.png` is an example image of a dog. Turn it into an numpy array to match the data feeder format. + +```python +# Prepare testing data. +from PIL import Image +import numpy as np +import os + +def load_image(file): +im = Image.open(file) +im = im.resize((32, 32), Image.ANTIALIAS) + +im = np.array(im).astype(np.float32) +# The storage order of the loaded image is W(width), +# H(height), C(channel). PaddlePaddle requires +# the CHW order, so transpose them. +im = im.transpose((2, 0, 1)) # CHW +im = im / 255.0 + +# Add one dimension to mimic the list format. +im = numpy.expand_dims(im, axis=0) +return im + +cur_dir = os.getcwd() +img = load_image(cur_dir + '/image/dog.png') +``` + +### Inferencer 配置和预测 + +`Inferencer` 需要一个 `infer_func` 和 `param_path` 来设置网络和经过训练的参数。 +我们可以简单地插入前面定义的推理程序。 +现在我们准备做预测。 + +```python +inferencer = fluid.Inferencer( +infer_func=inference_program, param_path=params_dirname, place=place) + +# inference +results = inferencer.infer({'pixel': img}) +print("infer results: ", results) +``` + +## 总结 + +传统图像分类方法由多个阶段构成,框架较为复杂,而端到端的CNN模型结构可一步到位,而且大幅度提升了分类准确率。本文我们首先介绍VGG、GoogleNet、ResNet三个经典的模型;然后基于CIFAR10数据集,介绍如何使用PaddlePaddle配置和训练CNN模型,尤其是VGG和ResNet模型;最后介绍如何使用PaddlePaddle的API接口对图片进行预测和特征提取。对于其他数据集比如ImageNet,配置和训练流程是同样的,大家可以自行进行实验。 + + +## 参考文献 + +[1] D. G. Lowe, [Distinctive image features from scale-invariant keypoints](http://www.cs.ubc.ca/~lowe/papers/ijcv04.pdf). IJCV, 60(2):91-110, 2004. + +[2] N. Dalal, B. Triggs, [Histograms of Oriented Gradients for Human Detection](http://vision.stanford.edu/teaching/cs231b_spring1213/papers/CVPR05_DalalTriggs.pdf), Proc. IEEE Conf. Computer Vision and Pattern Recognition, 2005. + +[3] Ahonen, T., Hadid, A., and Pietikinen, M. (2006). [Face description with local binary patterns: Application to face recognition](http://ieeexplore.ieee.org/document/1717463/). PAMI, 28. + +[4] J. Sivic, A. Zisserman, [Video Google: A Text Retrieval Approach to Object Matching in Videos](http://www.robots.ox.ac.uk/~vgg/publications/papers/sivic03.pdf), Proc. Ninth Int'l Conf. Computer Vision, pp. 1470-1478, 2003. + +[5] B. Olshausen, D. Field, [Sparse Coding with an Overcomplete Basis Set: A Strategy Employed by V1?](http://redwood.psych.cornell.edu/papers/olshausen_field_1997.pdf), Vision Research, vol. 37, pp. 3311-3325, 1997. + +[6] Wang, J., Yang, J., Yu, K., Lv, F., Huang, T., and Gong, Y. (2010). [Locality-constrained Linear Coding for image classification](http://ieeexplore.ieee.org/abstract/document/5540018/). In CVPR. + +[7] Perronnin, F., Sánchez, J., & Mensink, T. (2010). [Improving the fisher kernel for large-scale image classification](http://dl.acm.org/citation.cfm?id=1888101). In ECCV (4). + +[8] Lin, Y., Lv, F., Cao, L., Zhu, S., Yang, M., Cour, T., Yu, K., and Huang, T. (2011). [Large-scale image clas- sification: Fast feature extraction and SVM training](http://ieeexplore.ieee.org/document/5995477/). In CVPR. + +[9] Krizhevsky, A., Sutskever, I., and Hinton, G. (2012). [ImageNet classification with deep convolutional neu- ral networks](http://www.cs.toronto.edu/~kriz/imagenet_classification_with_deep_convolutional.pdf). In NIPS. + +[10] G.E. Hinton, N. Srivastava, A. Krizhevsky, I. Sutskever, and R.R. Salakhutdinov. [Improving neural networks by preventing co-adaptation of feature detectors](https://arxiv.org/abs/1207.0580). arXiv preprint arXiv:1207.0580, 2012. + +[11] K. Chatfield, K. Simonyan, A. Vedaldi, A. Zisserman. 
[Return of the Devil in the Details: Delving Deep into Convolutional Nets](https://arxiv.org/abs/1405.3531). BMVC, 2014. + +[12] Szegedy, C., Liu, W., Jia, Y., Sermanet, P., Reed, S., Anguelov, D., Erhan, D., Vanhoucke, V., Rabinovich, A., [Going deeper with convolutions](https://arxiv.org/abs/1409.4842). In: CVPR. (2015) + +[13] Lin, M., Chen, Q., and Yan, S. [Network in network](https://arxiv.org/abs/1312.4400). In Proc. ICLR, 2014. + +[14] S. Ioffe and C. Szegedy. [Batch normalization: Accelerating deep network training by reducing internal covariate shift](https://arxiv.org/abs/1502.03167). In ICML, 2015. + +[15] K. He, X. Zhang, S. Ren, J. Sun. [Deep Residual Learning for Image Recognition](https://arxiv.org/abs/1512.03385). CVPR 2016. + +[16] Szegedy, C., Vanhoucke, V., Ioffe, S., Shlens, J., Wojna, Z. [Rethinking the inception architecture for computer vision](https://arxiv.org/abs/1512.00567). In: CVPR. (2016). + +[17] Szegedy, C., Ioffe, S., Vanhoucke, V. [Inception-v4, inception-resnet and the impact of residual connections on learning](https://arxiv.org/abs/1602.07261). arXiv:1602.07261 (2016). + +[18] Everingham, M., Eslami, S. M. A., Van Gool, L., Williams, C. K. I., Winn, J. and Zisserman, A. [The Pascal Visual Object Classes Challenge: A Retrospective](http://link.springer.com/article/10.1007/s11263-014-0733-5). International Journal of Computer Vision, 111(1), 98-136, 2015. + +[19] He, K., Zhang, X., Ren, S., and Sun, J. [Delving Deep into Rectifiers: Surpassing Human-Level Performance on ImageNet Classification](https://arxiv.org/abs/1502.01852). ArXiv e-prints, February 2015. + +[20] http://deeplearning.net/tutorial/lenet.html + +[21] https://www.cs.toronto.edu/~kriz/cifar.html + +[22] http://cs231n.github.io/classification/ +
本教程PaddlePaddle 创作,采用 知识共享 署名-相同方式共享 4.0 国际 许可协议进行许可。 diff --git a/doc/fluid/new_docs/beginners_guide/basics/index.rst b/doc/fluid/new_docs/beginners_guide/basics/index.rst new file mode 100644 index 0000000000..d16f8b9472 --- /dev/null +++ b/doc/fluid/new_docs/beginners_guide/basics/index.rst @@ -0,0 +1,18 @@ +################ +深度学习基础知识 +################ + + +.. todo:: + + 概述 + +.. toctree:: + :maxdepth: 2 + + image_classification/index.md + word2vec/index.md + recommender_system/index.md + understand_sentiment/index.md + label_semantic_roles/index.md + machine_translation/index.md diff --git a/doc/fluid/new_docs/beginners_guide/basics/label_semantic_roles/.gitignore b/doc/fluid/new_docs/beginners_guide/basics/label_semantic_roles/.gitignore new file mode 100644 index 0000000000..29b5622a53 --- /dev/null +++ b/doc/fluid/new_docs/beginners_guide/basics/label_semantic_roles/.gitignore @@ -0,0 +1,12 @@ +data/train.list +data/test.* +data/conll05st-release.tar.gz +data/conll05st-release +data/predicate_dict +data/label_dict +data/word_dict +data/emb +data/feature +output +predict.res +train.log diff --git a/doc/fluid/new_docs/beginners_guide/basics/label_semantic_roles/image/bidirectional_stacked_lstm.png b/doc/fluid/new_docs/beginners_guide/basics/label_semantic_roles/image/bidirectional_stacked_lstm.png new file mode 100644 index 0000000000..e63f5ebd6d Binary files /dev/null and b/doc/fluid/new_docs/beginners_guide/basics/label_semantic_roles/image/bidirectional_stacked_lstm.png differ diff --git a/doc/fluid/new_docs/beginners_guide/basics/label_semantic_roles/image/bidirectional_stacked_lstm_en.png b/doc/fluid/new_docs/beginners_guide/basics/label_semantic_roles/image/bidirectional_stacked_lstm_en.png new file mode 100755 index 0000000000..f0a195c24d Binary files /dev/null and b/doc/fluid/new_docs/beginners_guide/basics/label_semantic_roles/image/bidirectional_stacked_lstm_en.png differ diff --git a/doc/fluid/new_docs/beginners_guide/basics/label_semantic_roles/image/bio_example.png b/doc/fluid/new_docs/beginners_guide/basics/label_semantic_roles/image/bio_example.png new file mode 100755 index 0000000000..e5f7151c9f Binary files /dev/null and b/doc/fluid/new_docs/beginners_guide/basics/label_semantic_roles/image/bio_example.png differ diff --git a/doc/fluid/new_docs/beginners_guide/basics/label_semantic_roles/image/bio_example_en.png b/doc/fluid/new_docs/beginners_guide/basics/label_semantic_roles/image/bio_example_en.png new file mode 100755 index 0000000000..93b44dd487 Binary files /dev/null and b/doc/fluid/new_docs/beginners_guide/basics/label_semantic_roles/image/bio_example_en.png differ diff --git a/doc/fluid/new_docs/beginners_guide/basics/label_semantic_roles/image/db_lstm_network.png b/doc/fluid/new_docs/beginners_guide/basics/label_semantic_roles/image/db_lstm_network.png new file mode 100644 index 0000000000..592f7ee23b Binary files /dev/null and b/doc/fluid/new_docs/beginners_guide/basics/label_semantic_roles/image/db_lstm_network.png differ diff --git a/doc/fluid/new_docs/beginners_guide/basics/label_semantic_roles/image/db_lstm_network_en.png b/doc/fluid/new_docs/beginners_guide/basics/label_semantic_roles/image/db_lstm_network_en.png new file mode 100755 index 0000000000..c3646312e4 Binary files /dev/null and b/doc/fluid/new_docs/beginners_guide/basics/label_semantic_roles/image/db_lstm_network_en.png differ diff --git a/doc/fluid/new_docs/beginners_guide/basics/label_semantic_roles/image/dependency_parsing.png 
b/doc/fluid/new_docs/beginners_guide/basics/label_semantic_roles/image/dependency_parsing.png new file mode 100755 index 0000000000..9265b67173 Binary files /dev/null and b/doc/fluid/new_docs/beginners_guide/basics/label_semantic_roles/image/dependency_parsing.png differ diff --git a/doc/fluid/new_docs/beginners_guide/basics/label_semantic_roles/image/dependency_parsing_en.png b/doc/fluid/new_docs/beginners_guide/basics/label_semantic_roles/image/dependency_parsing_en.png new file mode 100755 index 0000000000..23f4f45b60 Binary files /dev/null and b/doc/fluid/new_docs/beginners_guide/basics/label_semantic_roles/image/dependency_parsing_en.png differ diff --git a/doc/fluid/new_docs/beginners_guide/basics/label_semantic_roles/image/linear_chain_crf.png b/doc/fluid/new_docs/beginners_guide/basics/label_semantic_roles/image/linear_chain_crf.png new file mode 100644 index 0000000000..0778fda74b Binary files /dev/null and b/doc/fluid/new_docs/beginners_guide/basics/label_semantic_roles/image/linear_chain_crf.png differ diff --git a/doc/fluid/new_docs/beginners_guide/basics/label_semantic_roles/image/stacked_lstm.png b/doc/fluid/new_docs/beginners_guide/basics/label_semantic_roles/image/stacked_lstm.png new file mode 100644 index 0000000000..3d2914c726 Binary files /dev/null and b/doc/fluid/new_docs/beginners_guide/basics/label_semantic_roles/image/stacked_lstm.png differ diff --git a/doc/fluid/new_docs/beginners_guide/basics/label_semantic_roles/image/stacked_lstm_en.png b/doc/fluid/new_docs/beginners_guide/basics/label_semantic_roles/image/stacked_lstm_en.png new file mode 100755 index 0000000000..0b944ef91e Binary files /dev/null and b/doc/fluid/new_docs/beginners_guide/basics/label_semantic_roles/image/stacked_lstm_en.png differ diff --git a/doc/fluid/new_docs/beginners_guide/basics/label_semantic_roles/index.md b/doc/fluid/new_docs/beginners_guide/basics/label_semantic_roles/index.md new file mode 100644 index 0000000000..828ca73831 --- /dev/null +++ b/doc/fluid/new_docs/beginners_guide/basics/label_semantic_roles/index.md @@ -0,0 +1,568 @@ +# 语义角色标注 + +本教程源代码目录在[book/label_semantic_roles](https://github.com/PaddlePaddle/book/tree/develop/07.label_semantic_roles), 初次使用请参考PaddlePaddle[安装教程](https://github.com/PaddlePaddle/book/blob/develop/README.cn.md#运行这本书)。 + +## 背景介绍 + +自然语言分析技术大致分为三个层面:词法分析、句法分析和语义分析。语义角色标注是实现浅层语义分析的一种方式。在一个句子中,谓词是对主语的陈述或说明,指出“做什么”、“是什么”或“怎么样,代表了一个事件的核心,跟谓词搭配的名词称为论元。语义角色是指论元在动词所指事件中担任的角色。主要有:施事者(Agent)、受事者(Patient)、客体(Theme)、经验者(Experiencer)、受益者(Beneficiary)、工具(Instrument)、处所(Location)、目标(Goal)和来源(Source)等。 + +请看下面的例子,“遇到” 是谓词(Predicate,通常简写为“Pred”),“小明”是施事者(Agent),“小红”是受事者(Patient),“昨天” 是事件发生的时间(Time),“公园”是事情发生的地点(Location)。 + +$$\mbox{[小明]}_{\mbox{Agent}}\mbox{[昨天]}_{\mbox{Time}}\mbox{[晚上]}_{\mbox{Time}}\mbox{在[公园]}_{\mbox{Location}}\mbox{[遇到]}_{\mbox{Predicate}}\mbox{了[小红]}_{\mbox{Patient}}\mbox{。}$$ + +语义角色标注(Semantic Role Labeling,SRL)以句子的谓词为中心,不对句子所包含的语义信息进行深入分析,只分析句子中各成分与谓词之间的关系,即句子的谓词(Predicate)- 论元(Argument)结构,并用语义角色来描述这些结构关系,是许多自然语言理解任务(如信息抽取,篇章分析,深度问答等)的一个重要中间步骤。在研究中一般都假定谓词是给定的,所要做的就是找出给定谓词的各个论元和它们的语义角色。 + +传统的SRL系统大多建立在句法分析基础之上,通常包括5个流程: + +1. 构建一棵句法分析树,例如,图1是对上面例子进行依存句法分析得到的一棵句法树。 +2. 从句法树上识别出给定谓词的候选论元。 +3. 候选论元剪除;一个句子中的候选论元可能很多,候选论元剪除就是从大量的候选项中剪除那些最不可能成为论元的候选项。 +4. 论元识别:这个过程是从上一步剪除之后的候选中判断哪些是真正的论元,通常当做一个二分类问题来解决。 +5. 对第4步的结果,通过多分类得到论元的语义角色标签。可以看到,句法分析是基础,并且后续步骤常常会构造的一些人工特征,这些特征往往也来自句法分析。 + +![dependencyParsing](./image/dependency_parsing.png) +
+图1. 依存句法分析句法树示例
+ +然而,完全句法分析需要确定句子所包含的全部句法信息,并确定句子各成分之间的关系,是一个非常困难的任务,目前技术下的句法分析准确率并不高,句法分析的细微错误都会导致SRL的错误。为了降低问题的复杂度,同时获得一定的句法结构信息,“浅层句法分析”的思想应运而生。浅层句法分析也称为部分句法分析(partial parsing)或语块划分(chunking)。和完全句法分析得到一颗完整的句法树不同,浅层句法分析只需要识别句子中某些结构相对简单的独立成分,例如:动词短语,这些被识别出来的结构称为语块。为了回避 “无法获得准确率较高的句法树” 所带来的困难,一些研究\[[1](#参考文献)\]也提出了基于语块(chunk)的SRL方法。基于语块的SRL方法将SRL作为一个序列标注问题来解决。序列标注任务一般都会采用BIO表示方式来定义序列标注的标签集,我们先来介绍这种表示方法。在BIO表示法中,B代表语块的开始,I代表语块的中间,O代表语块结束。通过B、I、O 三种标记将不同的语块赋予不同的标签,例如:对于一个角色为A的论元,将它所包含的第一个语块赋予标签B-A,将它所包含的其它语块赋予标签I-A,不属于任何论元的语块赋予标签O。 + +我们继续以上面的这句话为例,图1展示了BIO表示方法。 + +![bioExample](./image/bio_example.png) +
+图2. BIO标注方法示例
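+为帮助理解BIO标签的生成方式,下面用一小段Python把"论元区间 + 角色"转换成标签序列(`to_bio`与这里的角色名均为本文为演示而虚构;为了同时展示I-标签,这里把"昨天晚上"合并当作一个Time语块处理):
+
+```python
+def to_bio(tokens, spans):
+    # spans: {(起始下标, 结束下标): 角色},闭区间;未被任何论元覆盖的词标为O
+    tags = ['O'] * len(tokens)
+    for (start, end), role in spans.items():
+        tags[start] = 'B-' + role
+        for i in range(start + 1, end + 1):
+            tags[i] = 'I-' + role
+    return tags
+
+tokens = ['小明', '昨天', '晚上', '在', '公园', '遇到', '了', '小红', '。']
+spans = {(0, 0): 'Agent', (1, 2): 'Time', (4, 4): 'Location',
+         (5, 5): 'Predicate', (7, 7): 'Patient'}
+for tok, tag in zip(tokens, to_bio(tokens, spans)):
+    print(tok, tag)
+# 小明 B-Agent / 昨天 B-Time / 晚上 I-Time / 在 O / 公园 B-Location / ...
+```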
+ +从上面的例子可以看到,根据序列标注结果可以直接得到论元的语义角色标注结果,是一个相对简单的过程。这种简单性体现在:(1)依赖浅层句法分析,降低了句法分析的要求和难度;(2)没有了候选论元剪除这一步骤;(3)论元的识别和论元标注是同时实现的。这种一体化处理论元识别和论元标注的方法,简化了流程,降低了错误累积的风险,往往能够取得更好的结果。 + +与基于语块的SRL方法类似,在本教程中我们也将SRL看作一个序列标注问题,不同的是,我们只依赖输入文本序列,不依赖任何额外的语法解析结果或是复杂的人造特征,利用深度神经网络构建一个端到端学习的SRL系统。我们以[CoNLL-2004 and CoNLL-2005 Shared Tasks](http://www.cs.upc.edu/~srlconll/)任务中SRL任务的公开数据集为例,实践下面的任务:给定一句话和这句话里的一个谓词,通过序列标注的方式,从句子中找到谓词对应的论元,同时标注它们的语义角色。 + +## 模型概览 + +循环神经网络(Recurrent Neural Network)是一种对序列建模的重要模型,在自然语言处理任务中有着广泛地应用。不同于前馈神经网络(Feed-forward Neural Network),RNN能够处理输入之间前后关联的问题。LSTM是RNN的一种重要变种,常用来学习长序列中蕴含的长程依赖关系,我们在[情感分析](https://github.com/PaddlePaddle/book/tree/develop/05.understand_sentiment)一篇中已经介绍过,这一篇中我们依然利用LSTM来解决SRL问题。 + +### 栈式循环神经网络(Stacked Recurrent Neural Network) + +深层网络有助于形成层次化特征,网络上层在下层已经学习到的初级特征基础上,形成更复杂的高级特征。尽管LSTM沿时间轴展开后等价于一个非常“深”的前馈网络,但由于LSTM各个时间步参数共享,`$t-1$`时刻状态到`$t$`时刻的映射,始终只经过了一次非线性映射,也就是说单层LSTM对状态转移的建模是 “浅” 的。堆叠多个LSTM单元,令前一个LSTM`$t$`时刻的输出,成为下一个LSTM单元`$t$`时刻的输入,帮助我们构建起一个深层网络,我们把它称为第一个版本的栈式循环神经网络。深层网络提高了模型拟合复杂模式的能力,能够更好地建模跨不同时间步的模式\[[2](#参考文献)\]。 + +然而,训练一个深层LSTM网络并非易事。纵向堆叠多个LSTM单元可能遇到梯度在纵向深度上传播受阻的问题。通常,堆叠4层LSTM单元可以正常训练,当层数达到4~8层时,会出现性能衰减,这时必须考虑一些新的结构以保证梯度纵向顺畅传播,这是训练深层LSTM网络必须解决的问题。我们可以借鉴LSTM解决 “梯度消失梯度爆炸” 问题的智慧之一:在记忆单元(Memory Cell)这条信息传播的路线上没有非线性映射,当梯度反向传播时既不会衰减、也不会爆炸。因此,深层LSTM模型也可以在纵向上添加一条保证梯度顺畅传播的路径。 + +一个LSTM单元完成的运算可以被分为三部分:(1)输入到隐层的映射(input-to-hidden) :每个时间步输入信息`$x$`会首先经过一个矩阵映射,再作为遗忘门,输入门,记忆单元,输出门的输入,注意,这一次映射没有引入非线性激活;(2)隐层到隐层的映射(hidden-to-hidden):这一步是LSTM计算的主体,包括遗忘门,输入门,记忆单元更新,输出门的计算;(3)隐层到输出的映射(hidden-to-output):通常是简单的对隐层向量进行激活。我们在第一个版本的栈式网络的基础上,加入一条新的路径:除上一层LSTM输出之外,将前层LSTM的输入到隐层的映射作为的一个新的输入,同时加入一个线性映射去学习一个新的变换。 + +图3是最终得到的栈式循环神经网络结构示意图。 + +![lstmStructure](./image/stacked_lstm.png) +
+图3. 基于LSTM的栈式循环神经网络结构示意图
+ +### 双向循环神经网络(Bidirectional Recurrent Neural Network) + +在LSTM中,`$t$`时刻的隐藏层向量编码了到`$t$`时刻为止所有输入的信息,但`$t$`时刻的LSTM可以看到历史,却无法看到未来。在绝大多数自然语言处理任务中,我们几乎总是能拿到整个句子。这种情况下,如果能够像获取历史信息一样,得到未来的信息,对序列学习任务会有很大的帮助。 + +为了克服这一缺陷,我们可以设计一种双向循环网络单元,它的思想简单且直接:对上一节的栈式循环神经网络进行一个小小的修改,堆叠多个LSTM单元,让每一层LSTM单元分别以:正向、反向、正向 …… 的顺序学习上一层的输出序列。于是,从第2层开始,`$t$`时刻我们的LSTM单元便总是可以看到历史和未来的信息。图4是基于LSTM的双向循环神经网络结构示意图。 + +![lstmStructure](./image/bidirectional_stacked_lstm.png) +
+图4. 基于LSTM的双向循环神经网络结构示意图
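+结合图3中的"额外路径"与图4中的交替方向,这种堆叠方式可以抽取成下面的Fluid片段(写法与后文SRL模型`db_lstm`中的depth循环一致,此处单独列出便于理解;`hidden_dim`等取值仅为示例):
+
+```python
+import paddle.fluid as fluid
+
+def stacked_bi_lstm(emb, depth=4, hidden_dim=512):
+    # 第0层:输入到隐层的线性映射,接一个正向LSTM
+    hidden = fluid.layers.fc(input=emb, size=hidden_dim, act='tanh')
+    lstm = fluid.layers.dynamic_lstm(input=hidden, size=hidden_dim)
+    input_tmp = [hidden, lstm]
+    for i in range(1, depth):
+        # 图3的新路径:上一层的输入到隐层映射与LSTM输出,各自线性变换后求和
+        mix = fluid.layers.sums(input=[
+            fluid.layers.fc(input=input_tmp[0], size=hidden_dim, act='tanh'),
+            fluid.layers.fc(input=input_tmp[1], size=hidden_dim, act='tanh'),
+        ])
+        # 图4的交替方向:奇数层反向、偶数层正向,使高层同时看到历史与未来
+        lstm = fluid.layers.dynamic_lstm(
+            input=mix, size=hidden_dim, is_reverse=((i % 2) == 1))
+        input_tmp = [mix, lstm]
+    return input_tmp
+```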
+ +需要说明的是,这种双向RNN结构和Bengio等人在机器翻译任务中使用的双向RNN结构\[[3](#参考文献), [4](#参考文献)\] 并不相同,我们会在后续[机器翻译](https://github.com/PaddlePaddle/book/blob/develop/08.machine_translation/README.cn.md)任务中,介绍另一种双向循环神经网络。 + +### 条件随机场 (Conditional Random Field) + +使用神经网络模型解决问题的思路通常是:前层网络学习输入的特征表示,网络的最后一层在特征基础上完成最终的任务。在SRL任务中,深层LSTM网络学习输入的特征表示,条件随机场(Conditional Random Filed, CRF)在特征的基础上完成序列标注,处于整个网络的末端。 + +CRF是一种概率化结构模型,可以看作是一个概率无向图模型,结点表示随机变量,边表示随机变量之间的概率依赖关系。简单来讲,CRF学习条件概率`$P(X|Y)$`,其中 `$X = (x_1, x_2, ... , x_n)$` 是输入序列,`$Y = (y_1, y_2, ... , y_n)$` 是标记序列;解码过程是给定 `$X$`序列求解令`$P(Y|X)$`最大的`$Y$`序列,即`$Y^* = \mbox{arg max}_{Y} P(Y | X)$`。 + +序列标注任务只需要考虑输入和输出都是一个线性序列,并且由于我们只是将输入序列作为条件,不做任何条件独立假设,因此输入序列的元素之间并不存在图结构。综上,在序列标注任务中使用的是如图5所示的定义在链式图上的CRF,称之为线性链条件随机场(Linear Chain Conditional Random Field)。 + +![linear_chain_crf](./image/linear_chain_crf.png) +
+图5. 序列标注任务中使用的线性链条件随机场
+ +根据线性链条件随机场上的因子分解定理\[[5](#参考文献)\],在给定观测序列`$X$`时,一个特定标记序列`$Y$`的概率可以定义为: + +$$p(Y | X) = \frac{1}{Z(X)} \text{exp}\left(\sum_{i=1}^{n}\left(\sum_{j}\lambda_{j}t_{j} (y_{i - 1}, y_{i}, X, i) + \sum_{k} \mu_k s_k (y_i, X, i)\right)\right)$$ + +其中`$Z(X)$`是归一化因子,`$t_j$` 是定义在边上的特征函数,依赖于当前和前一个位置,称为转移特征,表示对于输入序列`$X$`及其标注序列在 `$i$`及`$i - 1$`位置上标记的转移概率。`$s_k$`是定义在结点上的特征函数,称为状态特征,依赖于当前位置,表示对于观察序列`$X$`及其`$i$`位置的标记概率。`$\lambda_j$` 和 `$\mu_k$` 分别是转移特征函数和状态特征函数对应的权值。实际上,`$t$`和`$s$`可以用相同的数学形式表示,再对转移特征和状态特在各个位置`$i$`求和有:`$f_{k}(Y, X) = \sum_{i=1}^{n}f_k({y_{i - 1}, y_i, X, i})$`,把`$f$`统称为特征函数,于是`$P(Y|X)$`可表示为: + +$$p(Y|X, W) = \frac{1}{Z(X)}\text{exp}\sum_{k}\omega_{k}f_{k}(Y, X)$$ + +`$\omega$`是特征函数对应的权值,是CRF模型要学习的参数。训练时,对于给定的输入序列和对应的标记序列集合`$D = \left[(X_1, Y_1), (X_2 , Y_2) , ... , (X_N, Y_N)\right]$` ,通过正则化的极大似然估计,求解如下优化目标: + +$$\DeclareMathOperator*{\argmax}{arg\,max} L(\lambda, D) = - \text{log}\left(\prod_{m=1}^{N}p(Y_m|X_m, W)\right) + C \frac{1}{2}\lVert W\rVert^{2}$$ + +这个优化目标可以通过反向传播算法和整个神经网络一起求解。解码时,对于给定的输入序列`$X$`,通过解码算法(通常有:维特比算法、Beam Search)求令出条件概率`$\bar{P}(Y|X)$`最大的输出序列 `$\bar{Y}$`。 + +### 深度双向LSTM(DB-LSTM)SRL模型 + +在SRL任务中,输入是 “谓词” 和 “一句话”,目标是从这句话中找到谓词的论元,并标注论元的语义角色。如果一个句子含有`$n$`个谓词,这个句子会被处理`$n$`次。一个最为直接的模型是下面这样: + +1. 构造输入; +- 输入1是谓词,输入2是句子 +- 将输入1扩展成和输入2一样长的序列,用one-hot方式表示; +2. one-hot方式的谓词序列和句子序列通过词表,转换为实向量表示的词向量序列; +3. 将步骤2中的2个词向量序列作为双向LSTM的输入,学习输入序列的特征表示; +4. CRF以步骤3中模型学习到的特征为输入,以标记序列为监督信号,实现序列标注; + +大家可以尝试上面这种方法。这里,我们提出一些改进,引入两个简单但对提高系统性能非常有效的特征: + +- 谓词上下文:上面的方法中,只用到了谓词的词向量表达谓词相关的所有信息,这种方法始终是非常弱的,特别是如果谓词在句子中出现多次,有可能引起一定的歧义。从经验出发,谓词前后若干个词的一个小片段,能够提供更丰富的信息,帮助消解歧义。于是,我们把这样的经验也添加到模型中,为每个谓词同时抽取一个“谓词上下文” 片段,也就是从这个谓词前后各取`$n$`个词构成的一个窗口片段; +- 谓词上下文区域标记:为句子中的每一个词引入一个0-1二值变量,表示它们是否在“谓词上下文”片段中; + +修改后的模型如下(图6是一个深度为4的模型结构示意图): + +1. 构造输入 +- 输入1是句子序列,输入2是谓词序列,输入3是谓词上下文,从句子中抽取这个谓词前后各`$n$`个词,构成谓词上下文,用one-hot方式表示,输入4是谓词上下文区域标记,标记了句子中每一个词是否在谓词上下文中; +- 将输入2~3均扩展为和输入1一样长的序列; +2. 输入1~4均通过词表取词向量转换为实向量表示的词向量序列;其中输入1、3共享同一个词表,输入2和4各自独有词表; +3. 第2步的4个词向量序列作为双向LSTM模型的输入;LSTM模型学习输入序列的特征表示,得到新的特性表示序列; +4. CRF以第3步中LSTM学习到的特征为输入,以标记序列为监督信号,完成序列标注; + +![db_lstm_network](./image/db_lstm_network.png) +
+图6. SRL任务上的深层双向LSTM模型
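+图6中用到的"谓词上下文"窗口与"谓词上下文区域标记"两个特征,其构造过程可以用下面的纯Python片段示意(`predicate_context`为本文虚构的演示函数,`n=2`即窗口为5;示例与后文数据介绍一节表格中的样本一致,填充符沿用表中的"×"):
+
+```python
+def predicate_context(tokens, pred_idx, n=2):
+    # 取谓词前后各n个词构成窗口(共2n+1个词),越界处用"×"填充;
+    # 同时为句中每个词生成是否落在窗口内的0/1区域标记
+    pad = u'×'
+    padded = [pad] * n + tokens + [pad] * n
+    window = padded[pred_idx:pred_idx + 2 * n + 1]
+    lo, hi = max(0, pred_idx - n), min(len(tokens) - 1, pred_idx + n)
+    marks = [1 if lo <= i <= hi else 0 for i in range(len(tokens))]
+    return window, marks
+
+tokens = ['A', 'record', 'date', 'has', "n't", 'been', 'set', '.']
+window, marks = predicate_context(tokens, tokens.index('set'), n=2)
+print(window)  # ["n't", 'been', 'set', '.', '×']
+print(marks)   # [0, 0, 0, 0, 1, 1, 1, 1]
+```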
+ + +## 数据介绍 + +在此教程中,我们选用[CoNLL 2005](http://www.cs.upc.edu/~srlconll/)SRL任务开放出的数据集作为示例。需要特别说明的是,CoNLL 2005 SRL任务的训练数集和开发集在比赛之后并非免费进行公开,目前,能够获取到的只有测试集,包括Wall Street Journal的23节和Brown语料集中的3节。在本教程中,我们以测试集中的WSJ数据为训练集来讲解模型。但是,由于测试集中样本的数量远远不够,如果希望训练一个可用的神经网络SRL系统,请考虑付费获取全量数据。 + +原始数据中同时包括了词性标注、命名实体识别、语法解析树等多种信息。本教程中,我们使用test.wsj文件夹中的数据进行训练和测试,并只会用到words文件夹(文本序列)和props文件夹(标注结果)下的数据。本教程使用的数据目录如下: + +```text +conll05st-release/ +└── test.wsj +├── props # 标注结果 +└── words # 输入文本序列 +``` + +标注信息源自Penn TreeBank\[[7](#参考文献)\]和PropBank\[[8](#参考文献)\]的标注结果。PropBank标注结果的标签和我们在文章一开始示例中使用的标注结果标签不同,但原理是相同的,关于标注结果标签含义的说明,请参考论文\[[9](#参考文献)\]。 + +原始数据需要进行数据预处理才能被PaddlePaddle处理,预处理包括下面几个步骤: + +1. 将文本序列和标记序列其合并到一条记录中; +2. 一个句子如果含有`$n$`个谓词,这个句子会被处理`$n$`次,变成`$n$`条独立的训练样本,每个样本一个不同的谓词; +3. 抽取谓词上下文和构造谓词上下文区域标记; +4. 构造以BIO法表示的标记; +5. 依据词典获取词对应的整数索引。 + + +```python +# import paddle.v2.dataset.conll05 as conll05 +# conll05.corpus_reader函数完成上面第1步和第2步. +# conll05.reader_creator函数完成上面第3步到第5步. +# conll05.test函数可以获取处理之后的每条样本来供PaddlePaddle训练. +``` + +预处理完成之后一条训练样本包含9个特征,分别是:句子序列、谓词、谓词上下文(占 5 列)、谓词上下区域标志、标注序列。下表是一条训练样本的示例。 + +| 句子序列 | 谓词 | 谓词上下文(窗口 = 5) | 谓词上下文区域标记 | 标注序列 | +|---|---|---|---|---| +| A | set | n't been set . × | 0 | B-A1 | +| record | set | n't been set . × | 0 | I-A1 | +| date | set | n't been set . × | 0 | I-A1 | +| has | set | n't been set . × | 0 | O | +| n't | set | n't been set . × | 1 | B-AM-NEG | +| been | set | n't been set . × | 1 | O | +| set | set | n't been set . × | 1 | B-V | +| . | set | n't been set . × | 1 | O | + + +除数据之外,我们同时提供了以下资源: + +| 文件名称 | 说明 | +|---|---| +| word_dict | 输入句子的词典,共计44068个词 | +| label_dict | 标记的词典,共计106个标记 | +| predicate_dict | 谓词的词典,共计3162个词 | +| emb | 一个训练好的词表,32维 | + +我们在英文维基百科上训练语言模型得到了一份词向量用来初始化SRL模型。在SRL模型训练过程中,词向量不再被更新。关于语言模型和词向量可以参考[词向量](https://github.com/PaddlePaddle/book/blob/develop/04.word2vec/README.cn.md) 这篇教程。我们训练语言模型的语料共有995,000,000个token,词典大小控制为4900,000词。CoNLL 2005训练语料中有5%的词不在这4900,000个词中,我们将它们全部看作未登录词,用``表示。 + +获取词典,打印词典大小: + +```python +import math, os +import numpy as np +import paddle +import paddle.v2.dataset.conll05 as conll05 +import paddle.fluid as fluid +import time + +with_gpu = os.getenv('WITH_GPU', '0') != '0' + +word_dict, verb_dict, label_dict = conll05.get_dict() +word_dict_len = len(word_dict) +label_dict_len = len(label_dict) +pred_dict_len = len(verb_dict) + +print word_dict_len +print label_dict_len +print pred_dict_len +``` + +## 模型配置说明 + +- 定义输入数据维度及模型超参数。 + +```python +mark_dict_len = 2 # 谓上下文区域标志的维度,是一个0-1 2值特征,因此维度为2 +word_dim = 32 # 词向量维度 +mark_dim = 5 # 谓词上下文区域通过词表被映射为一个实向量,这个是相邻的维度 +hidden_dim = 512 # LSTM隐层向量的维度 : 512 / 4 +depth = 8 # 栈式LSTM的深度 +mix_hidden_lr = 1e-3 + +IS_SPARSE = True +PASS_NUM = 10 +BATCH_SIZE = 10 + +embedding_name = 'emb' +``` + +这里需要特别说明的是hidden_dim = 512指定了LSTM隐层向量的维度为128维,关于这一点请参考PaddlePaddle官方文档中[lstmemory](http://www.paddlepaddle.org/doc/ui/api/trainer_config_helpers/layers.html#lstmemory)的说明。 + +- 如上文提到,我们用基于英文维基百科训练好的词向量来初始化序列输入、谓词上下文总共6个特征的embedding层参数,在训练中不更新。 + +```python +# 这里加载PaddlePaddle上版保存的二进制模型 +def load_parameter(file_name, h, w): +with open(file_name, 'rb') as f: +f.read(16) # skip header. 
+return np.fromfile(f, dtype=np.float32).reshape(h, w) +``` + +- 8个LSTM单元以“正向/反向”的顺序对所有输入序列进行学习。 + +```python +def db_lstm(word, predicate, ctx_n2, ctx_n1, ctx_0, ctx_p1, ctx_p2, mark, +**ignored): +# 8 features +predicate_embedding = fluid.layers.embedding( +input=predicate, +size=[pred_dict_len, word_dim], +dtype='float32', +is_sparse=IS_SPARSE, +param_attr='vemb') + +mark_embedding = fluid.layers.embedding( +input=mark, +size=[mark_dict_len, mark_dim], +dtype='float32', +is_sparse=IS_SPARSE) + +word_input = [word, ctx_n2, ctx_n1, ctx_0, ctx_p1, ctx_p2] +# Since word vector lookup table is pre-trained, we won't update it this time. +# trainable being False prevents updating the lookup table during training. +emb_layers = [ +fluid.layers.embedding( +size=[word_dict_len, word_dim], +input=x, +param_attr=fluid.ParamAttr( +name=embedding_name, trainable=False)) for x in word_input +] +emb_layers.append(predicate_embedding) +emb_layers.append(mark_embedding) + +# 8 LSTM units are trained through alternating left-to-right / right-to-left order +# denoted by the variable `reverse`. +hidden_0_layers = [ +fluid.layers.fc(input=emb, size=hidden_dim, act='tanh') +for emb in emb_layers +] + +hidden_0 = fluid.layers.sums(input=hidden_0_layers) + +lstm_0 = fluid.layers.dynamic_lstm( +input=hidden_0, +size=hidden_dim, +candidate_activation='relu', +gate_activation='sigmoid', +cell_activation='sigmoid') + +# stack L-LSTM and R-LSTM with direct edges +input_tmp = [hidden_0, lstm_0] + +# In PaddlePaddle, state features and transition features of a CRF are implemented +# by a fully connected layer and a CRF layer seperately. The fully connected layer +# with linear activation learns the state features, here we use fluid.layers.sums +# (fluid.layers.fc can be uesed as well), and the CRF layer in PaddlePaddle: +# fluid.layers.linear_chain_crf only +# learns the transition features, which is a cost layer and is the last layer of the network. +# fluid.layers.linear_chain_crf outputs the log probability of true tag sequence +# as the cost by given the input sequence and it requires the true tag sequence +# as target in the learning process. 
+ +for i in range(1, depth): +mix_hidden = fluid.layers.sums(input=[ +fluid.layers.fc(input=input_tmp[0], size=hidden_dim, act='tanh'), +fluid.layers.fc(input=input_tmp[1], size=hidden_dim, act='tanh') +]) + +lstm = fluid.layers.dynamic_lstm( +input=mix_hidden, +size=hidden_dim, +candidate_activation='relu', +gate_activation='sigmoid', +cell_activation='sigmoid', +is_reverse=((i % 2) == 1)) + +input_tmp = [mix_hidden, lstm] + +# 取最后一个栈式LSTM的输出和这个LSTM单元的输入到隐层映射, +# 经过一个全连接层映射到标记字典的维度,来学习 CRF 的状态特征 +feature_out = fluid.layers.sums(input=[ +fluid.layers.fc(input=input_tmp[0], size=label_dict_len, act='tanh'), +fluid.layers.fc(input=input_tmp[1], size=label_dict_len, act='tanh') +]) + +return feature_out +``` + +## 训练模型 + +- 我们根据网络拓扑结构和模型参数来构造出trainer用来训练,在构造时还需指定优化方法,这里使用最基本的SGD方法(momentum设置为0),同时设定了学习率、正则等。 + +- 数据介绍部分提到CoNLL 2005训练集付费,这里我们使用测试集训练供大家学习。conll05.test()每次产生一条样本,包含9个特征,shuffle和组完batch后作为训练的输入。 + +- 通过feeding来指定每一个数据和data_layer的对应关系。 例如 下面feeding表示: conll05.test()产生数据的第0列对应word_data层的特征。 + +- 可以使用event_handler回调函数来观察训练过程,或进行测试等。这里我们打印了训练过程的cost,该回调函数是trainer.train函数里设定。 + +- 通过trainer.train函数训练 + +```python +def train(use_cuda, save_dirname=None, is_local=True): +# define network topology + +# 句子序列 +word = fluid.layers.data( +name='word_data', shape=[1], dtype='int64', lod_level=1) + +# 谓词 +predicate = fluid.layers.data( +name='verb_data', shape=[1], dtype='int64', lod_level=1) + +# 谓词上下文5个特征 +ctx_n2 = fluid.layers.data( +name='ctx_n2_data', shape=[1], dtype='int64', lod_level=1) +ctx_n1 = fluid.layers.data( +name='ctx_n1_data', shape=[1], dtype='int64', lod_level=1) +ctx_0 = fluid.layers.data( +name='ctx_0_data', shape=[1], dtype='int64', lod_level=1) +ctx_p1 = fluid.layers.data( +name='ctx_p1_data', shape=[1], dtype='int64', lod_level=1) +ctx_p2 = fluid.layers.data( +name='ctx_p2_data', shape=[1], dtype='int64', lod_level=1) + +# 谓词上下区域标志 +mark = fluid.layers.data( +name='mark_data', shape=[1], dtype='int64', lod_level=1) + +# define network topology +feature_out = db_lstm(**locals()) + +# 标注序列 +target = fluid.layers.data( +name='target', shape=[1], dtype='int64', lod_level=1) + +# 学习 CRF 的转移特征 +crf_cost = fluid.layers.linear_chain_crf( +input=feature_out, +label=target, +param_attr=fluid.ParamAttr( +name='crfw', learning_rate=mix_hidden_lr)) + +avg_cost = fluid.layers.mean(crf_cost) + +sgd_optimizer = fluid.optimizer.SGD( +learning_rate=fluid.layers.exponential_decay( +learning_rate=0.01, +decay_steps=100000, +decay_rate=0.5, +staircase=True)) + +sgd_optimizer.minimize(avg_cost) + +# The CRF decoding layer is used for evaluation and inference. +# It shares weights with CRF layer. The sharing of parameters among multiple layers +# is specified by using the same parameter name in these layers. If true tag sequence +# is provided in training process, `fluid.layers.crf_decoding` calculates labelling error +# for each input token and sums the error over the entire sequence. +# Otherwise, `fluid.layers.crf_decoding` generates the labelling tags. 
+crf_decode = fluid.layers.crf_decoding( +input=feature_out, param_attr=fluid.ParamAttr(name='crfw')) + +train_data = paddle.batch( +paddle.reader.shuffle( +paddle.dataset.conll05.test(), buf_size=8192), +batch_size=BATCH_SIZE) + +place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() + + +feeder = fluid.DataFeeder( +feed_list=[ +word, ctx_n2, ctx_n1, ctx_0, ctx_p1, ctx_p2, predicate, mark, target +], +place=place) +exe = fluid.Executor(place) + +def train_loop(main_program): +exe.run(fluid.default_startup_program()) +embedding_param = fluid.global_scope().find_var( +embedding_name).get_tensor() +embedding_param.set( +load_parameter(conll05.get_embedding(), word_dict_len, word_dim), +place) + +start_time = time.time() +batch_id = 0 +for pass_id in xrange(PASS_NUM): +for data in train_data(): +cost = exe.run(main_program, +feed=feeder.feed(data), +fetch_list=[avg_cost]) +cost = cost[0] + +if batch_id % 10 == 0: +print("avg_cost:" + str(cost)) +if batch_id != 0: +print("second per batch: " + str((time.time( +) - start_time) / batch_id)) +# Set the threshold low to speed up the CI test +if float(cost) < 60.0: +if save_dirname is not None: +fluid.io.save_inference_model(save_dirname, [ +'word_data', 'verb_data', 'ctx_n2_data', +'ctx_n1_data', 'ctx_0_data', 'ctx_p1_data', +'ctx_p2_data', 'mark_data' +], [feature_out], exe) +return + +batch_id = batch_id + 1 + +train_loop(fluid.default_main_program()) +``` + + +## 应用模型 + +训练完成之后,需要依据某个我们关心的性能指标选择最优的模型进行预测,可以简单的选择测试集上标记错误最少的那个模型。以下我们给出一个使用训练后的模型进行预测的示例。 + +```python +def infer(use_cuda, save_dirname=None): +if save_dirname is None: +return + +place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() +exe = fluid.Executor(place) + +inference_scope = fluid.core.Scope() +with fluid.scope_guard(inference_scope): +# Use fluid.io.load_inference_model to obtain the inference program desc, +# the feed_target_names (the names of variables that will be fed +# data using feed operators), and the fetch_targets (variables that +# we want to obtain data from using fetch operators). +[inference_program, feed_target_names, +fetch_targets] = fluid.io.load_inference_model(save_dirname, exe) + +# Setup inputs by creating LoDTensors to represent sequences of words. +# Here each word is the basic element of these LoDTensors and the shape of +# each word (base_shape) should be [1] since it is simply an index to +# look up for the corresponding word vector. +# Suppose the length_based level of detail (lod) info is set to [[3, 4, 2]], +# which has only one lod level. Then the created LoDTensors will have only +# one higher level structure (sequence of words, or sentence) than the basic +# element (word). Hence the LoDTensor will hold data for three sentences of +# length 3, 4 and 2, respectively. +# Note that lod info should be a list of lists. 
+lod = [[3, 4, 2]] +base_shape = [1] +# The range of random integers is [low, high] +word = fluid.create_random_int_lodtensor( +lod, base_shape, place, low=0, high=word_dict_len - 1) +pred = fluid.create_random_int_lodtensor( +lod, base_shape, place, low=0, high=pred_dict_len - 1) +ctx_n2 = fluid.create_random_int_lodtensor( +lod, base_shape, place, low=0, high=word_dict_len - 1) +ctx_n1 = fluid.create_random_int_lodtensor( +lod, base_shape, place, low=0, high=word_dict_len - 1) +ctx_0 = fluid.create_random_int_lodtensor( +lod, base_shape, place, low=0, high=word_dict_len - 1) +ctx_p1 = fluid.create_random_int_lodtensor( +lod, base_shape, place, low=0, high=word_dict_len - 1) +ctx_p2 = fluid.create_random_int_lodtensor( +lod, base_shape, place, low=0, high=word_dict_len - 1) +mark = fluid.create_random_int_lodtensor( +lod, base_shape, place, low=0, high=mark_dict_len - 1) + +# Construct feed as a dictionary of {feed_target_name: feed_target_data} +# and results will contain a list of data corresponding to fetch_targets. +assert feed_target_names[0] == 'word_data' +assert feed_target_names[1] == 'verb_data' +assert feed_target_names[2] == 'ctx_n2_data' +assert feed_target_names[3] == 'ctx_n1_data' +assert feed_target_names[4] == 'ctx_0_data' +assert feed_target_names[5] == 'ctx_p1_data' +assert feed_target_names[6] == 'ctx_p2_data' +assert feed_target_names[7] == 'mark_data' + +results = exe.run(inference_program, +feed={ +feed_target_names[0]: word, +feed_target_names[1]: pred, +feed_target_names[2]: ctx_n2, +feed_target_names[3]: ctx_n1, +feed_target_names[4]: ctx_0, +feed_target_names[5]: ctx_p1, +feed_target_names[6]: ctx_p2, +feed_target_names[7]: mark +}, +fetch_list=fetch_targets, +return_numpy=False) +print(results[0].lod()) +np_data = np.array(results[0]) +print("Inference Shape: ", np_data.shape) +``` + +整个程序的入口如下: + +```python +def main(use_cuda, is_local=True): +if use_cuda and not fluid.core.is_compiled_with_cuda(): +return + +# Directory for saving the trained model +save_dirname = "label_semantic_roles.inference.model" + +train(use_cuda, save_dirname, is_local) +infer(use_cuda, save_dirname) + + +main(use_cuda=False) +``` + +## 总结 + +语义角色标注是许多自然语言理解任务的重要中间步骤。这篇教程中我们以语义角色标注任务为例,介绍如何利用PaddlePaddle进行序列标注任务。教程中所介绍的模型来自我们发表的论文\[[10](#参考文献)\]。由于 CoNLL 2005 SRL任务的训练数据目前并非完全开放,教程中只使用测试数据作为示例。在这个过程中,我们希望减少对其它自然语言处理工具的依赖,利用神经网络数据驱动、端到端学习的能力,得到一个和传统方法可比、甚至更好的模型。在论文中我们证实了这种可能性。关于模型更多的信息和讨论可以在论文中找到。 + +## 参考文献 +1. Sun W, Sui Z, Wang M, et al. [Chinese semantic role labeling with shallow parsing](http://www.aclweb.org/anthology/D09-1#page=1513)[C]//Proceedings of the 2009 Conference on Empirical Methods in Natural Language Processing: Volume 3-Volume 3. Association for Computational Linguistics, 2009: 1475-1483. +2. Pascanu R, Gulcehre C, Cho K, et al. [How to construct deep recurrent neural networks](https://arxiv.org/abs/1312.6026)[J]. arXiv preprint arXiv:1312.6026, 2013. +3. Cho K, Van Merriënboer B, Gulcehre C, et al. [Learning phrase representations using RNN encoder-decoder for statistical machine translation](https://arxiv.org/abs/1406.1078)[J]. arXiv preprint arXiv:1406.1078, 2014. +4. Bahdanau D, Cho K, Bengio Y. [Neural machine translation by jointly learning to align and translate](https://arxiv.org/abs/1409.0473)[J]. arXiv preprint arXiv:1409.0473, 2014. +5. Lafferty J, McCallum A, Pereira F. 
[Conditional random fields: Probabilistic models for segmenting and labeling sequence data](http://www.jmlr.org/papers/volume15/doppa14a/source/biblio.bib.old)[C]//Proceedings of the eighteenth international conference on machine learning, ICML. 2001, 1: 282-289. +6. 李航. 统计学习方法[J]. 清华大学出版社, 北京, 2012. +7. Marcus M P, Marcinkiewicz M A, Santorini B. [Building a large annotated corpus of English: The Penn Treebank](http://repository.upenn.edu/cgi/viewcontent.cgi?article=1246&context=cis_reports)[J]. Computational linguistics, 1993, 19(2): 313-330. +8. Palmer M, Gildea D, Kingsbury P. [The proposition bank: An annotated corpus of semantic roles](http://www.mitpressjournals.org/doi/pdfplus/10.1162/0891201053630264)[J]. Computational linguistics, 2005, 31(1): 71-106. +9. Carreras X, Màrquez L. [Introduction to the CoNLL-2005 shared task: Semantic role labeling](http://www.cs.upc.edu/~srlconll/st05/papers/intro.pdf)[C]//Proceedings of the Ninth Conference on Computational Natural Language Learning. Association for Computational Linguistics, 2005: 152-164. +10. Zhou J, Xu W. [End-to-end learning of semantic role labeling using recurrent neural networks](http://www.aclweb.org/anthology/P/P15/P15-1109.pdf)[C]//Proceedings of the Annual Meeting of the Association for Computational Linguistics. 2015. + +
本教程PaddlePaddle 创作,采用 知识共享 署名-相同方式共享 4.0 国际 许可协议进行许可。 diff --git a/doc/fluid/new_docs/beginners_guide/basics/learning_materials.md b/doc/fluid/new_docs/beginners_guide/basics/learning_materials.md new file mode 100644 index 0000000000..a27499c6ed --- /dev/null +++ b/doc/fluid/new_docs/beginners_guide/basics/learning_materials.md @@ -0,0 +1,54 @@ +# 学习资料 + +## 要读的第一本书 +基础理论习得的最直接来源就是书本。按机器学习理论、深度学习理论、编程语言三方面划分,这里推荐如下书籍辅助您。 + + +### 机器学习理论 + +在开启深度学习之前,您需要先行掌握机器学习的理论。深度学习是机器学习中的一个分支,两者内在的理论基础存在强关联。 +机器学习理论的书籍教材比较多,这里推荐一本易懂易学的书籍,可以重点关注神经网络部分。 + +书名:《机器学习》(周志华著,清华大学出版社,2016年版) + +### 深度学习理论 + +打好机器学习的理论功底后,您可以开始钻研深度学习的理论。通常深度学习理论会给人留下抽象难懂的印象,且和数学结合紧密。 +为了让您能够顺利入门,这里推荐一份易学易用的教材,无论深度学习理论还是数学理论即可一本搞定。 + +书名:《Deep Learning(深度学习)》(Goodfellow, Bengio, Courville合著,赵申剑、黎彧君、符天凡和李凯合译,人民邮电出版社,2017年版) +此书电子版在Github上已经开源,详情可参考此链接 [《深度学习》](https://github.com/exacity/deeplearningbook-chinese) + +### 编程语言 + +Python方向:这里推荐您学习Python,一方面各大主流深度学习框架的主力支撑编程语言均为Python;另一方面,对比其他语言,Python较为简单易学。 +Python的教材种类较多,这里推荐一本实操和理论性都兼顾的教材,只要完成书中52个习题,跑代码然后发现问题解决,就能逐步上手。 + +书名:《“笨办法”学Python》(Zed Shaw著,王巍巍译,人民邮电出版社,2014年11月版) + + +C++方向:C++语言在底层框架中使用较多,您逐步掌握开源框架的基本操作后,在更高阶的框架应用中会用到这个技能点。 +同前面提到的Python一样,学习C++时需要多上手操作。这里推荐迅速上手C++的书籍,不但能够学习功能和结构,还提供了解决方案的示例。 + +书名:《Essential C++》【美】李普曼(Lippman,S.B.)著,侯捷译,电子工业出版社2013年8月版 + + + +## 要看的视频公开课 + +在学习一门新技术的同时,除了看书,如果有老师面对面教授,可以更快更好的学会知识。相比于线下授课,视频公开课能够在省钱省力的同时,达到易学易掌握的效果。 +目前深度学习的课程多是公开免费的,通过学习您可以更轻松的理解深度学习中的抽象理论,并在实操方面不绕弯路。 +综合课程生动性、可操作性、紧凑性、连续性这些特点,这里推荐如下课程,同步附上网址,便于您查找学习。 + +### 理论知识详解视频课 +[机器学习](http://open.163.com/special/opencourse/machinelearning.html) 斯坦福大学教授吴恩达公开课程,包含相关算法的详细讲解。 + +[AI技术](https://ai.baidu.com/paddlepaddle/player?id=13) 百度推出的“AI核心技术掌握”课程,每节课在20-30分钟左右,从AI技术到深度学习进行全面细致的解读。 + +[深度学习](http://speech.ee.ntu.edu.tw/~tlkagk/courses_ML17_2.html) 台湾李宏毅教授的在线课程,其中是英文课程,会结合国外的科研成果,但也适合新手入门和理解深度学习。 + +[编程语言](https://ai.baidu.com/paddlepaddle/openCourses) Python操作课程,从基础到进阶操作都提供详细说明,每节课时长20分钟左右。 + +### PaddlePaddle实操视频课 +掌握好理论基础,具备编程能力后,您可以开始使用PaddlePaddle Fluid进行实操,从初阶开始学习,向着中高阶努力。 +目前已有PaddlePaddle官方视频公开课在官网呈现,内含PaddlePaddle实战、PaddlePaddle应用场景和机器学习模型讲解课程,帮助开发者从零开始使用PaddlePaddle,从简单场景逐步过渡到工业级应用。[点击这里](http://ai.baidu.com/paddlepaddle/openCourses)您即可开始视频课的学习之旅。 diff --git a/doc/fluid/new_docs/beginners_guide/basics/machine_translation/.gitignore b/doc/fluid/new_docs/beginners_guide/basics/machine_translation/.gitignore new file mode 100644 index 0000000000..6129b9e864 --- /dev/null +++ b/doc/fluid/new_docs/beginners_guide/basics/machine_translation/.gitignore @@ -0,0 +1,9 @@ +data/wmt14 +data/pre-wmt14 +pretrained/wmt14_model +gen.log +gen_result +train.log +dataprovider_copy_1.py +*.pyc +multi-bleu.perl diff --git a/doc/fluid/new_docs/beginners_guide/basics/machine_translation/image/bi_rnn.png b/doc/fluid/new_docs/beginners_guide/basics/machine_translation/image/bi_rnn.png new file mode 100644 index 0000000000..9d8efd50a4 Binary files /dev/null and b/doc/fluid/new_docs/beginners_guide/basics/machine_translation/image/bi_rnn.png differ diff --git a/doc/fluid/new_docs/beginners_guide/basics/machine_translation/image/bi_rnn_en.png b/doc/fluid/new_docs/beginners_guide/basics/machine_translation/image/bi_rnn_en.png new file mode 100755 index 0000000000..4b35c88fc8 Binary files /dev/null and b/doc/fluid/new_docs/beginners_guide/basics/machine_translation/image/bi_rnn_en.png differ diff --git a/doc/fluid/new_docs/beginners_guide/basics/machine_translation/image/decoder_attention.png b/doc/fluid/new_docs/beginners_guide/basics/machine_translation/image/decoder_attention.png new file 
mode 100644 index 0000000000..1b355e7786 Binary files /dev/null and b/doc/fluid/new_docs/beginners_guide/basics/machine_translation/image/decoder_attention.png differ diff --git a/doc/fluid/new_docs/beginners_guide/basics/machine_translation/image/decoder_attention_en.png b/doc/fluid/new_docs/beginners_guide/basics/machine_translation/image/decoder_attention_en.png new file mode 100755 index 0000000000..3728f782ee Binary files /dev/null and b/doc/fluid/new_docs/beginners_guide/basics/machine_translation/image/decoder_attention_en.png differ diff --git a/doc/fluid/new_docs/beginners_guide/basics/machine_translation/image/encoder_attention.png b/doc/fluid/new_docs/beginners_guide/basics/machine_translation/image/encoder_attention.png new file mode 100644 index 0000000000..28d7a15a3b Binary files /dev/null and b/doc/fluid/new_docs/beginners_guide/basics/machine_translation/image/encoder_attention.png differ diff --git a/doc/fluid/new_docs/beginners_guide/basics/machine_translation/image/encoder_attention_en.png b/doc/fluid/new_docs/beginners_guide/basics/machine_translation/image/encoder_attention_en.png new file mode 100755 index 0000000000..ea8585565d Binary files /dev/null and b/doc/fluid/new_docs/beginners_guide/basics/machine_translation/image/encoder_attention_en.png differ diff --git a/doc/fluid/new_docs/beginners_guide/basics/machine_translation/image/encoder_decoder.png b/doc/fluid/new_docs/beginners_guide/basics/machine_translation/image/encoder_decoder.png new file mode 100755 index 0000000000..60aee0017d Binary files /dev/null and b/doc/fluid/new_docs/beginners_guide/basics/machine_translation/image/encoder_decoder.png differ diff --git a/doc/fluid/new_docs/beginners_guide/basics/machine_translation/image/encoder_decoder_en.png b/doc/fluid/new_docs/beginners_guide/basics/machine_translation/image/encoder_decoder_en.png new file mode 100755 index 0000000000..6b73798fe6 Binary files /dev/null and b/doc/fluid/new_docs/beginners_guide/basics/machine_translation/image/encoder_decoder_en.png differ diff --git a/doc/fluid/new_docs/beginners_guide/basics/machine_translation/image/gru.png b/doc/fluid/new_docs/beginners_guide/basics/machine_translation/image/gru.png new file mode 100644 index 0000000000..0cde685b84 Binary files /dev/null and b/doc/fluid/new_docs/beginners_guide/basics/machine_translation/image/gru.png differ diff --git a/doc/fluid/new_docs/beginners_guide/basics/machine_translation/image/gru_en.png b/doc/fluid/new_docs/beginners_guide/basics/machine_translation/image/gru_en.png new file mode 100755 index 0000000000..a6af429f23 Binary files /dev/null and b/doc/fluid/new_docs/beginners_guide/basics/machine_translation/image/gru_en.png differ diff --git a/doc/fluid/new_docs/beginners_guide/basics/machine_translation/image/nmt.png b/doc/fluid/new_docs/beginners_guide/basics/machine_translation/image/nmt.png new file mode 100644 index 0000000000..bf56d73ebf Binary files /dev/null and b/doc/fluid/new_docs/beginners_guide/basics/machine_translation/image/nmt.png differ diff --git a/doc/fluid/new_docs/beginners_guide/basics/machine_translation/image/nmt_en.png b/doc/fluid/new_docs/beginners_guide/basics/machine_translation/image/nmt_en.png new file mode 100755 index 0000000000..557310e044 Binary files /dev/null and b/doc/fluid/new_docs/beginners_guide/basics/machine_translation/image/nmt_en.png differ diff --git a/doc/fluid/new_docs/beginners_guide/basics/machine_translation/index.md b/doc/fluid/new_docs/beginners_guide/basics/machine_translation/index.md new file mode 100644 index 
0000000000..fc161aaae9 --- /dev/null +++ b/doc/fluid/new_docs/beginners_guide/basics/machine_translation/index.md @@ -0,0 +1,448 @@ +# 机器翻译 + +本教程源代码目录在[book/machine_translation](https://github.com/PaddlePaddle/book/tree/develop/08.machine_translation), 初次使用请参考PaddlePaddle[安装教程](https://github.com/PaddlePaddle/book/blob/develop/README.cn.md#运行这本书)。 + +## 背景介绍 + +机器翻译(machine translation, MT)是用计算机来实现不同语言之间翻译的技术。被翻译的语言通常称为源语言(source language),翻译成的结果语言称为目标语言(target language)。机器翻译即实现从源语言到目标语言转换的过程,是自然语言处理的重要研究领域之一。 + +早期机器翻译系统多为基于规则的翻译系统,需要由语言学家编写两种语言之间的转换规则,再将这些规则录入计算机。该方法对语言学家的要求非常高,而且我们几乎无法总结一门语言会用到的所有规则,更何况两种甚至更多的语言。因此,传统机器翻译方法面临的主要挑战是无法得到一个完备的规则集合\[[1](#参考文献)\]。 + +为解决以上问题,统计机器翻译(Statistical Machine Translation, SMT)技术应运而生。在统计机器翻译技术中,转化规则是由机器自动从大规模的语料中学习得到的,而非我们人主动提供规则。因此,它克服了基于规则的翻译系统所面临的知识获取瓶颈的问题,但仍然存在许多挑战:1)人为设计许多特征(feature),但永远无法覆盖所有的语言现象;2)难以利用全局的特征;3)依赖于许多预处理环节,如词语对齐、分词或符号化(tokenization)、规则抽取、句法分析等,而每个环节的错误会逐步累积,对翻译的影响也越来越大。 + +近年来,深度学习技术的发展为解决上述挑战提供了新的思路。将深度学习应用于机器翻译任务的方法大致分为两类:1)仍以统计机器翻译系统为框架,只是利用神经网络来改进其中的关键模块,如语言模型、调序模型等(见图1的左半部分);2)不再以统计机器翻译系统为框架,而是直接用神经网络将源语言映射到目标语言,即端到端的神经网络机器翻译(End-to-End Neural Machine Translation, End-to-End NMT)(见图1的右半部分),简称为NMT模型。 +![nmt](./image/nmt.png) +

+图1. 基于神经网络的机器翻译系统 +

+
+本教程主要介绍NMT模型,以及如何用PaddlePaddle来训练一个NMT模型。
+
+## 效果展示
+
+以中英翻译(中文翻译到英文)的模型为例,当模型训练完毕时,如果输入如下已分词的中文句子:
+```text
+这些 是 希望 的 曙光 和 解脱 的 迹象 .
+```
+如果设定显示翻译结果的条数(即[柱搜索算法](#柱搜索算法)的宽度)为3,生成的英语句子如下:
+```text
+0 -5.36816 These are signs of hope and relief .
+1 -6.23177 These are the light of hope and relief .
+2 -7.7914 These are the light of hope and the relief of hope .
+```
+- 左起第一列是生成句子的序号;左起第二列是该条句子的得分(从大到小),分值越高越好;左起第三列是生成的英语句子。
+- 另外有两个特殊标志:`<e>`表示句子的结尾,`<unk>`表示未登录词(unknown word),即未在训练字典中出现的词。
+
+## 模型概览
+
+本节依次介绍双向循环神经网络(Bi-directional Recurrent Neural Network),NMT模型中典型的编码器-解码器(Encoder-Decoder)框架以及柱搜索(beam search)算法。
+
+### 双向循环神经网络
+
+我们已经在[语义角色标注](https://github.com/PaddlePaddle/book/blob/develop/07.label_semantic_roles/README.cn.md)一章中介绍了一种双向循环神经网络,这里介绍Bengio团队在论文\[[2](#参考文献),[4](#参考文献)\]中提出的另一种结构。该结构的目的是输入一个序列,得到其在每个时刻的特征表示,即输出的每个时刻都用定长向量表示到该时刻的上下文语义信息。
+
+具体来说,该双向循环神经网络分别在时间维以顺序和逆序——即前向(forward)和后向(backward)——依次处理输入序列,并将每个时间步RNN的输出拼接成为最终的输出层。这样每个时间步的输出节点,都包含了输入序列中当前时刻完整的过去和未来的上下文信息。下图展示的是一个按时间步展开的双向循环神经网络。该网络包含一个前向和一个后向RNN,其中有六个权重矩阵:输入到前向隐层和后向隐层的权重矩阵(`$W_1, W_3$`),隐层到隐层自己的权重矩阵(`$W_2,W_5$`),前向隐层和后向隐层到输出层的权重矩阵(`$W_4, W_6$`)。注意,该网络的前向隐层和后向隐层之间没有连接。
+
+![bi_rnn](./image/bi_rnn.png)

+图3. 按时间步展开的双向循环神经网络 +

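+
+为帮助理解上图中六个权重矩阵各自的作用,下面给出一个前向计算的极简numpy示意(随机参数,与本教程后文的PaddlePaddle实现无关,矩阵名`W1`~`W6`与图中对应,仅用于说明计算流程):
+
+```python
+import numpy as np
+
+# 按时间步展开的双向RNN极简示意(随机参数,仅用于说明)
+T, input_dim, hidden_dim, output_dim = 5, 8, 16, 4
+rng = np.random.RandomState(0)
+x = rng.randn(T, input_dim)                       # 输入序列
+
+W1, W3 = rng.randn(input_dim, hidden_dim), rng.randn(input_dim, hidden_dim)    # 输入->前向/后向隐层
+W2, W5 = rng.randn(hidden_dim, hidden_dim), rng.randn(hidden_dim, hidden_dim)  # 隐层->自身
+W4, W6 = rng.randn(hidden_dim, output_dim), rng.randn(hidden_dim, output_dim)  # 前向/后向隐层->输出层
+
+h_fwd = np.zeros((T, hidden_dim))
+h_bwd = np.zeros((T, hidden_dim))
+for t in range(T):                                # 前向RNN:按顺序处理输入
+    prev = h_fwd[t - 1] if t > 0 else np.zeros(hidden_dim)
+    h_fwd[t] = np.tanh(x[t] @ W1 + prev @ W2)
+for t in reversed(range(T)):                      # 后向RNN:按逆序处理输入
+    nxt = h_bwd[t + 1] if t + 1 < T else np.zeros(hidden_dim)
+    h_bwd[t] = np.tanh(x[t] @ W3 + nxt @ W5)
+
+y = h_fwd @ W4 + h_bwd @ W6                       # 每个时间步的输出同时包含过去和未来的信息
+print(y.shape)  # (5, 4)
+```
+
+可以看到,前向和后向隐层各自独立递推,只在输出层汇合,正对应图中“前向隐层和后向隐层之间没有连接”的结构。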
+ +### 编码器-解码器框架 + +编码器-解码器(Encoder-Decoder)\[[2](#参考文献)\]框架用于解决由一个任意长度的源序列到另一个任意长度的目标序列的变换问题。即编码阶段将整个源序列编码成一个向量,解码阶段通过最大化预测序列概率,从中解码出整个目标序列。编码和解码的过程通常都使用RNN实现。 +![encoder_decoder](./image/encoder_decoder.png) +

+图4. 编码器-解码器框架 +

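+
+在进入编码器和解码器的细节之前,下面用一个极简的numpy示意勾勒“整句编码成一个向量、再逐词解码”的数据流(随机参数与贪心解码,仅示意概念,省略训练;其中`BOS`、`EOS`等取值均为假设,真实实现见后文的PaddlePaddle代码):
+
+```python
+import numpy as np
+
+# 编码器-解码器数据流的极简示意(随机参数,仅用于说明概念)
+rng = np.random.RandomState(42)
+V_src, V_trg, D = 10, 12, 16            # 源/目标词表大小与隐层维度(假设的小规模取值)
+E_src, E_trg = rng.randn(V_src, D), rng.randn(V_trg, D)
+W_enc, W_dec, W_out = rng.randn(2 * D, D), rng.randn(2 * D, D), rng.randn(D, V_trg)
+
+def rnn_step(W, h, x):
+    # 一步最简单的RNN:h' = tanh(W · [h; x])
+    return np.tanh(np.concatenate([h, x]) @ W)
+
+src = [3, 1, 4, 1, 5]                   # 源语言词id序列(假设值)
+h = np.zeros(D)
+for w in src:                           # 编码阶段:整个源序列压缩进一个向量
+    h = rnn_step(W_enc, h, E_src[w])
+context = h
+
+BOS, EOS, max_len = 0, 1, 10            # 开始/结束标记的id为假设值
+z, u, output = np.zeros(D), BOS, []
+for _ in range(max_len):                # 解码阶段:逐词生成,直到句子结束标记
+    z = rnn_step(W_dec, z, E_trg[u] + context)  # 简化处理:把context加进解码输入
+    u = int(np.argmax(z @ W_out))       # 贪心地取得分最高的词
+    if u == EOS:
+        break
+    output.append(u)
+print(output)
+```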
+
+#### 编码器
+
+编码阶段分为三步:
+
+1. one-hot vector表示:将源语言句子`$x=\left \{ x_1,x_2,...,x_T \right \}$`的每个词`$x_i$`表示成一个列向量`$w_i\epsilon \left \{ 0,1 \right \}^{\left | V \right |},i=1,2,...,T$`。这个向量`$w_i$`的维度与词汇表大小`$\left | V \right |$`相同,并且只有一个维度上有值1(该位置对应该词在词汇表中的位置),其余全是0。
+
+2. 映射到低维语义空间的词向量:one-hot vector表示存在两个问题,1)生成的向量维度往往很大,容易造成维数灾难;2)难以刻画词与词之间的关系(如语义相似性,也就是无法很好地表达语义)。因此,需将one-hot vector映射到低维的语义空间,由一个固定维度的稠密向量(称为词向量)表示。记映射矩阵为`$C\epsilon R^{K\times \left | V \right |}$`,用`$s_i=Cw_i$`表示第`$i$`个词的词向量,`$K$`为向量维度。
+
+3. 用RNN编码源语言词序列:这一过程的计算公式为`$h_i=\varnothing _\theta \left ( h_{i-1}, s_i \right )$`,其中`$h_0$`是一个全零的向量,`$\varnothing _\theta$`是一个非线性激活函数,最后得到的`$\mathbf{h}=\left \{ h_1,..., h_T \right \}$`就是RNN依次读入源语言`$T$`个词的状态编码序列。整句话的向量表示可以采用`$\mathbf{h}$`在最后一个时间步`$T$`的状态编码,或使用时间维上的池化(pooling)结果。
+
+第3步也可以使用双向循环神经网络实现更复杂的句编码表示,具体可以用双向GRU实现。前向GRU按照词序列`$(x_1,x_2,...,x_T)$`的顺序依次编码源语言端词,并得到一系列隐层状态`$(\overrightarrow{h_1},\overrightarrow{h_2},...,\overrightarrow{h_T})$`。类似地,后向GRU按照`$(x_T,x_{T-1},...,x_1)$`的顺序依次编码源语言端词,得到`$(\overleftarrow{h_1},\overleftarrow{h_2},...,\overleftarrow{h_T})$`。最后对于词`$x_i$`,通过拼接两个GRU的结果得到它的隐层状态,即`$h_i=\left [ \overrightarrow{h_i^T},\overleftarrow{h_i^T} \right ]^{T}$`。
+
+![encoder_attention](./image/encoder_attention.png)

+图5. 使用双向GRU的编码器 +

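+
+上述编码第1、2步中的one-hot表示与词向量映射`$s_i=Cw_i$`,本质上就是按列查表,可以用几行numpy验证(`V`、`K`为假设的小规模取值,映射矩阵随机初始化):
+
+```python
+import numpy as np
+
+# 编码第1~2步的示意:one-hot向量经映射矩阵C得到词向量(s_i = C w_i)
+V, K = 6, 4                      # 词汇表大小|V|与词向量维度K(假设的小规模取值)
+rng = np.random.RandomState(0)
+C = rng.randn(K, V)              # 映射矩阵 C ∈ R^{K x |V|}
+
+i = 2                            # 该词在词汇表中的位置
+w = np.zeros(V)
+w[i] = 1.0                       # one-hot向量:只有对应维度上有值1
+
+s = C @ w                        # 该词的词向量
+assert np.allclose(s, C[:, i])  # 与直接取C的第i列等价:embedding即查表
+print(s)
+```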
+
+#### 解码器
+
+机器翻译任务的训练过程中,解码阶段的目标是最大化下一个正确的目标语言词的概率。思路是:
+
+1. 每一个时刻,根据源语言句子的编码信息(又叫上下文向量,context vector)`$c$`、真实目标语言序列的第`$i$`个词`$u_i$`和`$i$`时刻RNN的隐层状态`$z_i$`,计算出下一个隐层状态`$z_{i+1}$`。计算公式如下:
+
+$$z_{i+1}=\phi _{\theta '}\left ( c,u_i,z_i \right )$$
+
+其中`$\phi _{\theta '}$`是一个非线性激活函数;`$c=q\mathbf{h}$`是源语言句子的上下文向量,在不使用[注意力机制](#注意力机制)时,如果[编码器](#编码器)的输出是源语言句子编码后的最后一个元素,则可以定义`$c=h_T$`;`$u_i$`是目标语言序列的第`$i$`个单词,`$u_0$`是目标语言序列的开始标记`<s>`,表示解码开始;`$z_i$`是`$i$`时刻解码RNN的隐层状态,`$z_0$`是一个全零的向量。
+
+2. 将`$z_{i+1}$`通过`softmax`归一化,得到目标语言序列的第`$i+1$`个单词的概率分布`$p_{i+1}$`。概率分布公式如下:
+
+$$p\left ( u_{i+1}|u_{<i+1},\mathbf{x} \right )=softmax(W_sz_{i+1}+b_z)$$
+
+其中`$W_sz_{i+1}+b_z$`是对每个可能的输出单词进行打分,再用softmax归一化就可以得到第`$i+1$`个词的概率`$p_{i+1}$`。
+
+3. 根据`$p_{i+1}$`和`$u_{i+1}$`计算代价。
+4. 重复步骤1~3,直到目标语言序列中的所有词处理完毕。
+
+机器翻译任务的生成过程,通俗来讲就是根据预先训练的模型来翻译源语言句子。生成过程中的解码阶段和上述训练过程的有所差异,具体介绍请见[柱搜索算法](#柱搜索算法)。
+
+### 柱搜索算法
+
+柱搜索([beam search](http://en.wikipedia.org/wiki/Beam_search))是一种启发式图搜索算法,用于在图或树中搜索有限集合中的最优扩展节点,通常用在解空间非常大的系统(如机器翻译、语音识别)中,原因是内存无法装下图或树中所有展开的解。如在机器翻译任务中希望翻译“`你好`”,就算目标语言字典中只有3个词(`<s>`, `<e>`, `hello`),也可能生成无限句话(`hello`循环出现的次数不定),为了找到其中较好的翻译结果,我们可采用柱搜索算法。
+
+柱搜索算法使用广度优先策略建立搜索树,在树的每一层,按照启发代价(heuristic cost)(本教程中,为生成词的log概率之和)对节点进行排序,然后仅留下预先确定的个数(文献中通常称为beam width、beam size、柱宽度等)的节点。只有这些节点会在下一层继续扩展,其他节点就被剪掉了,也就是说保留了质量较高的节点,剪枝了质量较差的节点。因此,搜索所占用的空间和时间大幅减少,但缺点是无法保证一定获得最优解。
+
+使用柱搜索算法的解码阶段,目标是最大化生成序列的概率。思路是:
+
+1. 每一个时刻,根据源语言句子的编码信息`$c$`、生成的第`$i$`个目标语言序列单词`$u_i$`和`$i$`时刻RNN的隐层状态`$z_i$`,计算出下一个隐层状态`$z_{i+1}$`。
+2. 将`$z_{i+1}$`通过`softmax`归一化,得到目标语言序列的第`$i+1$`个单词的概率分布`$p_{i+1}$`。
+3. 根据`$p_{i+1}$`采样出单词`$u_{i+1}$`。
+4. 重复步骤1~3,直到获得句子结束标记`<e>`或超过句子的最大生成长度为止。
+
+注意:`$z_{i+1}$`和`$p_{i+1}$`的计算公式同[解码器](#解码器)中的一样。且由于生成时的每一步都是通过贪心法实现的,因此并不能保证得到全局最优解。
+
+## 数据介绍
+
+本教程使用[WMT-14](http://www-lium.univ-lemans.fr/~schwenk/cslm_joint_paper/)数据集中的[bitexts(after selection)](http://www-lium.univ-lemans.fr/~schwenk/cslm_joint_paper/data/bitexts.tgz)作为训练集,[dev+test data](http://www-lium.univ-lemans.fr/~schwenk/cslm_joint_paper/data/dev+test.tgz)作为测试集和生成集。
+
+### 数据预处理
+
+我们的预处理流程包括两步:
+- 将每个源语言到目标语言的平行语料库文件合并为一个文件:
+  - 合并每个`XXX.src`和`XXX.trg`文件为`XXX`。
+  - `XXX`中的第`$i$`行内容为`XXX.src`中的第`$i$`行和`XXX.trg`中的第`$i$`行连接,用'\t'分隔。
+- 创建训练数据的“源字典”和“目标字典”。每个字典都有**DICTSIZE**个单词,包括:语料中词频最高的(DICTSIZE - 3)个单词,和3个特殊符号`<s>`(序列的开始)、`<e>`(序列的结束)和`<unk>`(未登录词)。
+
+### 示例数据
+
+因为完整的数据集数据量较大,为了验证训练流程,PaddlePaddle接口paddle.dataset.wmt14中默认提供了一个经过预处理的[较小规模的数据集](http://paddlepaddle.bj.bcebos.com/demo/wmt_shrinked_data/wmt14.tgz)。
+
+该数据集有193319条训练数据,6003条测试数据,词典长度为30000。因为数据规模限制,使用该数据集训练出来的模型效果无法保证。
+
+## 模型配置说明
+
+下面我们开始根据输入数据的形式配置模型。首先引入所需的库函数以及定义全局变量。
+
+```python
+import contextlib
+
+import numpy as np
+import paddle
+import paddle.fluid as fluid
+import paddle.fluid.framework as framework
+import paddle.fluid.layers as pd
+from paddle.fluid.executor import Executor
+from functools import partial
+import os
+
+dict_size = 30000
+source_dict_dim = target_dict_dim = dict_size
+hidden_dim = 32
+word_dim = 16
+batch_size = 2
+max_length = 8
+topk_size = 50
+beam_size = 2
+
+decoder_size = hidden_dim
+```
+
+然后如下实现编码器框架:
+
+```python
+def encoder(is_sparse):
+    src_word_id = pd.data(
+        name="src_word_id", shape=[1], dtype='int64', lod_level=1)
+    src_embedding = pd.embedding(
+        input=src_word_id,
+        size=[dict_size, word_dim],
+        dtype='float32',
+        is_sparse=is_sparse,
+        param_attr=fluid.ParamAttr(name='vemb'))
+
+    fc1 = pd.fc(input=src_embedding, size=hidden_dim * 4, act='tanh')
+    lstm_hidden0, lstm_0 = pd.dynamic_lstm(input=fc1, size=hidden_dim * 4)
+    encoder_out = pd.sequence_last_step(input=lstm_hidden0)
+    return encoder_out
+```
+
+再实现训练模式下的解码器:
+
+```python
+def train_decoder(context, is_sparse):
+    trg_language_word = pd.data(
+        name="target_language_word", shape=[1], dtype='int64', lod_level=1)
+    trg_embedding = pd.embedding(
+        input=trg_language_word,
+        size=[dict_size, word_dim],
+        dtype='float32',
+        is_sparse=is_sparse,
+        param_attr=fluid.ParamAttr(name='vemb'))
+
+    rnn = pd.DynamicRNN()
+    with rnn.block():
+        current_word = rnn.step_input(trg_embedding)
+        pre_state = rnn.memory(init=context)
+        current_state = pd.fc(input=[current_word, pre_state],
+                              size=decoder_size,
+                              act='tanh')
+
+        current_score = pd.fc(input=current_state,
+                              size=target_dict_dim,
+                              act='softmax')
+        rnn.update_memory(pre_state, current_state)
+        rnn.output(current_score)
+
+    return rnn()
+```
+
+实现推测模式下的解码器:
+
+```python
+def decode(context, is_sparse):
+    init_state = context
+    array_len = pd.fill_constant(shape=[1], dtype='int64', value=max_length)
+    counter = pd.zeros(shape=[1], dtype='int64', force_cpu=True)
+
+    # fill the first element with init_state
+    state_array = pd.create_array('float32')
+    pd.array_write(init_state, array=state_array, i=counter)
+
+    # ids, scores as memory
+    ids_array = pd.create_array('int64')
+    scores_array = pd.create_array('float32')
+
+    init_ids = pd.data(name="init_ids", shape=[1], dtype="int64", lod_level=2)
+    init_scores = pd.data(
+        name="init_scores", shape=[1], dtype="float32", lod_level=2)
+
+    pd.array_write(init_ids, array=ids_array, i=counter)
+    pd.array_write(init_scores, array=scores_array, i=counter)
+
+    cond = pd.less_than(x=counter, y=array_len)
+
+    while_op = pd.While(cond=cond)
+    with while_op.block():
+        pre_ids = pd.array_read(array=ids_array, i=counter)
+        pre_state = pd.array_read(array=state_array, i=counter)
+        pre_score = pd.array_read(array=scores_array, i=counter)
+
+        # expand the lod of pre_state to be the same with pre_score
+        pre_state_expanded = pd.sequence_expand(pre_state, pre_score)
+
+        pre_ids_emb = pd.embedding(
+            input=pre_ids,
+            size=[dict_size, word_dim],
+            dtype='float32',
+            is_sparse=is_sparse)
+
+        # use rnn unit to update rnn
+        current_state = pd.fc(input=[pre_state_expanded, pre_ids_emb],
+                              size=decoder_size,
+                              act='tanh')
+        current_state_with_lod = pd.lod_reset(x=current_state, y=pre_score)
+        # use score to do beam search
+        current_score = pd.fc(input=current_state_with_lod,
+                              size=target_dict_dim,
+                              act='softmax')
+        topk_scores, topk_indices = pd.topk(current_score, k=topk_size)
+        selected_ids, selected_scores = pd.beam_search(
+            pre_ids, topk_indices, topk_scores, beam_size, end_id=10, level=0)
+
+        pd.increment(x=counter, value=1, in_place=True)
+
+        # update the memories
+        pd.array_write(current_state, array=state_array, i=counter)
+        pd.array_write(selected_ids, array=ids_array, i=counter)
+        pd.array_write(selected_scores, array=scores_array, i=counter)
+
+        pd.less_than(x=counter, y=array_len, cond=cond)
+
+    translation_ids, translation_scores = pd.beam_search_decode(
+        ids=ids_array, scores=scores_array)
+
+    return translation_ids, translation_scores
+```
+
+进而,我们定义一个`train_program`,使用`encoder`和`train_decoder`计算出的结果,在标记数据的帮助下来计算误差。我们还定义了一个`optimizer_func`来定义优化器。
+
+```python
+def train_program(is_sparse):
+    context = encoder(is_sparse)
+    rnn_out = train_decoder(context, is_sparse)
+    label = pd.data(
+        name="target_language_next_word", shape=[1], dtype='int64', lod_level=1)
+    cost = pd.cross_entropy(input=rnn_out, label=label)
+    avg_cost = pd.mean(cost)
+    return avg_cost
+
+
+def optimizer_func():
+    return fluid.optimizer.Adagrad(
+        learning_rate=1e-4,
+        regularization=fluid.regularizer.L2DecayRegularizer(
+            regularization_coeff=0.1))
+```
+
+## 训练模型
+
+### 定义训练环境
+定义您的训练环境,可以指定训练是发生在CPU还是GPU上。
+
+```python
+use_cuda = False
+place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
+```
+
+### 定义数据提供器
+下一步是为训练和测试定义数据提供器。提供器每次读入一个大小为`BATCH_SIZE`的数据。`paddle.dataset.wmt14.train`每次会在乱序化后提供一个大小为`BATCH_SIZE`的数据,乱序化的缓存大小为`buf_size`。
+
+```python
+train_reader = paddle.batch(
+    paddle.reader.shuffle(
+        paddle.dataset.wmt14.train(dict_size), buf_size=1000),
+    batch_size=batch_size)
+```
+
+### 构造训练器(trainer)
+训练器需要一个训练程序和一个训练优化函数。
+
+```python
+is_sparse = False
+trainer = fluid.Trainer(
+    train_func=partial(train_program, is_sparse),
+    place=place,
+    optimizer_func=optimizer_func)
+```
+
+### 提供数据
+
+`feed_order`用来定义每条产生的数据和`paddle.layer.data`之间的映射关系。比如,`wmt14.train`产生的第一列的数据对应的是`src_word_id`这个特征。
+
+```python
+feed_order = [
+    'src_word_id', 'target_language_word', 'target_language_next_word'
+]
+```
+
+### 事件处理器
+回调函数`event_handler`在一个之前定义好的事件发生后会被调用。例如,我们可以在每步训练结束后查看误差。
+
+```python
+def event_handler(event):
+    if isinstance(event, fluid.EndStepEvent):
+        if event.step % 10 == 0:
+            print('pass_id=' + str(event.epoch) + ' batch=' + str(event.step))
+
+        if event.step == 20:
+            trainer.stop()
+```
+
+### 开始训练
+最后,我们传入训练循环数(`num_epochs`)和一些别的参数,调用 `trainer.train` 来开始训练。
+
+```python
+EPOCH_NUM = 1
+
+trainer.train(
+    reader=train_reader,
+    num_epochs=EPOCH_NUM,
+    event_handler=event_handler,
+    feed_order=feed_order)
+```
+
+## 应用模型
+
+### 定义解码部分
+
+使用上面定义的 `encoder` 和 `decode` 函数来推测翻译后的对应id和分数。
+
+```python
+context = encoder(is_sparse)
+translation_ids, translation_scores = decode(context, is_sparse)
+```
+
+### 定义数据
+
+我们先初始化id和分数,生成tensor作为输入数据。在这个预测例子中,我们用`wmt14.test`数据中的第一个记录来做推测,最后我们用“源字典”和“目标字典”来打印对应的句子结果。
+
+```python
+init_ids_data = np.array([1 for _ in range(batch_size)], dtype='int64')
+init_scores_data = np.array(
+    [1. for _ in range(batch_size)], dtype='float32')
+init_ids_data = init_ids_data.reshape((batch_size, 1))
+init_scores_data = init_scores_data.reshape((batch_size, 1))
+init_lod = [1] * batch_size
+init_lod = [init_lod, init_lod]
+
+init_ids = fluid.create_lod_tensor(init_ids_data, init_lod, place)
+init_scores = fluid.create_lod_tensor(init_scores_data, init_lod, place)
+
+test_data = paddle.batch(
+    paddle.reader.shuffle(
+        paddle.dataset.wmt14.test(dict_size), buf_size=1000),
+    batch_size=batch_size)
+
+feed_order = ['src_word_id']
+feed_list = [
+    framework.default_main_program().global_block().var(var_name)
+    for var_name in feed_order
+]
+feeder = fluid.DataFeeder(feed_list, place)
+
+src_dict, trg_dict = paddle.dataset.wmt14.get_dict(dict_size)
+```
+
+### 测试
+现在我们可以进行预测了。我们在`feed_order`中提供对应参数,放在`executor`上运行以取得id和分数结果。
+
+```python
+exe = Executor(place)
+exe.run(framework.default_startup_program())
+
+for data in test_data():
+    feed_data = map(lambda x: [x[0]], data)
+    feed_dict = feeder.feed(feed_data)
+    feed_dict['init_ids'] = init_ids
+    feed_dict['init_scores'] = init_scores
+
+    results = exe.run(
+        framework.default_main_program(),
+        feed=feed_dict,
+        fetch_list=[translation_ids, translation_scores],
+        return_numpy=False)
+
+    result_ids = np.array(results[0])
+    result_scores = np.array(results[1])
+
+    print("Original sentence:")
+    print(" ".join([src_dict[w] for w in feed_data[0][0]]))
+    print("Translated sentence:")
+    print(" ".join([trg_dict[w] for w in result_ids]))
+    print("Corresponding score: ", result_scores)
+
+    break
+```
+
+## 总结
+
+端到端的神经网络机器翻译是近几年兴起的一种全新的机器翻译方法。本章中,我们介绍了NMT中典型的“编码器-解码器”框架。由于NMT是一个典型的Seq2Seq(Sequence to Sequence,序列到序列)学习问题,因此,Seq2Seq中的query改写(query rewriting)、摘要、单轮对话等问题都可以用本教程的模型来解决。
+
+## 参考文献
+
+1. Koehn P. [Statistical machine translation](https://books.google.com.hk/books?id=4v_Cx1wIMLkC&printsec=frontcover&hl=zh-CN&source=gbs_ge_summary_r&cad=0#v=onepage&q&f=false)[M]. Cambridge University Press, 2009.
+2. Cho K, Van Merriënboer B, Gulcehre C, et al. [Learning phrase representations using RNN encoder-decoder for statistical machine translation](http://www.aclweb.org/anthology/D/D14/D14-1179.pdf)[C]//Proceedings of the 2014 Conference on Empirical Methods in Natural Language Processing (EMNLP), 2014: 1724-1734.
+3. Chung J, Gulcehre C, Cho K H, et al. [Empirical evaluation of gated recurrent neural networks on sequence modeling](https://arxiv.org/abs/1412.3555)[J]. arXiv preprint arXiv:1412.3555, 2014.
+4. Bahdanau D, Cho K, Bengio Y. [Neural machine translation by jointly learning to align and translate](https://arxiv.org/abs/1409.0473)[C]//Proceedings of ICLR 2015, 2015.
+5. Papineni K, Roukos S, Ward T, et al. [BLEU: a method for automatic evaluation of machine translation](http://dl.acm.org/citation.cfm?id=1073135)[C]//Proceedings of the 40th annual meeting on association for computational linguistics. Association for Computational Linguistics, 2002: 311-318.
+
+知识共享许可协议
本教程PaddlePaddle 创作,采用 知识共享 署名-相同方式共享 4.0 国际 许可协议进行许可。 diff --git a/doc/fluid/new_docs/beginners_guide/basics/recommender_system/.gitignore b/doc/fluid/new_docs/beginners_guide/basics/recommender_system/.gitignore new file mode 100644 index 0000000000..f23901aeb3 --- /dev/null +++ b/doc/fluid/new_docs/beginners_guide/basics/recommender_system/.gitignore @@ -0,0 +1,2 @@ +.idea +.ipynb_checkpoints diff --git a/doc/fluid/new_docs/beginners_guide/basics/recommender_system/image/Deep_candidate_generation_model_architecture.en.png b/doc/fluid/new_docs/beginners_guide/basics/recommender_system/image/Deep_candidate_generation_model_architecture.en.png new file mode 100644 index 0000000000..c213608e76 Binary files /dev/null and b/doc/fluid/new_docs/beginners_guide/basics/recommender_system/image/Deep_candidate_generation_model_architecture.en.png differ diff --git a/doc/fluid/new_docs/beginners_guide/basics/recommender_system/image/Deep_candidate_generation_model_architecture.png b/doc/fluid/new_docs/beginners_guide/basics/recommender_system/image/Deep_candidate_generation_model_architecture.png new file mode 100644 index 0000000000..8aedb22043 Binary files /dev/null and b/doc/fluid/new_docs/beginners_guide/basics/recommender_system/image/Deep_candidate_generation_model_architecture.png differ diff --git a/doc/fluid/new_docs/beginners_guide/basics/recommender_system/image/YouTube_Overview.en.png b/doc/fluid/new_docs/beginners_guide/basics/recommender_system/image/YouTube_Overview.en.png new file mode 100644 index 0000000000..4298567ac5 Binary files /dev/null and b/doc/fluid/new_docs/beginners_guide/basics/recommender_system/image/YouTube_Overview.en.png differ diff --git a/doc/fluid/new_docs/beginners_guide/basics/recommender_system/image/YouTube_Overview.png b/doc/fluid/new_docs/beginners_guide/basics/recommender_system/image/YouTube_Overview.png new file mode 100644 index 0000000000..a98e7cc676 Binary files /dev/null and b/doc/fluid/new_docs/beginners_guide/basics/recommender_system/image/YouTube_Overview.png differ diff --git a/doc/fluid/new_docs/beginners_guide/basics/recommender_system/image/output_32_0.png b/doc/fluid/new_docs/beginners_guide/basics/recommender_system/image/output_32_0.png new file mode 100644 index 0000000000..7fd97b9cc3 Binary files /dev/null and b/doc/fluid/new_docs/beginners_guide/basics/recommender_system/image/output_32_0.png differ diff --git a/doc/fluid/new_docs/beginners_guide/basics/recommender_system/image/rec_regression_network.png b/doc/fluid/new_docs/beginners_guide/basics/recommender_system/image/rec_regression_network.png new file mode 100644 index 0000000000..90c9b09fb7 Binary files /dev/null and b/doc/fluid/new_docs/beginners_guide/basics/recommender_system/image/rec_regression_network.png differ diff --git a/doc/fluid/new_docs/beginners_guide/basics/recommender_system/image/rec_regression_network_en.png b/doc/fluid/new_docs/beginners_guide/basics/recommender_system/image/rec_regression_network_en.png new file mode 100755 index 0000000000..6fc8e11967 Binary files /dev/null and b/doc/fluid/new_docs/beginners_guide/basics/recommender_system/image/rec_regression_network_en.png differ diff --git a/doc/fluid/new_docs/beginners_guide/basics/recommender_system/image/text_cnn.png b/doc/fluid/new_docs/beginners_guide/basics/recommender_system/image/text_cnn.png new file mode 100644 index 0000000000..61e63d9147 Binary files /dev/null and b/doc/fluid/new_docs/beginners_guide/basics/recommender_system/image/text_cnn.png differ diff --git 
a/doc/fluid/new_docs/beginners_guide/basics/recommender_system/image/text_cnn_en.png b/doc/fluid/new_docs/beginners_guide/basics/recommender_system/image/text_cnn_en.png new file mode 100644 index 0000000000..fbcae2be81 Binary files /dev/null and b/doc/fluid/new_docs/beginners_guide/basics/recommender_system/image/text_cnn_en.png differ diff --git a/doc/fluid/new_docs/beginners_guide/basics/recommender_system/index.md b/doc/fluid/new_docs/beginners_guide/basics/recommender_system/index.md new file mode 100644 index 0000000000..09a07f3dc3 --- /dev/null +++ b/doc/fluid/new_docs/beginners_guide/basics/recommender_system/index.md @@ -0,0 +1,528 @@ +# 个性化推荐 + +本教程源代码目录在[book/recommender_system](https://github.com/PaddlePaddle/book/tree/develop/05.recommender_system), 初次使用请参考PaddlePaddle[安装教程](https://github.com/PaddlePaddle/book/blob/develop/README.cn.md#运行这本书)。 + +## 背景介绍 + +在网络技术不断发展和电子商务规模不断扩大的背景下,商品数量和种类快速增长,用户需要花费大量时间才能找到自己想买的商品,这就是信息超载问题。为了解决这个难题,推荐系统(Recommender System)应运而生。 + +个性化推荐系统是信息过滤系统(Information Filtering System)的子集,它可以用在很多领域,如电影、音乐、电商和 Feed 流推荐等。推荐系统通过分析、挖掘用户行为,发现用户的个性化需求与兴趣特点,将用户可能感兴趣的信息或商品推荐给用户。与搜索引擎不同,推荐系统不需要用户准确地描述出自己的需求,而是根据分析历史行为建模,主动提供满足用户兴趣和需求的信息。 + +传统的推荐系统方法主要有: + +- 协同过滤推荐(Collaborative Filtering Recommendation):该方法收集分析用户历史行为、活动、偏好,计算一个用户与其他用户的相似度,利用目标用户的相似用户对商品评价的加权评价值,来预测目标用户对特定商品的喜好程度。优点是可以给用户推荐未浏览过的新产品;缺点是对于没有任何行为的新用户存在冷启动的问题,同时也存在用户与商品之间的交互数据不够多造成的稀疏问题,会导致模型难以找到相近用户。 +- 基于内容过滤推荐[[1](#参考文献)](Content-based Filtering Recommendation):该方法利用商品的内容描述,抽象出有意义的特征,通过计算用户的兴趣和商品描述之间的相似度,来给用户做推荐。优点是简单直接,不需要依据其他用户对商品的评价,而是通过商品属性进行商品相似度度量,从而推荐给用户所感兴趣商品的相似商品;缺点是对于没有任何行为的新用户同样存在冷启动的问题。 +- 组合推荐[[2](#参考文献)](Hybrid Recommendation):运用不同的输入和技术共同进行推荐,以弥补各自推荐技术的缺点。 + +其中协同过滤是应用最广泛的技术之一,它又可以分为多个子类:基于用户 (User-Based)的推荐[[3](#参考文献)] 、基于物品(Item-Based)的推荐[[4](#参考文献)]、基于社交网络关系(Social-Based)的推荐[[5](#参考文献)]、基于模型(Model-based)的推荐等。1994年明尼苏达大学推出的GroupLens系统[[3](#参考文献)]一般被认为是推荐系统成为一个相对独立的研究方向的标志。该系统首次提出了基于协同过滤来完成推荐任务的思想,此后,基于该模型的协同过滤推荐引领了推荐系统十几年的发展方向。 + +深度学习具有优秀的自动提取特征的能力,能够学习多层次的抽象特征表示,并对异质或跨域的内容信息进行学习,可以一定程度上处理推荐系统冷启动问题[[6](#参考文献)]。本教程主要介绍个性化推荐的深度学习模型,以及如何使用PaddlePaddle实现模型。 + +## 效果展示 + +我们使用包含用户信息、电影信息与电影评分的数据集作为个性化推荐的应用场景。当我们训练好模型后,只需要输入对应的用户ID和电影ID,就可以得出一个匹配的分数(范围[0,5],分数越高视为兴趣越大),然后根据所有电影的推荐得分排序,推荐给用户可能感兴趣的电影。 + +``` +Input movie_id: 1962 +Input user_id: 1 +Prediction Score is 4.25 +``` + +## 模型概览 + +本章中,我们首先介绍YouTube的视频推荐系统[[7](#参考文献)],然后介绍我们实现的融合推荐模型。 + +### YouTube的深度神经网络推荐系统 + +YouTube是世界上最大的视频上传、分享和发现网站,YouTube推荐系统为超过10亿用户从不断增长的视频库中推荐个性化的内容。整个系统由两个神经网络组成:候选生成网络和排序网络。候选生成网络从百万量级的视频库中生成上百个候选,排序网络对候选进行打分排序,输出排名最高的数十个结果。系统结构如图1所示: + +![YouTube_Overview](./image/YouTube_Overview.png) +

+图1. YouTube 推荐系统结构 +

+
+#### 候选生成网络(Candidate Generation Network)
+
+候选生成网络将推荐问题建模为一个类别数极大的多类分类问题:对于一个Youtube用户,使用其观看历史(视频ID)、搜索词记录(search tokens)、人口学信息(如地理位置、用户登录设备)、二值特征(如性别,是否登录)和连续特征(如用户年龄)等,对视频库中所有视频进行多分类,得到每一类别的分类结果(即每一个视频的推荐概率),最终输出概率较高的几百个视频。
+
+首先,将观看历史及搜索词记录这类历史信息,映射为向量后取平均值得到定长表示;同时,输入人口学特征以优化新用户的推荐效果,并将二值特征和连续特征归一化处理到[0, 1]范围。接下来,将所有特征表示拼接为一个向量,并输入给非线性多层感知器(MLP,详见[识别数字](https://github.com/PaddlePaddle/book/blob/develop/02.recognize_digits/README.cn.md)教程)处理。最后,训练时将MLP的输出给softmax做分类,预测时计算用户的综合特征(MLP的输出)与所有视频的相似度,取得分最高的`$k$`个作为候选生成网络的筛选结果。图2显示了候选生成网络结构。
+
+![Deep_candidate_generation_model_architecture](./image/Deep_candidate_generation_model_architecture.png)

+图2. 候选生成网络结构 +

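+
+下面用一个numpy小示意串起上述“映射取平均、拼接、MLP、取top k”的打分流程(随机参数,所有特征取值均为假设,仅说明数据流,并非YouTube系统或本教程后文的实现):
+
+```python
+import numpy as np
+
+# 候选生成网络打分流程的极简示意(随机参数,仅用于说明)
+rng = np.random.RandomState(7)
+N_videos, D_emb, D_user = 1000, 32, 64
+video_emb = rng.randn(N_videos, D_emb)        # 视频库中所有视频的特征表示
+
+watch_history = [10, 42, 7]                   # 用户观看历史的视频id(假设值)
+watch_vec = video_emb[watch_history].mean(0)  # 映射为向量后取平均,得到定长表示
+demo_vec = np.array([0.3, 1.0])               # 人口学/二值/连续特征,已归一化(假设值)
+
+features = np.concatenate([watch_vec, demo_vec])  # 所有特征拼接为一个向量
+W1 = rng.randn(features.size, D_user)
+W2 = rng.randn(D_user, D_emb)
+u = np.maximum(features @ W1, 0) @ W2         # 两层MLP,输出用户的综合特征u
+
+scores = video_emb @ u                        # 预测阶段:用点积代替softmax归一化打分
+topk = np.argsort(-scores)[:5]                # 取得分最高的k个视频作为候选
+print(topk)
+```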
+
+对于一个用户`$U$`,预测此刻用户要观看的视频`$\omega$`为视频`$i$`的概率公式为:
+
+$$P(\omega=i|u)=\frac{e^{v_{i}u}}{\sum_{j \in V}e^{v_{j}u}}$$
+
+其中`$u$`为用户`$U$`的特征表示,`$V$`为视频库集合,`$v_i$`为视频库中第`$i$`个视频的特征表示。`$u$`和`$v_i$`为长度相等的向量,两者点积可以通过全连接层实现。
+
+考虑到softmax分类的类别数非常多,为了保证一定的计算效率:1)训练阶段,使用负样本类别采样将实际计算的类别数缩小至数千;2)推荐(预测)阶段,忽略softmax的归一化计算(不影响结果),将类别打分问题简化为点积(dot product)空间中的最近邻(nearest neighbor)搜索问题,取与`$u$`最近的`$k$`个视频作为生成的候选。
+
+#### 排序网络(Ranking Network)
+排序网络的结构类似于候选生成网络,但是它的目标是对候选进行更细致的打分排序。和传统广告排序中的特征抽取方法类似,这里也构造了大量的用于视频排序的相关特征(如视频 ID、上次观看时间等)。这些特征的处理方式和候选生成网络类似,不同之处是排序网络的顶部是一个加权逻辑回归(weighted logistic regression),它对所有候选视频进行打分,从高到低排序后将分数较高的一些视频返回给用户。
+
+### 融合推荐模型
+本节会使用卷积神经网络(Convolutional Neural Networks)来学习电影名称的表示。下面会依次介绍文本卷积神经网络以及融合推荐模型。
+
+#### 文本卷积神经网络(CNN)
+
+卷积神经网络经常用来处理具有类似网格拓扑结构(grid-like topology)的数据。例如,图像可以视为二维网格的像素点,自然语言可以视为一维的词序列。卷积神经网络可以提取多种局部特征,并对其进行组合抽象得到更高级的特征表示。实验表明,卷积神经网络能高效地对图像及文本问题进行建模处理。
+
+卷积神经网络主要由卷积(convolution)和池化(pooling)操作构成,其应用及组合方式灵活多变,种类繁多。本小节我们以如图3所示的网络进行讲解:
+
+![text_cnn](./image/text_cnn.png)

+图3. 卷积神经网络文本分类模型 +

+ +假设待处理句子的长度为`$n$`,其中第`$i$`个词的词向量(word embedding)为`$x_i\in\mathbb{R}^k$`,`$k$`为维度大小。 + +首先,进行词向量的拼接操作:将每`$h$`个词拼接起来形成一个大小为`$h$`的词窗口,记为`$x_{i:i+h-1}$`,它表示词序列`$x_{i},x_{i+1},\ldots,x_{i+h-1}$`的拼接,其中,`$i$`表示词窗口中第一个词在整个句子中的位置,取值范围从`$1$`到`$n-h+1$`,`$x_{i:i+h-1}\in\mathbb{R}^{hk}$`。 + +其次,进行卷积操作:把卷积核(kernel)`$w\in\mathbb{R}^{hk}$`应用于包含`$h$`个词的窗口`$x_{i:i+h-1}$`,得到特征`$c_i=f(w\cdot x_{i:i+h-1}+b)$`,其中`$b\in\mathbb{R}$`为偏置项(bias),`$f$`为非线性激活函数,如`$sigmoid$`。将卷积核应用于句子中所有的词窗口`${x_{1:h},x_{2:h+1},\ldots,x_{n-h+1:n}}$`,产生一个特征图(feature map): + +$$c=[c_1,c_2,\ldots,c_{n-h+1}], c \in \mathbb{R}^{n-h+1}$$ + +接下来,对特征图采用时间维度上的最大池化(max pooling over time)操作得到此卷积核对应的整句话的特征`$\hat c$`,它是特征图中所有元素的最大值: + +$$\hat c=max(c)$$ + +#### 模型概览 + +在融合推荐模型的电影推荐系统中: + +1. 首先,使用用户特征和电影特征作为神经网络的输入,其中: + +- 用户特征融合了四个属性信息,分别是用户ID、性别、职业和年龄。 + +- 电影特征融合了三个属性信息,分别是电影ID、电影类型ID和电影名称。 + +2. 对用户特征,将用户ID映射为维度大小为256的向量表示,输入全连接层,并对其他三个属性也做类似的处理。然后将四个属性的特征表示分别全连接并相加。 + +3. 对电影特征,将电影ID以类似用户ID的方式进行处理,电影类型ID以向量的形式直接输入全连接层,电影名称用文本卷积神经网络得到其定长向量表示。然后将三个属性的特征表示分别全连接并相加。 + +4. 得到用户和电影的向量表示后,计算二者的余弦相似度作为推荐系统的打分。最后,用该相似度打分和用户真实打分的差异的平方作为该回归模型的损失函数。 + +![rec_regression_network](./image/rec_regression_network.png) +

+图4. 融合推荐模型 +

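+
+上文文本卷积中的词窗口拼接、卷积`$c_i=f(w\cdot x_{i:i+h-1}+b)$`与时间维最大池化`$\hat c=max(c)$`,可以用如下numpy示意验证(参数随机初始化,`n`、`k`、`h`为假设的小规模取值):
+
+```python
+import numpy as np
+
+# 单个卷积核的文本卷积计算示意:c_i = f(w · x_{i:i+h-1} + b),c_hat = max(c)
+n, k, h = 7, 5, 3                 # 句长n、词向量维度k、词窗口大小h(假设的小规模取值)
+rng = np.random.RandomState(1)
+X = rng.randn(n, k)               # 句子的词向量序列
+w = rng.randn(h * k)              # 卷积核,作用于h个词向量拼接成的窗口
+b = 0.1                           # 偏置项
+
+def sigmoid(z):
+    # 非线性激活函数f,这里以sigmoid为例
+    return 1.0 / (1.0 + np.exp(-z))
+
+# 对句子中所有词窗口x_{i:i+h-1}做卷积,得到长度为n-h+1的特征图c
+c = np.array([sigmoid(w @ X[i:i + h].reshape(-1) + b) for i in range(n - h + 1)])
+c_hat = c.max()                   # 时间维度上的最大池化,得到整句话的特征
+print(c.shape, c_hat)             # (5,) 与一个标量特征
+```
+
+实际应用中每个窗口大小会使用多个卷积核,各卷积核池化后的特征拼接起来,即得到文本的定长向量表示。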
+
+## 数据准备
+
+### 数据介绍与下载
+
+我们以 [MovieLens 百万数据集(ml-1m)](http://files.grouplens.org/datasets/movielens/ml-1m.zip)为例进行介绍。ml-1m 数据集包含了 6,000 位用户对 4,000 部电影的 1,000,000 条评价(评分范围 1~5 分,均为整数),由 GroupLens Research 实验室搜集整理。
+
+Paddle在API中提供了自动加载数据的模块。数据模块为 `paddle.dataset.movielens`。
+
+
+```python
+import paddle
+movie_info = paddle.dataset.movielens.movie_info()
+print movie_info.values()[0]
+```
+
+
+```python
+# Run this block to show dataset's documentation
+# help(paddle.dataset.movielens)
+```
+
+在原始数据中包含电影的特征数据,用户的特征数据,和用户对电影的评分。
+
+例如,其中某一个电影特征为:
+
+
+```python
+movie_info = paddle.dataset.movielens.movie_info()
+print movie_info.values()[0]
+```
+
+    <MovieInfo id(1), title(Toy Story ), categories(['Animation', "Children's", 'Comedy'])>
+
+这表示,电影的id是1,标题是《Toy Story》,该电影被分到三个类别中。这三个类别是动画,儿童,喜剧。
+
+
+```python
+user_info = paddle.dataset.movielens.user_info()
+print user_info.values()[0]
+```
+
+    <UserInfo id(1), gender(F), age(1), job(10)>
+
+这表示,该用户ID是1,女性,年龄比18岁还年轻。职业ID是10。
+
+
+其中,年龄使用下列分布:
+* 1: "Under 18"
+* 18: "18-24"
+* 25: "25-34"
+* 35: "35-44"
+* 45: "45-49"
+* 50: "50-55"
+* 56: "56+"
+
+职业是从下面几种选项里面选择得出:
+* 0: "other" or not specified
+* 1: "academic/educator"
+* 2: "artist"
+* 3: "clerical/admin"
+* 4: "college/grad student"
+* 5: "customer service"
+* 6: "doctor/health care"
+* 7: "executive/managerial"
+* 8: "farmer"
+* 9: "homemaker"
+* 10: "K-12 student"
+* 11: "lawyer"
+* 12: "programmer"
+* 13: "retired"
+* 14: "sales/marketing"
+* 15: "scientist"
+* 16: "self-employed"
+* 17: "technician/engineer"
+* 18: "tradesman/craftsman"
+* 19: "unemployed"
+* 20: "writer"
+
+而对于每一条训练/测试数据,均为 <用户特征> + <电影特征> + 评分。
+
+例如,我们获得第一条训练数据:
+
+
+```python
+train_set_creator = paddle.dataset.movielens.train()
+train_sample = next(train_set_creator())
+uid = train_sample[0]
+mov_id = train_sample[len(user_info[uid].value())]
+print "User %s rates Movie %s with Score %s"%(user_info[uid], movie_info[mov_id], train_sample[-1])
+```
+
+    User <UserInfo id(1), gender(F), age(1), job(10)> rates Movie <MovieInfo id(1193), title(One Flew Over the Cuckoo's Nest ), categories(['Drama'])> with Score [5.0]
+
+即用户1对电影1193的评价为5分。
+
+## 模型配置说明
+
+下面我们开始根据输入数据的形式配置模型。首先引入所需的库函数以及定义全局变量。
+
+
+```python
+import math
+import sys
+import numpy as np
+import paddle
+import paddle.fluid as fluid
+import paddle.fluid.layers as layers
+import paddle.fluid.nets as nets
+
+IS_SPARSE = True
+USE_GPU = False
+BATCH_SIZE = 256
+```
+
+然后为我们的用户特征综合模型定义模型配置
+
+```python
+def get_usr_combined_features():
+
+    USR_DICT_SIZE = paddle.dataset.movielens.max_user_id() + 1
+
+    uid = layers.data(name='user_id', shape=[1], dtype='int64')
+
+    usr_emb = layers.embedding(
+        input=uid,
+        dtype='float32',
+        size=[USR_DICT_SIZE, 32],
+        param_attr='user_table',
+        is_sparse=IS_SPARSE)
+
+    usr_fc = layers.fc(input=usr_emb, size=32)
+
+    USR_GENDER_DICT_SIZE = 2
+
+    usr_gender_id = layers.data(name='gender_id', shape=[1], dtype='int64')
+
+    usr_gender_emb = layers.embedding(
+        input=usr_gender_id,
+        size=[USR_GENDER_DICT_SIZE, 16],
+        param_attr='gender_table',
+        is_sparse=IS_SPARSE)
+
+    usr_gender_fc = layers.fc(input=usr_gender_emb, size=16)
+
+    USR_AGE_DICT_SIZE = len(paddle.dataset.movielens.age_table)
+    usr_age_id = layers.data(name='age_id', shape=[1], dtype="int64")
+
+    usr_age_emb = layers.embedding(
+        input=usr_age_id,
+        size=[USR_AGE_DICT_SIZE, 16],
+        is_sparse=IS_SPARSE,
+        param_attr='age_table')
+
+    usr_age_fc = layers.fc(input=usr_age_emb, size=16)
+
+    USR_JOB_DICT_SIZE = paddle.dataset.movielens.max_job_id() + 1
+    usr_job_id = layers.data(name='job_id', shape=[1], dtype="int64")
+
+    usr_job_emb = layers.embedding(
+        input=usr_job_id,
+        size=[USR_JOB_DICT_SIZE, 16],
+        param_attr='job_table',
+        is_sparse=IS_SPARSE)
+
+    usr_job_fc = layers.fc(input=usr_job_emb, size=16)
+
+    concat_embed = layers.concat(
+        input=[usr_fc, usr_gender_fc, usr_age_fc, usr_job_fc], axis=1)
+
+    usr_combined_features = layers.fc(input=concat_embed, size=200, act="tanh")
+
+    return usr_combined_features
+```
+
+如上述代码所示,对于每个用户,我们输入4维特征。其中包括user_id,gender_id,age_id,job_id。这几维特征均是简单的整数值。为了后续神经网络处理这些特征方便,我们借鉴NLP中的语言模型,将这几维离散的整数值,映射成embedding向量取出,分别形成usr_emb, usr_gender_emb, usr_age_emb, usr_job_emb。
+
+然后,我们对于所有的用户特征,均输入到一个全连接层(fc)中。将所有特征融合为一个200维度的特征。
+
+进而,我们对每一个电影特征做类似的变换,网络配置为:
+
+
+```python
+def get_mov_combined_features():
+
+    MOV_DICT_SIZE = paddle.dataset.movielens.max_movie_id() + 1
+
+    mov_id = layers.data(name='movie_id', shape=[1], dtype='int64')
+
+    mov_emb = layers.embedding(
+        input=mov_id,
+        dtype='float32',
+        size=[MOV_DICT_SIZE, 32],
+        param_attr='movie_table',
+        is_sparse=IS_SPARSE)
+
+    mov_fc = layers.fc(input=mov_emb, size=32)
+
+    CATEGORY_DICT_SIZE = len(paddle.dataset.movielens.movie_categories())
+
+    category_id = layers.data(
+        name='category_id', shape=[1], dtype='int64', lod_level=1)
+
+    mov_categories_emb = layers.embedding(
+        input=category_id, size=[CATEGORY_DICT_SIZE, 32], is_sparse=IS_SPARSE)
+
+    mov_categories_hidden = layers.sequence_pool(
+        input=mov_categories_emb, pool_type="sum")
+
+    MOV_TITLE_DICT_SIZE = len(paddle.dataset.movielens.get_movie_title_dict())
+
+    mov_title_id = layers.data(
+        name='movie_title', shape=[1], dtype='int64', lod_level=1)
+
+    mov_title_emb = layers.embedding(
+        input=mov_title_id, size=[MOV_TITLE_DICT_SIZE, 32], is_sparse=IS_SPARSE)
+
+    mov_title_conv = nets.sequence_conv_pool(
+        input=mov_title_emb,
+        num_filters=32,
+        filter_size=3,
+        act="tanh",
+        pool_type="sum")
+
+    concat_embed = layers.concat(
+        input=[mov_fc, mov_categories_hidden, mov_title_conv], axis=1)
+
+    mov_combined_features = layers.fc(input=concat_embed, size=200, act="tanh")
+
+    return mov_combined_features
+```
+
+电影标题名称(title)是一个序列的整数,整数代表的是这个词在索引序列中的下标。这个序列会被送入 `sequence_conv_pool` 层,这个层会在时间维度上使用卷积和池化。因此,尽管输入的序列长度各不相同,这一层的输出都会是固定长度的向量。
+
+最后,我们定义一个`inference_program`来使用余弦相似度计算用户特征与电影特征的相似性。
+
+```python
+def inference_program():
+    usr_combined_features = get_usr_combined_features()
+    mov_combined_features = get_mov_combined_features()
+
+    inference = layers.cos_sim(X=usr_combined_features, Y=mov_combined_features)
+    scale_infer = layers.scale(x=inference, scale=5.0)
+
+    return scale_infer
+```
+
+进而,我们定义一个`train_program`来使用`inference_program`计算出的结果,在标记数据的帮助下来计算误差。我们还定义了一个`optimizer_func`来定义优化器。
+
+```python
+def train_program():
+
+    scale_infer = inference_program()
+
+    label = layers.data(name='score', shape=[1], dtype='float32')
+    square_cost = layers.square_error_cost(input=scale_infer, label=label)
+    avg_cost = layers.mean(square_cost)
+
+    return [avg_cost, scale_infer]
+
+
+def optimizer_func():
+    return fluid.optimizer.SGD(learning_rate=0.2)
+```
+
+
+## 训练模型
+
+### 定义训练环境
+定义您的训练环境,可以指定训练是发生在CPU还是GPU上。
+
+```python
+use_cuda = False
+place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
+```
+
+### 定义数据提供器
+下一步是为训练和测试定义数据提供器。提供器每次读入一个大小为`BATCH_SIZE`的数据。`paddle.dataset.movielens.train`每次会在乱序化后提供一个大小为`BATCH_SIZE`的数据,乱序化的缓存大小为`buf_size`。
+
+```python
+train_reader = paddle.batch(
+    paddle.reader.shuffle(
+        paddle.dataset.movielens.train(), buf_size=8192),
+    batch_size=BATCH_SIZE)
+
+test_reader = paddle.batch(
+    paddle.dataset.movielens.test(), batch_size=BATCH_SIZE)
+```
+
+### 构造训练器(trainer)
+训练器需要一个训练程序和一个训练优化函数。
+
+```python
+trainer = fluid.Trainer(
+    train_func=train_program, place=place, optimizer_func=optimizer_func)
+```
+
+### 提供数据
+
+`feed_order`用来定义每条产生的数据和`paddle.layer.data`之间的映射关系。比如,`movielens.train`产生的第一列的数据对应的是`user_id`这个特征。
+
+```python
+feed_order = [
+    'user_id', 'gender_id', 'age_id', 'job_id', 'movie_id', 'category_id',
+    'movie_title', 'score'
+]
+```
+
+### 事件处理器
+回调函数`event_handler`在一个之前定义好的事件发生后会被调用。例如,我们可以在每步训练结束后查看误差。
+
+```python
+# Specify the directory path to save the parameters
+params_dirname = "recommender_system.inference.model"
+
+from paddle.v2.plot import Ploter
+test_title = "Test cost"
+plot_cost = Ploter(test_title)
+
+
+def event_handler(event):
+    if isinstance(event, fluid.EndStepEvent):
+        avg_cost_set = trainer.test(
+            reader=test_reader, feed_order=feed_order)
+
+        # get avg cost
+        avg_cost = np.array(avg_cost_set).mean()
+
+        plot_cost.append(test_title, event.step, avg_cost_set[0])
+        plot_cost.plot()
+
+        print("avg_cost: %s" % avg_cost)
+        print('BatchID {0}, Test Loss {1:0.2}'.format(event.epoch + 1,
+                                                      float(avg_cost)))
+
+        if event.step == 20:  # Adjust this number for accuracy
+            trainer.save_params(params_dirname)
+            trainer.stop()
+```
+
+### 开始训练
+最后,我们传入训练循环数(`num_epochs`)和一些别的参数,调用 `trainer.train` 来开始训练。
+
+```python
+trainer.train(
+    num_epochs=1,
+    event_handler=event_handler,
+    reader=train_reader,
+    feed_order=feed_order)
+```
+
+## 应用模型
+
+### 构建预测器
+传入`inference_program`和`params_dirname`来初始化一个预测器,`params_dirname`用来存放训练过程中的各个参数。
+
+```python
+inferencer = fluid.Inferencer(
+    inference_program, param_path=params_dirname, place=place)
+```
+
+### 生成测试用输入数据
+使用 create_lod_tensor(data, lod, place) 的API来生成细节层次的张量。`data`是一个序列,每个元素是一个索引号的序列。`lod`是细节层次的信息,对应于`data`。比如,data = [[10, 2, 3], [2, 3]] 意味着它包含两个序列,长度分别是3和2。于是相应地 lod = [[3, 2]],它表明其包含一层细节信息,意味着 `data` 有两个序列,长度分别是3和2。
+
+在这个预测例子中,我们试着预测用户ID为1的用户对于电影'Hunchback of Notre Dame'的评分。
+
+```python
+infer_movie_id = 783
+infer_movie_name = paddle.dataset.movielens.movie_info()[infer_movie_id].title
+user_id = fluid.create_lod_tensor([[1]], [[1]], place)
+gender_id = fluid.create_lod_tensor([[1]], [[1]], place)
+age_id = fluid.create_lod_tensor([[0]], [[1]], place)
+job_id = fluid.create_lod_tensor([[10]], [[1]], place)
+movie_id = fluid.create_lod_tensor([[783]], [[1]], place)  # Hunchback of Notre Dame
+category_id = fluid.create_lod_tensor([[10, 8, 9]], [[3]], place)  # Animation, Children's, Musical
+movie_title = fluid.create_lod_tensor([[1069, 4140, 2923, 710, 988]], [[5]],
+                                      place)  # 'hunchback','of','notre','dame','the'
+```
+
+### 测试
+现在我们可以进行预测了。我们要提供的`feed_order`应该和训练过程一致。
+
+
+```python
+results = inferencer.infer(
+    {
+        'user_id': user_id,
+        'gender_id': gender_id,
+        'age_id': age_id,
+        'job_id': job_id,
+        'movie_id': movie_id,
+        'category_id': category_id,
+        'movie_title': movie_title
+    },
+    return_numpy=False)
+```
+
+## 总结
+
+本章介绍了传统的推荐系统方法和YouTube的深度神经网络推荐系统,并以电影推荐为例,使用PaddlePaddle训练了一个个性化推荐神经网络模型。推荐系统几乎涵盖了电商系统、社交网络、广告推荐、搜索引擎等领域的方方面面,而在图像处理、自然语言处理等领域已经发挥重要作用的深度学习技术,也将会在推荐系统领域大放异彩。
+
+## 参考文献
+
+1. [Peter Brusilovsky](https://en.wikipedia.org/wiki/Peter_Brusilovsky) (2007). *The Adaptive Web*. p. 325.
+2. Robin Burke, [Hybrid Web Recommender Systems](http://www.dcs.warwick.ac.uk/~acristea/courses/CS411/2010/Book%20-%20The%20Adaptive%20Web/HybridWebRecommenderSystems.pdf), pp. 377-408, The Adaptive Web, Peter Brusilovsky, Alfred Kobsa, Wolfgang Nejdl (Ed.), Lecture Notes in Computer Science, Springer-Verlag, Berlin, Germany, Lecture Notes in Computer Science, Vol. 4321, May 2007, 978-3-540-72078-2.
+3. P. Resnick, N. Iacovou, etc. “[GroupLens: An Open Architecture for Collaborative Filtering of Netnews](http://ccs.mit.edu/papers/CCSWP165.html)”, Proceedings of ACM Conference on Computer Supported Cooperative Work, CSCW 1994. pp.175-186.
+4. Sarwar, Badrul, et al. "[Item-based collaborative filtering recommendation algorithms.](http://files.grouplens.org/papers/www10_sarwar.pdf)" *Proceedings of the 10th international conference on World Wide Web*. ACM, 2001.
+5. Kautz, Henry, Bart Selman, and Mehul Shah. "[Referral Web: combining social networks and collaborative filtering.](http://www.cs.cornell.edu/selman/papers/pdf/97.cacm.refweb.pdf)" Communications of the ACM 40.3 (1997): 63-65.
+6. Yuan, Jianbo, et al. ["Solving Cold-Start Problem in Large-scale Recommendation Engines: A Deep Learning Approach."](https://arxiv.org/pdf/1611.05480v1.pdf) *arXiv preprint arXiv:1611.05480* (2016).
+7. Covington P, Adams J, Sargin E. [Deep neural networks for youtube recommendations](https://static.googleusercontent.com/media/research.google.com/zh-CN//pubs/archive/45530.pdf)[C]//Proceedings of the 10th ACM Conference on Recommender Systems. ACM, 2016: 191-198.
+
+
+知识共享许可协议
本教程PaddlePaddle 创作,采用 知识共享 署名-相同方式共享 4.0 国际 许可协议进行许可。 diff --git a/doc/fluid/new_docs/beginners_guide/basics/understand_sentiment/.gitignore b/doc/fluid/new_docs/beginners_guide/basics/understand_sentiment/.gitignore new file mode 100644 index 0000000000..667762d327 --- /dev/null +++ b/doc/fluid/new_docs/beginners_guide/basics/understand_sentiment/.gitignore @@ -0,0 +1,10 @@ +data/aclImdb +data/imdb +data/pre-imdb +data/mosesdecoder-master +*.log +model_output +dataprovider_copy_1.py +model.list +*.pyc +.DS_Store diff --git a/doc/fluid/new_docs/beginners_guide/basics/understand_sentiment/image/lstm.png b/doc/fluid/new_docs/beginners_guide/basics/understand_sentiment/image/lstm.png new file mode 100644 index 0000000000..98fbea413a Binary files /dev/null and b/doc/fluid/new_docs/beginners_guide/basics/understand_sentiment/image/lstm.png differ diff --git a/doc/fluid/new_docs/beginners_guide/basics/understand_sentiment/image/lstm_en.png b/doc/fluid/new_docs/beginners_guide/basics/understand_sentiment/image/lstm_en.png new file mode 100755 index 0000000000..d73a00bf2c Binary files /dev/null and b/doc/fluid/new_docs/beginners_guide/basics/understand_sentiment/image/lstm_en.png differ diff --git a/doc/fluid/new_docs/beginners_guide/basics/understand_sentiment/image/rnn.png b/doc/fluid/new_docs/beginners_guide/basics/understand_sentiment/image/rnn.png new file mode 100755 index 0000000000..26c904102a Binary files /dev/null and b/doc/fluid/new_docs/beginners_guide/basics/understand_sentiment/image/rnn.png differ diff --git a/doc/fluid/new_docs/beginners_guide/basics/understand_sentiment/image/stacked_lstm.jpg b/doc/fluid/new_docs/beginners_guide/basics/understand_sentiment/image/stacked_lstm.jpg new file mode 100644 index 0000000000..6b2adf70f2 Binary files /dev/null and b/doc/fluid/new_docs/beginners_guide/basics/understand_sentiment/image/stacked_lstm.jpg differ diff --git a/doc/fluid/new_docs/beginners_guide/basics/understand_sentiment/image/stacked_lstm_en.png b/doc/fluid/new_docs/beginners_guide/basics/understand_sentiment/image/stacked_lstm_en.png new file mode 100755 index 0000000000..8b5dbd7261 Binary files /dev/null and b/doc/fluid/new_docs/beginners_guide/basics/understand_sentiment/image/stacked_lstm_en.png differ diff --git a/doc/fluid/new_docs/beginners_guide/basics/understand_sentiment/index.md b/doc/fluid/new_docs/beginners_guide/basics/understand_sentiment/index.md new file mode 100644 index 0000000000..624de7e4d4 --- /dev/null +++ b/doc/fluid/new_docs/beginners_guide/basics/understand_sentiment/index.md @@ -0,0 +1,354 @@ +# 情感分析 + +本教程源代码目录在[book/understand_sentiment](https://github.com/PaddlePaddle/book/tree/develop/06.understand_sentiment), 初次使用请参考PaddlePaddle[安装教程](https://github.com/PaddlePaddle/book/blob/develop/README.cn.md#运行这本书)。 + +## 背景介绍 + +在自然语言处理中,情感分析一般是指判断一段文本所表达的情绪状态。其中,一段文本可以是一个句子,一个段落或一个文档。情绪状态可以是两类,如(正面,负面),(高兴,悲伤);也可以是三类,如(积极,消极,中性)等等。情感分析的应用场景十分广泛,如把用户在购物网站(亚马逊、天猫、淘宝等)、旅游网站、电影评论网站上发表的评论分成正面评论和负面评论;或为了分析用户对于某一产品的整体使用感受,抓取产品的用户评论并进行情感分析等等。表格1展示了对电影评论进行情感分析的例子: + +| 电影评论 | 类别 | +| -------- | ----- | +| 在冯小刚这几年的电影里,算最好的一部的了| 正面 | +| 很不好看,好像一个地方台的电视剧 | 负面 | +| 圆方镜头全程炫技,色调背景美则美矣,但剧情拖沓,口音不伦不类,一直努力却始终无法入戏| 负面| +|剧情四星。但是圆镜视角加上婺源的风景整个非常有中国写意山水画的感觉,看得实在太舒服了。。|正面| + +

表格 1 电影评论情感分析

+ +在自然语言处理中,情感分析属于典型的**文本分类**问题,即把需要进行情感分析的文本划分为其所属类别。文本分类涉及文本表示和分类方法两个问题。在深度学习的方法出现之前,主流的文本表示方法为词袋模型BOW(bag of words),话题模型等等;分类方法有SVM(support vector machine), LR(logistic regression)等等。 + +对于一段文本,BOW表示会忽略其词顺序、语法和句法,将这段文本仅仅看做是一个词集合,因此BOW方法并不能充分表示文本的语义信息。例如,句子“这部电影糟糕透了”和“一个乏味,空洞,没有内涵的作品”在情感分析中具有很高的语义相似度,但是它们的BOW表示的相似度为0。又如,句子“一个空洞,没有内涵的作品”和“一个不空洞而且有内涵的作品”的BOW相似度很高,但实际上它们的意思很不一样。 + +本章我们所要介绍的深度学习模型克服了BOW表示的上述缺陷,它在考虑词顺序的基础上把文本映射到低维度的语义空间,并且以端对端(end to end)的方式进行文本表示及分类,其性能相对于传统方法有显著的提升\[[1](#参考文献)\]。 + +## 模型概览 +本章所使用的文本表示模型为卷积神经网络(Convolutional Neural Networks)和循环神经网络(Recurrent Neural Networks)及其扩展。下面依次介绍这几个模型。 + +### 文本卷积神经网络简介(CNN) + +我们在[推荐系统](https://github.com/PaddlePaddle/book/tree/develop/05.recommender_system)一节介绍过应用于文本数据的卷积神经网络模型的计算过程,这里进行一个简单的回顾。 + +对卷积神经网络来说,首先使用卷积处理输入的词向量序列,产生一个特征图(feature map),对特征图采用时间维度上的最大池化(max pooling over time)操作得到此卷积核对应的整句话的特征,最后,将所有卷积核得到的特征拼接起来即为文本的定长向量表示,对于文本分类问题,将其连接至softmax即构建出完整的模型。在实际应用中,我们会使用多个卷积核来处理句子,窗口大小相同的卷积核堆叠起来形成一个矩阵,这样可以更高效的完成运算。另外,我们也可使用窗口大小不同的卷积核来处理句子,[推荐系统](https://github.com/PaddlePaddle/book/tree/develop/05.recommender_system)一节的图3作为示意画了四个卷积核,不同颜色表示不同大小的卷积核操作。 + +对于一般的短文本分类问题,上文所述的简单的文本卷积网络即可达到很高的正确率\[[1](#参考文献)\]。若想得到更抽象更高级的文本特征表示,可以构建深层文本卷积神经网络\[[2](#参考文献),[3](#参考文献)\]。 + +### 循环神经网络(RNN) + +循环神经网络是一种能对序列数据进行精确建模的有力工具。实际上,循环神经网络的理论计算能力是图灵完备的\[[4](#参考文献)\]。自然语言是一种典型的序列数据(词序列),近年来,循环神经网络及其变体(如long short term memory\[[5](#参考文献)\]等)在自然语言处理的多个领域,如语言模型、句法解析、语义角色标注(或一般的序列标注)、语义表示、图文生成、对话、机器翻译等任务上均表现优异甚至成为目前效果最好的方法。 + +![rnn](./image/rnn.png) +

+图1. 循环神经网络按时间展开的示意图 +

+ +循环神经网络按时间展开后如图1所示:在第`$t$`时刻,网络读入第`$t$`个输入`$x_t$`(向量表示)及前一时刻隐层的状态值`$h_{t-1}$`(向量表示,`$h_0$`一般初始化为`$0$`向量),计算得出本时刻隐层的状态值`$h_t$`,重复这一步骤直至读完所有输入。如果将循环神经网络所表示的函数记为`$f$`,则其公式可表示为: + +$$h_t=f(x_t,h_{t-1})=\sigma(W_{xh}x_t+W_{hh}h_{t-1}+b_h)$$ + +其中`$W_{xh}$`是输入到隐层的矩阵参数,`$W_{hh}$`是隐层到隐层的矩阵参数,`$b_h$`为隐层的偏置向量(bias)参数,`$\sigma$`为`$sigmoid$`函数。 + +在处理自然语言时,一般会先将词(one-hot表示)映射为其词向量(word embedding)表示,然后再作为循环神经网络每一时刻的输入`$x_t$`。此外,可以根据实际需要的不同在循环神经网络的隐层上连接其它层。如,可以把一个循环神经网络的隐层输出连接至下一个循环神经网络的输入构建深层(deep or stacked)循环神经网络,或者提取最后一个时刻的隐层状态作为句子表示进而使用分类模型等等。 + +### 长短期记忆网络(LSTM) + +对于较长的序列数据,循环神经网络的训练过程中容易出现梯度消失或爆炸现象\[[6](#参考文献)\]。为了解决这一问题,Hochreiter S, Schmidhuber J. (1997)提出了LSTM(long short term memory\[[5](#参考文献)\])。 + +相比于简单的循环神经网络,LSTM增加了记忆单元`$c$`、输入门`$i$`、遗忘门`$f$`及输出门`$o$`。这些门及记忆单元组合起来大大提升了循环神经网络处理长序列数据的能力。若将基于LSTM的循环神经网络表示的函数记为`$F$`,则其公式为: + +$$ h_t=F(x_t,h_{t-1})$$ + +`$F$`由下列公式组合而成\[[7](#参考文献)\]: +$$ i_t = \sigma{(W_{xi}x_t+W_{hi}h_{t-1}+W_{ci}c_{t-1}+b_i)} $$ +$$ f_t = \sigma(W_{xf}x_t+W_{hf}h_{t-1}+W_{cf}c_{t-1}+b_f) $$ +$$ c_t = f_t\odot c_{t-1}+i_t\odot tanh(W_{xc}x_t+W_{hc}h_{t-1}+b_c) $$ +$$ o_t = \sigma(W_{xo}x_t+W_{ho}h_{t-1}+W_{co}c_{t}+b_o) $$ +$$ h_t = o_t\odot tanh(c_t) $$ +其中,`$i_t, f_t, c_t, o_t$`分别表示输入门,遗忘门,记忆单元及输出门的向量值,带角标的`$W$`及`$b$`为模型参数,`$tanh$`为双曲正切函数,`$\odot$`表示逐元素(elementwise)的乘法操作。输入门控制着新输入进入记忆单元`$c$`的强度,遗忘门控制着记忆单元维持上一时刻值的强度,输出门控制着输出记忆单元的强度。三种门的计算方式类似,但有着完全不同的参数,它们各自以不同的方式控制着记忆单元`$c$`,如图2所示: + +![lstm](./image/lstm.png) +

+图2. 时刻`$t$`的LSTM [7] +

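+
+为便于对照,下面按上文公式给出单步LSTM的numpy示意(随机小参数,仅用于说明;其中`$W_{ci},W_{cf},W_{co}$`实现为向量并用逐元素乘,相当于常见的对角peephole连接,并非本教程的PaddlePaddle实现):
+
+```python
+import numpy as np
+
+# 按上文公式实现的单步LSTM示意(随机参数,仅用于说明)
+D_x, D_h = 4, 3
+rng = np.random.RandomState(0)
+p = lambda *shape: rng.randn(*shape) * 0.1
+
+Wxi, Whi, Wci, bi = p(D_x, D_h), p(D_h, D_h), p(D_h), p(D_h)
+Wxf, Whf, Wcf, bf = p(D_x, D_h), p(D_h, D_h), p(D_h), p(D_h)
+Wxc, Whc, bc      = p(D_x, D_h), p(D_h, D_h), p(D_h)
+Wxo, Who, Wco, bo = p(D_x, D_h), p(D_h, D_h), p(D_h), p(D_h)
+
+def sigmoid(z):
+    return 1.0 / (1.0 + np.exp(-z))
+
+def lstm_step(x, h_prev, c_prev):
+    i = sigmoid(x @ Wxi + h_prev @ Whi + c_prev * Wci + bi)    # 输入门
+    f = sigmoid(x @ Wxf + h_prev @ Whf + c_prev * Wcf + bf)    # 遗忘门
+    c = f * c_prev + i * np.tanh(x @ Wxc + h_prev @ Whc + bc)  # 记忆单元
+    o = sigmoid(x @ Wxo + h_prev @ Who + c * Wco + bo)         # 输出门(使用当前c)
+    h = o * np.tanh(c)
+    return h, c
+
+h, c = np.zeros(D_h), np.zeros(D_h)
+for x in rng.randn(6, D_x):       # 依次读入长度为6的输入序列
+    h, c = lstm_step(x, h, c)
+print(h)
+```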
+ +LSTM通过给简单的循环神经网络增加记忆及控制门的方式,增强了其处理远距离依赖问题的能力。类似原理的改进还有Gated Recurrent Unit (GRU)\[[8](#参考文献)\],其设计更为简洁一些。**这些改进虽然各有不同,但是它们的宏观描述却与简单的循环神经网络一样(如图2所示),即隐状态依据当前输入及前一时刻的隐状态来改变,不断地循环这一过程直至输入处理完毕:** + +$$ h_t=Recrurent(x_t,h_{t-1})$$ + +其中,`$Recrurent$`可以表示简单的循环神经网络、GRU或LSTM。 + +### 栈式双向LSTM(Stacked Bidirectional LSTM) + +对于正常顺序的循环神经网络,`$h_t$`包含了`$t$`时刻之前的输入信息,也就是上文信息。同样,为了得到下文信息,我们可以使用反方向(将输入逆序处理)的循环神经网络。结合构建深层循环神经网络的方法(深层神经网络往往能得到更抽象和高级的特征表示),我们可以通过构建更加强有力的基于LSTM的栈式双向循环神经网络\[[9](#参考文献)\],来对时序数据进行建模。 + +如图3所示(以三层为例),奇数层LSTM正向,偶数层LSTM反向,高一层的LSTM使用低一层LSTM及之前所有层的信息作为输入,对最高层LSTM序列使用时间维度上的最大池化即可得到文本的定长向量表示(这一表示充分融合了文本的上下文信息,并且对文本进行了深层次抽象),最后我们将文本表示连接至softmax构建分类模型。 + +![stacked_lstm](./image/stacked_lstm.jpg) +

+图3. 栈式双向LSTM用于文本分类 +

+
+
+## 数据集介绍
+
+我们以[IMDB情感分析数据集](http://ai.stanford.edu/%7Eamaas/data/sentiment/)为例进行介绍。IMDB数据集的训练集和测试集分别包含25000个已标注过的电影评论。其中,负面评论的得分小于等于4,正面评论的得分大于等于7,满分10分。
+```text
+aclImdb
+|- test
+   |-- neg
+   |-- pos
+|- train
+   |-- neg
+   |-- pos
+```
+Paddle在`dataset/imdb.py`中实现了imdb数据集的自动下载和读取,并提供了读取字典、训练数据、测试数据等API。
+
+## 配置模型
+
+在该示例中,我们实现了两种文本分类算法,分别基于[推荐系统](https://github.com/PaddlePaddle/book/tree/develop/05.recommender_system)一节介绍过的文本卷积神经网络,以及[栈式双向LSTM](#栈式双向LSTM(Stacked Bidirectional LSTM))。我们首先引入要用到的库和定义全局变量:
+
+```python
+import paddle
+import paddle.fluid as fluid
+from functools import partial
+import numpy as np
+
+CLASS_DIM = 2
+EMB_DIM = 128
+HID_DIM = 512
+BATCH_SIZE = 128
+USE_GPU = False
+```
+
+
+### 文本卷积神经网络
+我们构建神经网络`convolution_net`,示例代码如下。
+需要注意的是:`fluid.nets.sequence_conv_pool` 包含卷积和池化层两个操作。
+
+```python
+def convolution_net(data, input_dim, class_dim, emb_dim, hid_dim):
+    emb = fluid.layers.embedding(
+        input=data, size=[input_dim, emb_dim], is_sparse=True)
+    conv_3 = fluid.nets.sequence_conv_pool(
+        input=emb,
+        num_filters=hid_dim,
+        filter_size=3,
+        act="tanh",
+        pool_type="sqrt")
+    conv_4 = fluid.nets.sequence_conv_pool(
+        input=emb,
+        num_filters=hid_dim,
+        filter_size=4,
+        act="tanh",
+        pool_type="sqrt")
+    prediction = fluid.layers.fc(
+        input=[conv_3, conv_4], size=class_dim, act="softmax")
+    return prediction
+```
+
+网络的输入`input_dim`表示的是词典的大小,`class_dim`表示类别数。这里,我们使用[`sequence_conv_pool`](https://github.com/PaddlePaddle/Paddle/blob/develop/python/paddle/trainer_config_helpers/networks.py) API实现了卷积和池化操作。
+
+### 栈式双向LSTM
+
+栈式双向神经网络`stacked_lstm_net`的代码片段如下:
+
+```python
+def stacked_lstm_net(data, input_dim, class_dim, emb_dim, hid_dim, stacked_num):
+
+    emb = fluid.layers.embedding(
+        input=data, size=[input_dim, emb_dim], is_sparse=True)
+
+    fc1 = fluid.layers.fc(input=emb, size=hid_dim)
+    lstm1, cell1 = fluid.layers.dynamic_lstm(input=fc1, size=hid_dim)
+
+    inputs = [fc1, lstm1]
+
+    for i in range(2, stacked_num + 1):
+        fc = fluid.layers.fc(input=inputs, size=hid_dim)
+        lstm, cell = fluid.layers.dynamic_lstm(
+            input=fc, size=hid_dim, is_reverse=(i % 2) == 0)
+        inputs = [fc, lstm]
+
+    fc_last = fluid.layers.sequence_pool(input=inputs[0], pool_type='max')
+    lstm_last = fluid.layers.sequence_pool(input=inputs[1], pool_type='max')
+
+    prediction = fluid.layers.fc(input=[fc_last, lstm_last],
+                                 size=class_dim,
+                                 act='softmax')
+    return prediction
+```
+以上的栈式双向LSTM抽象出了高级特征并把其映射到和分类类别数同样大小的向量上。最后一个全连接层的`softmax`激活用来计算分类属于某个类别的概率。
+
+重申一下,此处我们可以调用`convolution_net`或`stacked_lstm_net`的任何一个。我们以`convolution_net`为例。
+
+接下来我们定义预测程序(`inference_program`)。预测程序使用`convolution_net`来对`fluid.layers.data`的输入进行预测。
+
+```python
+def inference_program(word_dict):
+    data = fluid.layers.data(
+        name="words", shape=[1], dtype="int64", lod_level=1)
+
+    dict_dim = len(word_dict)
+    net = convolution_net(data, dict_dim, CLASS_DIM, EMB_DIM, HID_DIM)
+    return net
+```
+
+我们这里定义了`train_program`。它使用了从`inference_program`返回的结果来计算误差。我们同时定义了优化函数`optimizer_func`。
+
+因为是有监督的学习,训练集的标签也在`fluid.layers.data`中定义了。在训练过程中,用`fluid.layers.cross_entropy`计算的交叉熵作为损失函数。
+
+在测试过程中,分类器会计算各个输出的概率。第一个返回的数值规定为损耗(cost)。
+
+```python
+def train_program(word_dict):
+    prediction = inference_program(word_dict)
+    label = fluid.layers.data(name="label", shape=[1], dtype="int64")
+    cost = fluid.layers.cross_entropy(input=prediction, label=label)
+    avg_cost = fluid.layers.mean(cost)
+    accuracy = fluid.layers.accuracy(input=prediction, label=label)
+    return [avg_cost, accuracy]
+
+
+def optimizer_func():
+    return fluid.optimizer.Adagrad(learning_rate=0.002)
+```
+
+## 训练模型
+
+### 定义训练环境
+
+定义您的训练是在CPU上还是在GPU上:
+
+
+```python
+use_cuda = False
+place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
+```
+
+### 定义数据提供器
+
+下一步是为训练和测试定义数据提供器。提供器每次读入一个大小为 BATCH_SIZE的数据。paddle.dataset.imdb.train 每次会在乱序化后提供一个大小为BATCH_SIZE的数据,乱序化的缓存大小为buf_size。
+
+注意:读取IMDB的数据可能会花费几分钟的时间,请耐心等待。
+
+```python
+print("Loading IMDB word dict....")
+word_dict = paddle.dataset.imdb.word_dict()
+
+print ("Reading training data....")
+train_reader = paddle.batch(
+    paddle.reader.shuffle(
+        paddle.dataset.imdb.train(word_dict), buf_size=25000),
+    batch_size=BATCH_SIZE)
+```
+
+### 构造训练器(trainer)
+训练器需要一个训练程序和一个训练优化函数。
+
+```python
+trainer = fluid.Trainer(
+    train_func=partial(train_program, word_dict),
+    place=place,
+    optimizer_func=optimizer_func)
+```
+
+### 提供数据
+
+`feed_order`用来定义每条产生的数据和`paddle.layer.data`之间的映射关系。比如,`imdb.train`产生的第一列的数据对应的是`words`这个特征。
+
+```python
+feed_order = ['words', 'label']
+```
+
+### 事件处理器
+
+回调函数`event_handler`在一个之前定义好的事件发生后会被调用。例如,我们可以在每步训练结束后查看误差。
+
+```python
+# Specify the directory path to save the parameters
+params_dirname = "understand_sentiment_conv.inference.model"
+
+def event_handler(event):
+    if isinstance(event, fluid.EndStepEvent):
+        print("Step {0}, Epoch {1} Metrics {2}".format(
+            event.step, event.epoch, map(np.array, event.metrics)))
+
+        if event.step == 10:
+            trainer.save_params(params_dirname)
+            trainer.stop()
+```
+
+### 开始训练
+
+最后,我们传入训练循环数(num_epochs)和一些别的参数,调用 trainer.train 来开始训练。
+
+```python
+trainer.train(
+    num_epochs=1,
+    event_handler=event_handler,
+    reader=train_reader,
+    feed_order=feed_order)
+```
+
+## 应用模型
+
+### 构建预测器
+
+传入`inference_program`和`params_dirname`来初始化一个预测器,`params_dirname`用来存放训练过程中的各个参数。
+
+```python
+inferencer = fluid.Inferencer(
+    inference_program, param_path=params_dirname, place=place)
+```
+
+### 生成测试用输入数据
+
+为了进行预测,我们任意选取3个评论。请随意选取您看好的3个。我们把评论中的每个词对应到`word_dict`中的id。如果词典中没有这个词,则设为`<unk>`。
+然后我们用`create_lod_tensor`来创建细节层次的张量。
+
+```python
+reviews_str = [
+    'read the book forget the movie', 'this is a great movie', 'this is very bad'
+]
+reviews = [c.split() for c in reviews_str]
+
+UNK = word_dict['<unk>']
+lod = []
+for c in reviews:
+    lod.append([word_dict.get(words, UNK) for words in c])
+
+base_shape = [[len(c) for c in lod]]
+
+tensor_words = fluid.create_lod_tensor(lod, base_shape, place)
+```
+
+### 测试
+
+现在我们可以对每一条评论进行正面或者负面的预测啦。
+
+```python
+results = inferencer.infer({'words': tensor_words})
+
+for i, r in enumerate(results[0]):
+    print("Predict probability of ", r[0], " to be positive and ", r[1], " to be negative for review \'", reviews_str[i], "\'")
+
+```
+
+
+## 总结
+
+本章我们以情感分析为例,介绍了使用深度学习的方法进行端对端的短文本分类,并且使用PaddlePaddle完成了全部相关实验。同时,我们简要介绍了两种文本处理模型:卷积神经网络和循环神经网络。在后续的章节中我们会看到这两种基本的深度学习模型在其它任务上的应用。
+
+
+## 参考文献
+1. Kim Y. [Convolutional neural networks for sentence classification](http://arxiv.org/pdf/1408.5882)[J]. arXiv preprint arXiv:1408.5882, 2014.
+2. Kalchbrenner N, Grefenstette E, Blunsom P. [A convolutional neural network for modelling sentences](http://arxiv.org/pdf/1404.2188.pdf?utm_medium=App.net&utm_source=PourOver)[J]. arXiv preprint arXiv:1404.2188, 2014.
+3. Yann N. Dauphin, et al. [Language Modeling with Gated Convolutional Networks](https://arxiv.org/pdf/1612.08083v1.pdf)[J] arXiv preprint arXiv:1612.08083, 2016.
+4. Siegelmann H T, Sontag E D. [On the computational power of neural nets](http://research.cs.queensu.ca/home/akl/cisc879/papers/SELECTED_PAPERS_FROM_VARIOUS_SOURCES/05070215382317071.pdf)[C]//Proceedings of the fifth annual workshop on Computational learning theory.
ACM, 1992: 440-449. +5. Hochreiter S, Schmidhuber J. [Long short-term memory](http://web.eecs.utk.edu/~itamar/courses/ECE-692/Bobby_paper1.pdf)[J]. Neural computation, 1997, 9(8): 1735-1780. +6. Bengio Y, Simard P, Frasconi P. [Learning long-term dependencies with gradient descent is difficult](http://www-dsi.ing.unifi.it/~paolo/ps/tnn-94-gradient.pdf)[J]. IEEE transactions on neural networks, 1994, 5(2): 157-166. +7. Graves A. [Generating sequences with recurrent neural networks](http://arxiv.org/pdf/1308.0850)[J]. arXiv preprint arXiv:1308.0850, 2013. +8. Cho K, Van Merriënboer B, Gulcehre C, et al. [Learning phrase representations using RNN encoder-decoder for statistical machine translation](http://arxiv.org/pdf/1406.1078)[J]. arXiv preprint arXiv:1406.1078, 2014. +9. Zhou J, Xu W. [End-to-end learning of semantic role labeling using recurrent neural networks](http://www.aclweb.org/anthology/P/P15/P15-1109.pdf)[C]//Proceedings of the Annual Meeting of the Association for Computational Linguistics. 2015. + +
+知识共享许可协议
本教程PaddlePaddle 创作,采用 知识共享 署名-相同方式共享 4.0 国际 许可协议进行许可。 diff --git a/doc/fluid/new_docs/beginners_guide/basics/word2vec/.gitignore b/doc/fluid/new_docs/beginners_guide/basics/word2vec/.gitignore new file mode 100644 index 0000000000..a620e0279c --- /dev/null +++ b/doc/fluid/new_docs/beginners_guide/basics/word2vec/.gitignore @@ -0,0 +1,3 @@ +data/train.list +data/test.list +data/simple-examples* diff --git a/doc/fluid/new_docs/beginners_guide/basics/word2vec/image/2d_similarity.png b/doc/fluid/new_docs/beginners_guide/basics/word2vec/image/2d_similarity.png new file mode 100644 index 0000000000..384f59919a Binary files /dev/null and b/doc/fluid/new_docs/beginners_guide/basics/word2vec/image/2d_similarity.png differ diff --git a/doc/fluid/new_docs/beginners_guide/basics/word2vec/image/cbow.png b/doc/fluid/new_docs/beginners_guide/basics/word2vec/image/cbow.png new file mode 100644 index 0000000000..76b7d4bc0f Binary files /dev/null and b/doc/fluid/new_docs/beginners_guide/basics/word2vec/image/cbow.png differ diff --git a/doc/fluid/new_docs/beginners_guide/basics/word2vec/image/cbow_en.png b/doc/fluid/new_docs/beginners_guide/basics/word2vec/image/cbow_en.png new file mode 100755 index 0000000000..d985c393e6 Binary files /dev/null and b/doc/fluid/new_docs/beginners_guide/basics/word2vec/image/cbow_en.png differ diff --git a/doc/fluid/new_docs/beginners_guide/basics/word2vec/image/ngram.en.png b/doc/fluid/new_docs/beginners_guide/basics/word2vec/image/ngram.en.png new file mode 100755 index 0000000000..2e16ab2f44 Binary files /dev/null and b/doc/fluid/new_docs/beginners_guide/basics/word2vec/image/ngram.en.png differ diff --git a/doc/fluid/new_docs/beginners_guide/basics/word2vec/image/ngram.png b/doc/fluid/new_docs/beginners_guide/basics/word2vec/image/ngram.png new file mode 100644 index 0000000000..2449dce6a8 Binary files /dev/null and b/doc/fluid/new_docs/beginners_guide/basics/word2vec/image/ngram.png differ diff --git a/doc/fluid/new_docs/beginners_guide/basics/word2vec/image/nnlm.png b/doc/fluid/new_docs/beginners_guide/basics/word2vec/image/nnlm.png new file mode 100644 index 0000000000..1e0b40a8f7 Binary files /dev/null and b/doc/fluid/new_docs/beginners_guide/basics/word2vec/image/nnlm.png differ diff --git a/doc/fluid/new_docs/beginners_guide/basics/word2vec/image/nnlm_en.png b/doc/fluid/new_docs/beginners_guide/basics/word2vec/image/nnlm_en.png new file mode 100755 index 0000000000..158bd64b8f Binary files /dev/null and b/doc/fluid/new_docs/beginners_guide/basics/word2vec/image/nnlm_en.png differ diff --git a/doc/fluid/new_docs/beginners_guide/basics/word2vec/image/sentence_emb.png b/doc/fluid/new_docs/beginners_guide/basics/word2vec/image/sentence_emb.png new file mode 100644 index 0000000000..ce4a8bf476 Binary files /dev/null and b/doc/fluid/new_docs/beginners_guide/basics/word2vec/image/sentence_emb.png differ diff --git a/doc/fluid/new_docs/beginners_guide/basics/word2vec/image/skipgram.png b/doc/fluid/new_docs/beginners_guide/basics/word2vec/image/skipgram.png new file mode 100644 index 0000000000..a3ab385845 Binary files /dev/null and b/doc/fluid/new_docs/beginners_guide/basics/word2vec/image/skipgram.png differ diff --git a/doc/fluid/new_docs/beginners_guide/basics/word2vec/image/skipgram_en.png b/doc/fluid/new_docs/beginners_guide/basics/word2vec/image/skipgram_en.png new file mode 100755 index 0000000000..3c36c6d1f6 Binary files /dev/null and b/doc/fluid/new_docs/beginners_guide/basics/word2vec/image/skipgram_en.png differ diff --git 
a/doc/fluid/new_docs/beginners_guide/basics/word2vec/index.md b/doc/fluid/new_docs/beginners_guide/basics/word2vec/index.md new file mode 100644 index 0000000000..e73a6334ca --- /dev/null +++ b/doc/fluid/new_docs/beginners_guide/basics/word2vec/index.md @@ -0,0 +1,440 @@ + +# 词向量 + +本教程源代码目录在[book/word2vec](https://github.com/PaddlePaddle/book/tree/develop/04.word2vec), 初次使用请参考PaddlePaddle[安装教程](https://github.com/PaddlePaddle/book/blob/develop/README.cn.md#运行这本书)。 + +## 背景介绍 + +本章我们介绍词的向量表征,也称为word embedding。词向量是自然语言处理中常见的一个操作,是搜索引擎、广告系统、推荐系统等互联网服务背后常见的基础技术。 + +在这些互联网服务里,我们经常要比较两个词或者两段文本之间的相关性。为了做这样的比较,我们往往先要把词表示成计算机适合处理的方式。最自然的方式恐怕莫过于向量空间模型(vector space model)。 +在这种方式里,每个词被表示成一个实数向量(one-hot vector),其长度为字典大小,每个维度对应一个字典里的每个词,除了这个词对应维度上的值是1,其他元素都是0。 + +One-hot vector虽然自然,但是用处有限。比如,在互联网广告系统里,如果用户输入的query是“母亲节”,而有一个广告的关键词是“康乃馨”。虽然按照常理,我们知道这两个词之间是有联系的——母亲节通常应该送给母亲一束康乃馨;但是这两个词对应的one-hot vectors之间的距离度量,无论是欧氏距离还是余弦相似度(cosine similarity),由于其向量正交,都认为这两个词毫无相关性。 得出这种与我们相悖的结论的根本原因是:每个词本身的信息量都太小。所以,仅仅给定两个词,不足以让我们准确判别它们是否相关。要想精确计算相关性,我们还需要更多的信息——从大量数据里通过机器学习方法归纳出来的知识。 + +在机器学习领域里,各种“知识”被各种模型表示,词向量模型(word embedding model)就是其中的一类。通过词向量模型可将一个 one-hot vector映射到一个维度更低的实数向量(embedding vector),如`$embedding(Mother's\ Day) = [0.3, 4.2, -1.5, ...], embedding(Carnation) = [0.2, 5.6, -2.3, ...]$`。在这个映射到的实数向量表示中,希望两个语义(或用法)上相似的词对应的词向量“更像”,这样如“母亲节”和“康乃馨”的对应词向量的余弦相似度就不再为零了。 + +词向量模型可以是概率模型、共生矩阵(co-occurrence matrix)模型或神经元网络模型。在用神经网络求词向量之前,传统做法是统计一个词语的共生矩阵`$X$`。`$X$`是一个`$|V| \times |V|$` 大小的矩阵,`$X_{ij}$`表示在所有语料中,词汇表`V`(vocabulary)中第i个词和第j个词同时出现的词数,`$|V|$`为词汇表的大小。对`$X$`做矩阵分解(如奇异值分解,Singular Value Decomposition \[[5](#参考文献)\]),得到的`$U$`即视为所有词的词向量: + +$$X = USV^T$$ + +但这样的传统做法有很多问题:
+1) 由于很多词没有出现,导致矩阵极其稀疏,因此需要对词频做额外处理来达到好的矩阵分解效果;
+2) 矩阵非常大,维度太高(通常达到`$10^6 \times 10^6$`的数量级);
+3) 需要手动去掉停用词(如although, a,...),不然这些频繁出现的词也会影响矩阵分解的效果。 + + +基于神经网络的模型不需要计算存储一个在全语料上统计的大表,而是通过学习语义信息得到词向量,因此能很好地解决以上问题。在本章里,我们将展示基于神经网络训练词向量的细节,以及如何用PaddlePaddle训练一个词向量模型。 + + +## 效果展示 + +本章中,当词向量训练好后,我们可以用数据可视化算法t-SNE\[[4](#参考文献)\]画出词语特征在二维上的投影(如下图所示)。从图中可以看出,语义相关的词语(如a, the, these; big, huge)在投影上距离很近,语意无关的词(如say, business; decision, japan)在投影上的距离很远。 + +![2d_similarity](./image/2d_similarity.png) +

+图1. 词向量的二维投影 +
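+图1中的二维投影可以用类似下面的示意代码得到。这里借助 scikit-learn 的 t-SNE 做降维;其中 `embeddings`(词向量矩阵)与 `words`(对应词表)均为假设的占位输入,实际应替换为训练得到的词向量和字典,代码仅作演示,并非教程正文的一部分:
+
+```python
+import numpy as np
+import matplotlib.pyplot as plt
+from sklearn.manifold import TSNE
+
+# 占位输入:实际应为训练好的词向量矩阵及其对应词表
+embeddings = np.random.rand(100, 32).astype('float32')
+words = ['word%d' % i for i in range(100)]
+
+# 用 t-SNE 把词向量降到二维,再画出散点并标注对应的词
+points = TSNE(n_components=2, random_state=0).fit_transform(embeddings)
+plt.figure(figsize=(8, 8))
+plt.scatter(points[:, 0], points[:, 1])
+for word, (x, y) in zip(words, points):
+    plt.annotate(word, xy=(x, y))
+plt.show()
+```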

+ +另一方面,我们知道两个向量的余弦值在`$[-1,1]$`的区间内:两个完全相同的向量余弦值为1, 两个相互垂直的向量之间余弦值为0,两个方向完全相反的向量余弦值为-1,即相关性和余弦值大小成正比。因此我们还可以计算两个词向量的余弦相似度: + +``` +similarity: 0.899180685161 +please input two words: big huge + +please input two words: from company +similarity: -0.0997506977351 +``` + +以上结果可以通过运行`calculate_dis.py`, 加载字典里的单词和对应训练特征结果得到,我们将在[应用模型](#应用模型)中详细描述用法。 + + +## 模型概览 + +在这里我们介绍三个训练词向量的模型:N-gram模型,CBOW模型和Skip-gram模型,它们的中心思想都是通过上下文得到一个词出现的概率。对于N-gram模型,我们会先介绍语言模型的概念,并在之后的[训练模型](#训练模型)中,带大家用PaddlePaddle实现它。而后两个模型,是近年来最有名的神经元词向量模型,由 Tomas Mikolov 在Google 研发\[[3](#参考文献)\],虽然它们很浅很简单,但训练效果很好。 + +### 语言模型 + +在介绍词向量模型之前,我们先来引入一个概念:语言模型。 +语言模型旨在为语句的联合概率函数`$P(w_1, ..., w_T)$`建模, 其中`$w_i$`表示句子中的第i个词。语言模型的目标是,希望模型对有意义的句子赋予大概率,对没意义的句子赋予小概率。 +这样的模型可以应用于很多领域,如机器翻译、语音识别、信息检索、词性标注、手写识别等,它们都希望能得到一个连续序列的概率。 以信息检索为例,当你在搜索“how long is a football bame”时(bame是一个医学名词),搜索引擎会提示你是否希望搜索"how long is a football game", 这是因为根据语言模型计算出“how long is a football bame”的概率很低,而与bame近似的,可能引起错误的词中,game会使该句生成的概率最大。 + +对语言模型的目标概率`$P(w_1, ..., w_T)$`,如果假设文本中每个词都是相互独立的,则整句话的联合概率可以表示为其中所有词语条件概率的乘积,即: + +$$P(w_1, ..., w_T) = \prod_{t=1}^TP(w_t)$$ + +然而我们知道语句中的每个词出现的概率都与其前面的词紧密相关, 所以实际上通常用条件概率表示语言模型: + +$$P(w_1, ..., w_T) = \prod_{t=1}^TP(w_t | w_1, ... , w_{t-1})$$ + + + +### N-gram neural model + +在计算语言学中,n-gram是一种重要的文本表示方法,表示一个文本中连续的n个项。基于具体的应用场景,每一项可以是一个字母、单词或者音节。 n-gram模型也是统计语言模型中的一种重要方法,用n-gram训练语言模型时,一般用每个n-gram的历史n-1个词语组成的内容来预测第n个词。 + +Yoshua Bengio等科学家就于2003年在著名论文 Neural Probabilistic Language Models \[[1](#参考文献)\] 中介绍如何学习一个神经元网络表示的词向量模型。文中的神经概率语言模型(Neural Network Language Model,NNLM)通过一个线性映射和一个非线性隐层连接,同时学习了语言模型和词向量,即通过学习大量语料得到词语的向量表达,通过这些向量得到整个句子的概率。用这种方法学习语言模型可以克服维度灾难(curse of dimensionality),即训练和测试数据不同导致的模型不准。注意:由于“神经概率语言模型”说法较为泛泛,我们在这里不用其NNLM的本名,考虑到其具体做法,本文中称该模型为N-gram neural model。 + +我们在上文中已经讲到用条件概率建模语言模型,即一句话中第`$t$`个词的概率和该句话的前`$t-1$`个词相关。可实际上越远的词语其实对该词的影响越小,那么如果考虑一个n-gram, 每个词都只受其前面`n-1`个词的影响,则有: + +$$P(w_1, ..., w_T) = \prod_{t=n}^TP(w_t|w_{t-1}, w_{t-2}, ..., w_{t-n+1})$$ + +给定一些真实语料,这些语料中都是有意义的句子,N-gram模型的优化目标则是最大化目标函数: + +$$\frac{1}{T}\sum_t f(w_t, w_{t-1}, ..., w_{t-n+1};\theta) + R(\theta)$$ + +其中`$f(w_t, w_{t-1}, ..., w_{t-n+1})$`表示根据历史n-1个词得到当前词`$w_t$`的条件概率,`$R(\theta)$`表示参数正则项。 + +![nnlm](./image/nnlm.png) +

+图2. N-gram神经网络模型 +

+ +图2展示了N-gram神经网络模型,从下往上看,该模型分为以下几个部分: +- 对于每个样本,模型输入`$w_{t-n+1},...w_{t-1}$`, 输出句子第t个词为字典中`|V|`个词的概率。 + +每个输入词`$w_{t-n+1},...w_{t-1}$`首先通过映射矩阵映射到词向量`$C(w_{t-n+1}),...C(w_{t-1})$`。 + +- 然后所有词语的词向量连接成一个大向量,并经过一个非线性映射得到历史词语的隐层表示: + +$$g=Utanh(\theta^Tx + b_1) + Wx + b_2$$ + +其中,`$x$`为所有词语的词向量连接成的大向量,表示文本历史特征;`$\theta$`、`$U$`、`$b_1$`、`$b_2$`和`$W$`分别为词向量层到隐层连接的参数。`$g$`表示未经归一化的所有输出单词概率,`$g_i$`表示未经归一化的字典中第`$i$`个单词的输出概率。 + +- 根据softmax的定义,通过归一化`$g_i$`, 生成目标词`$w_t$`的概率为: + +$$P(w_t | w_1, ..., w_{t-n+1}) = \frac{e^{g_{w_t}}}{\sum_i^{|V|} e^{g_i}}$$ + +- 整个网络的损失值(cost)为多类分类交叉熵,用公式表示为 + +$$J(\theta) = -\sum_{i=1}^N\sum_{c=1}^{|V|}y_k^{i}log(softmax(g_k^i))$$ + +其中`$y_k^i$`表示第`$i$`个样本第`$k$`类的真实标签(0或1),`$softmax(g_k^i)$`表示第i个样本第k类softmax输出的概率。 + + + +### Continuous Bag-of-Words model(CBOW) + +CBOW模型通过一个词的上下文(各N个词)预测当前词。当N=2时,模型如下图所示: + +![cbow](./image/cbow.png) +

+图3. CBOW模型 +

+ +具体来说,不考虑上下文的词语输入顺序,CBOW是用上下文词语的词向量的均值来预测当前词。即: + +$$context = \frac{x_{t-1} + x_{t-2} + x_{t+1} + x_{t+2}}{4}$$ + +其中`$x_t$`为第`$t$`个词的词向量,分类分数(score)向量 `$z=U*context$`,最终的分类`$y$`采用softmax,损失函数采用多类分类交叉熵。 + +### Skip-gram model + +CBOW的好处是对上下文词语的分布在词向量上进行了平滑,去掉了噪声,因此在小数据集上很有效。而Skip-gram的方法中,用一个词预测其上下文,得到了当前词上下文的很多样本,因此可用于更大的数据集。 + +![skipgram](./image/skipgram.png) +

+图4. Skip-gram模型 +

+ +如上图所示,Skip-gram模型的具体做法是,将一个词的词向量映射到`$2n$`个词的词向量(`$2n$`表示当前输入词的前后各`$n$`个词),然后分别通过softmax得到这`$2n$`个词的分类损失值之和。 + + +## 数据准备 + +### 数据介绍 + +本教程使用Penn Treebank (PTB)(经Tomas Mikolov预处理过的版本)数据集。PTB数据集较小,训练速度快,应用于Mikolov的公开语言模型训练工具\[[2](#参考文献)\]中。其统计情况如下: + +

+| 训练数据 | 验证数据 | 测试数据 |
+| --- | --- | --- |
+| ptb.train.txt | ptb.valid.txt | ptb.test.txt |
+| 42068句 | 3370句 | 3761句 |

+ + +### 数据预处理 + +本章训练的是5-gram模型,表示在PaddlePaddle训练时,每条数据的前4个词用来预测第5个词。PaddlePaddle提供了对应PTB数据集的python包`paddle.dataset.imikolov`,自动做数据的下载与预处理,方便大家使用。 + +预处理会把数据集中的每一句话前后加上开始符号``以及结束符号``。然后依据窗口大小(本教程中为5),从头到尾每次向右滑动窗口并生成一条数据。 + +如"I have a dream that one day" 一句提供了5条数据: + +```text + I have a dream +I have a dream that +have a dream that one +a dream that one day +dream that one day +``` + +最后,每个输入会按其单词次在字典里的位置,转化成整数的索引序列,作为PaddlePaddle的输入。 + +## 编程实现 + +本配置的模型结构如下图所示: + +![ngram](./image/ngram.png) +

+图5. 模型配置中的N-gram神经网络模型 +
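+在用代码实现图5的网络之前,不妨先用一小段示意代码回顾上一节"数据预处理"中描述的滑窗过程(句子与词典均为假设的迷你示例,开始/结束符号在此假设记为 `<s>` 与 `<e>`;实际处理由 `paddle.dataset.imikolov` 自动完成):
+
+```python
+def build_ngrams(sentence, n=5):
+    # 句子前后补上开始与结束符号
+    words = ['<s>'] + sentence.split() + ['<e>']
+    # 从头到尾每次把窗口右移一个词,每个窗口即一条数据
+    return [words[i:i + n] for i in range(len(words) - n + 1)]
+
+# 假设的迷你词典;实际词典由 paddle.dataset.imikolov.build_dict() 生成
+word_dict = {'<s>': 0, '<e>': 1, 'i': 2, 'have': 3, 'a': 4,
+             'dream': 5, 'that': 6, 'one': 7, 'day': 8}
+
+for gram in build_ngrams('i have a dream that one day'):
+    # 每个词转为其在词典中的索引:前4个索引作输入,第5个作预测标签
+    print([word_dict[w] for w in gram])
+```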

+ +首先,加载所需要的包: + +```python +import paddle +import paddle.fluid as fluid +import numpy +from functools import partial +import math +import os +import sys +``` + +然后,定义参数: +```python +EMBED_SIZE = 32 # word vector dimension +HIDDEN_SIZE = 256 # hidden layer dimension +N = 5 # train 5-gram +BATCH_SIZE = 32 # batch size + +# can use CPU or GPU +use_cuda = os.getenv('WITH_GPU', '0') != '0' + +word_dict = paddle.dataset.imikolov.build_dict() +dict_size = len(word_dict) +``` + +不同于之前的PaddlePaddle v2版本,在新的Fluid版本里,我们不必再手动计算词向量。PaddlePaddle提供了一个内置的方法`fluid.layers.embedding`,我们就可以直接用它来构造 N-gram 神经网络。 + +- 我们来定义我们的 N-gram 神经网络结构。这个结构在训练和预测中都会使用到。因为词向量比较稀疏,我们传入参数 `is_sparse == True`, 可以加速稀疏矩阵的更新。 + +```python +def inference_program(is_sparse): +first_word = fluid.layers.data(name='firstw', shape=[1], dtype='int64') +second_word = fluid.layers.data(name='secondw', shape=[1], dtype='int64') +third_word = fluid.layers.data(name='thirdw', shape=[1], dtype='int64') +fourth_word = fluid.layers.data(name='fourthw', shape=[1], dtype='int64') + +embed_first = fluid.layers.embedding( +input=first_word, +size=[dict_size, EMBED_SIZE], +dtype='float32', +is_sparse=is_sparse, +param_attr='shared_w') +embed_second = fluid.layers.embedding( +input=second_word, +size=[dict_size, EMBED_SIZE], +dtype='float32', +is_sparse=is_sparse, +param_attr='shared_w') +embed_third = fluid.layers.embedding( +input=third_word, +size=[dict_size, EMBED_SIZE], +dtype='float32', +is_sparse=is_sparse, +param_attr='shared_w') +embed_fourth = fluid.layers.embedding( +input=fourth_word, +size=[dict_size, EMBED_SIZE], +dtype='float32', +is_sparse=is_sparse, +param_attr='shared_w') + +concat_embed = fluid.layers.concat( +input=[embed_first, embed_second, embed_third, embed_fourth], axis=1) +hidden1 = fluid.layers.fc(input=concat_embed, +size=HIDDEN_SIZE, +act='sigmoid') +predict_word = fluid.layers.fc(input=hidden1, size=dict_size, act='softmax') +return predict_word +``` + +- 基于以上的神经网络结构,我们可以如下定义我们的`训练`方法 + +```python +def train_program(is_sparse): +# The declaration of 'next_word' must be after the invoking of inference_program, +# or the data input order of train program would be [next_word, firstw, secondw, +# thirdw, fourthw], which is not correct. +predict_word = inference_program(is_sparse) +next_word = fluid.layers.data(name='nextw', shape=[1], dtype='int64') +cost = fluid.layers.cross_entropy(input=predict_word, label=next_word) +avg_cost = fluid.layers.mean(cost) +return avg_cost +``` + +- 现在我们可以开始训练啦。如今的版本较之以前就简单了许多。我们有现成的训练和测试集:`paddle.dataset.imikolov.train()`和`paddle.dataset.imikolov.test()`。两者都会返回一个读取器。在PaddlePaddle中,读取器是一个Python的函数,每次调用,会读取下一条数据。它是一个Python的generator。 + +`paddle.batch` 会读入一个读取器,然后输出一个批次化了的读取器。`event_handler`亦可以一并传入`trainer.train`来时不时的输出每个步骤,批次的训练情况。 + +```python +def optimizer_func(): +# Note here we need to choose more sophisticated optimizers +# such as AdaGrad with a decay rate. The normal SGD converges +# very slowly. +# optimizer=fluid.optimizer.SGD(learning_rate=0.001), +return fluid.optimizer.AdagradOptimizer( +learning_rate=3e-3, +regularization=fluid.regularizer.L2DecayRegularizer(8e-4)) + + +def train(use_cuda, train_program, params_dirname): +train_reader = paddle.batch( +paddle.dataset.imikolov.train(word_dict, N), BATCH_SIZE) +test_reader = paddle.batch( +paddle.dataset.imikolov.test(word_dict, N), BATCH_SIZE) + +place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() + +def event_handler(event): +if isinstance(event, fluid.EndStepEvent): +# We output cost every 10 steps. 
+if event.step % 10 == 0: +outs = trainer.test( +reader=test_reader, +feed_order=['firstw', 'secondw', 'thirdw', 'fourthw', 'nextw']) +avg_cost = outs[0] + +print "Step %d: Average Cost %f" % (event.step, avg_cost) + +# If average cost is lower than 5.8, we consider the model good enough to stop. +# Note 5.8 is a relatively high value. In order to get a better model, one should +# aim for avg_cost lower than 3.5. But the training could take longer time. +if avg_cost < 5.8: +trainer.save_params(params_dirname) +trainer.stop() + +if math.isnan(avg_cost): +sys.exit("got NaN loss, training failed.") + +trainer = fluid.Trainer( +train_func=train_program, +optimizer_func=optimizer_func, +place=place) + +trainer.train( +reader=train_reader, +num_epochs=1, +event_handler=event_handler, +feed_order=['firstw', 'secondw', 'thirdw', 'fourthw', 'nextw']) +``` + +- `trainer.train`将会开始训练。从`event_handler`返回的监控情况如下: + +```python +Step 0: Average Cost 7.337213 +Step 10: Average Cost 6.136128 +Step 20: Average Cost 5.766995 +... +``` + +## 模型应用 +在模型训练后,我们可以用它做一些预测。 + +### 预测下一个词 +我们可以用我们训练过的模型,在得知之前的 N-gram 后,预测下一个词。 + +```python +def infer(use_cuda, inference_program, params_dirname=None): +place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() +inferencer = fluid.Inferencer( +infer_func=inference_program, param_path=params_dirname, place=place) + +# Setup inputs by creating 4 LoDTensors representing 4 words. Here each word +# is simply an index to look up for the corresponding word vector and hence +# the shape of word (base_shape) should be [1]. The length-based level of +# detail (lod) info of each LoDtensor should be [[1]] meaning there is only +# one lod_level and there is only one sequence of one word on this level. +# Note that lod info should be a list of lists. + +data1 = [[211]] # 'among' +data2 = [[6]] # 'a' +data3 = [[96]] # 'group' +data4 = [[4]] # 'of' +lod = [[1]] + +first_word = fluid.create_lod_tensor(data1, lod, place) +second_word = fluid.create_lod_tensor(data2, lod, place) +third_word = fluid.create_lod_tensor(data3, lod, place) +fourth_word = fluid.create_lod_tensor(data4, lod, place) + +result = inferencer.infer( +{ +'firstw': first_word, +'secondw': second_word, +'thirdw': third_word, +'fourthw': fourth_word +}, +return_numpy=False) + +print(numpy.array(result[0])) +most_possible_word_index = numpy.argmax(result[0]) +print(most_possible_word_index) +print([ +key for key, value in word_dict.iteritems() +if value == most_possible_word_index +][0]) +``` + +在经历3分钟的短暂训练后,我们得到如下的预测。我们的模型预测 `among a group of` 的下一个词是`a`。这比较符合文法规律。如果我们训练时间更长,比如几个小时,那么我们会得到的下一个预测是 `workers`。 + + +```python +[[0.00106646 0.0007907 0.00072041 ... 0.00049024 0.00041355 0.00084464]] +6 +a +``` + +整个程序的入口很简单: + +```python +def main(use_cuda, is_sparse): +if use_cuda and not fluid.core.is_compiled_with_cuda(): +return + +params_dirname = "word2vec.inference.model" + +train( +use_cuda=use_cuda, +train_program=partial(train_program, is_sparse), +params_dirname=params_dirname) + +infer( +use_cuda=use_cuda, +inference_program=partial(inference_program, is_sparse), +params_dirname=params_dirname) + + +main(use_cuda=use_cuda, is_sparse=True) +``` + + +## 总结 +本章中,我们介绍了词向量、语言模型和词向量的关系、以及如何通过训练神经网络模型获得词向量。在信息检索中,我们可以根据向量间的余弦夹角,来判断query和文档关键词这二者间的相关性。在句法分析和语义分析中,训练好的词向量可以用来初始化模型,以得到更好的效果。在文档分类中,有了词向量之后,可以用聚类的方法将文档中同义词进行分组,也可以用 N-gram 来预测下一个词。希望大家在本章后能够自行运用词向量进行相关领域的研究。 + + +## 参考文献 +1. Bengio Y, Ducharme R, Vincent P, et al. 
[A neural probabilistic language model](http://www.jmlr.org/papers/volume3/bengio03a/bengio03a.pdf)[J]. journal of machine learning research, 2003, 3(Feb): 1137-1155. +2. Mikolov T, Kombrink S, Deoras A, et al. [Rnnlm-recurrent neural network language modeling toolkit](http://www.fit.vutbr.cz/~imikolov/rnnlm/rnnlm-demo.pdf)[C]//Proc. of the 2011 ASRU Workshop. 2011: 196-201. +3. Mikolov T, Chen K, Corrado G, et al. [Efficient estimation of word representations in vector space](https://arxiv.org/pdf/1301.3781.pdf)[J]. arXiv preprint arXiv:1301.3781, 2013. +4. Maaten L, Hinton G. [Visualizing data using t-SNE](https://lvdmaaten.github.io/publications/papers/JMLR_2008.pdf)[J]. Journal of Machine Learning Research, 2008, 9(Nov): 2579-2605. +5. https://en.wikipedia.org/wiki/Singular_value_decomposition + +
+知识共享许可协议
本教程PaddlePaddle 创作,采用 知识共享 署名-相同方式共享 4.0 国际 许可协议进行许可。 diff --git a/doc/fluid/new_docs/beginners_guide/index.rst b/doc/fluid/new_docs/beginners_guide/index.rst new file mode 100644 index 0000000000..e18933dcc0 --- /dev/null +++ b/doc/fluid/new_docs/beginners_guide/index.rst @@ -0,0 +1,15 @@ +######## +新手入门 +######## + +.. todo:: + + 新手入门的导引文字,需要完善。 + +.. toctree:: + :maxdepth: 2 + + install/install_doc.rst + quick_start/index.rst + basics/index.rst + basics/learning_materials.md diff --git a/doc/fluid/new_docs/beginners_guide/install/install_doc.rst b/doc/fluid/new_docs/beginners_guide/install/install_doc.rst new file mode 100644 index 0000000000..8a66a95f45 --- /dev/null +++ b/doc/fluid/new_docs/beginners_guide/install/install_doc.rst @@ -0,0 +1,543 @@ +.. _how_to_install: + +安装说明 +^^^^^^^^ + +若您的系统为Linux或Windows,您可以使用我们提供的安装包来安装PaddlePaddle。 + +对于MacOS系统,我们暂未提供安装包,您可以使用 **从源码编译** 的方式安装。 + + +.. _install_linux: + +在Linux安装PaddlePaddle +-------- + +推荐您使用 `pip `_ +安装,它是Linux系统下最简单的安装方式。 + +注意事项: + +- PaddlePaddle Python API 依赖Python 2.7版本。 + +执行下面的命令即可在当前机器上安装PaddlePaddle的运行时环境,并自动下载安装依赖软件。 + + .. code-block:: bash + + pip install paddlepaddle + +您可以通过指定版本号来安装其它版本,例如: + + .. code-block:: bash + + pip install paddlepaddle==0.13.0 + + +如果需要安装支持GPU的版本(cuda9.0_cudnn7_avx_openblas),需要执行: + + .. code-block:: bash + + pip install paddlepaddle-gpu + +PaddlePaddle针对不同需求提供了更多版本的安装包,部分列表如下: + +================================= ======================================== +版本号 版本说明 +================================= ======================================== +paddlepaddle-gpu==0.14.0 使用CUDA 9.0和cuDNN 7编译的0.14.0版本 +paddlepaddle-gpu==0.14.0.post87 使用CUDA 8.0和cuDNN 7编译的0.14.0版本 +paddlepaddle-gpu==0.14.0.post85 使用CUDA 8.0和cuDNN 5编译的0.14.0版本 +paddlepaddle-gpu==0.13.0 使用CUDA 9.0和cuDNN 7编译的0.13.0版本 +paddlepaddle-gpu==0.12.0 使用CUDA 8.0和cuDNN 5编译的0.12.0版本 +paddlepaddle-gpu==0.11.0.post87 使用CUDA 8.0和cuDNN 7编译的0.11.0版本 +paddlepaddle-gpu==0.11.0.post8 使用CUDA 8.0和cuDNN 5编译的0.11.0版本 +paddlepaddle-gpu==0.11.0 使用CUDA 7.5和cuDNN 5编译的0.11.0版本 +================================= ======================================== + +您可以在 `Release History `_ +中找到paddlepaddle-gpu的各个发行版本。 + +如果需要获取并安装最新的PaddlePaddle开发分支,可以从我们的 `CI系统 `_ 中下载最新的whl安装包和c-api开发包并安装。如需登录,请点击“Log in as guest”。 + +.. _FAQ: + +安装常见问题和解决方法 +====================== + +- paddlepaddle*.whl is not a supported wheel on this platform. + +出现这个问题的主要原因是,没有找到和当前系统匹配的paddlepaddle安装包。 +请检查Python版本是否为2.7系列。另外最新的pip官方源中的安装包默认是manylinux1标准, +需要使用最新的pip (>9.0.0) 才可以安装。 + +可以使用下面的命令更新您的pip: + + .. code-block:: bash + + pip install --upgrade pip + +如果仍然存在问题,可以执行: + + .. code-block:: bash + + python -c "import pip; print(pip.pep425tags.get_supported())" + +获取当前系统支持的安装包格式,并检查和需安装的包是否匹配。pypi安装包 +可以在 `这里 `_ 找到。 + +如果系统支持的是 linux_x86_64 而安装包是 manylinux1_x86_64 ,需要升级pip版本到最新; +如果系统支持 manylinux1_x86_64 而安装包(本地)是 linux_x86_64, +可以重命名这个whl包为 manylinux1_x86_64 再安装。 + + +.. _install_windows: + +在Windows安装PaddlePaddle +------------------------------ +Windows系统需要通过Docker来使用PaddleaPaddle。Docker是一个虚拟容器,使用Docker可以简化复杂的环境配置工作。 + +我们提供了 `PaddlePaddle_Windows快速安装包 `_, +它能够帮助您安装Docker和PaddlePaddle。 + +* 安装包支持的系统:Windows7,Windows8的所有版本,Windows10的专业版、企业版。 + +* 如果您希望使用GPU提升训练速度,请使用Linux系统安装,Windows系统暂不支持。 + +.. _install_mac: + +在MacOS安装PaddlePaddle +-------- + +对于MacOS系统,我们暂未提供pip安装方式,您可以使用 **源码编译** 的方式安装。 + +.. _others: + +其他安装方式 +------------- + +.. _source: +源码编译(使用Docker镜像) +========== + +.. _requirements: + +需要的软硬件 +""""""""""""" + +为了编译PaddlePaddle,我们需要 + +1. 一台电脑,可以装的是 Linux, Windows 或者 MacOS 操作系统 +2. 
Docker + +不需要依赖其他任何软件了。即便是 Python 和 GCC 都不需要,因为我们会把所有编译工具都安装进一个 Docker 镜像里。 + +.. _build_step: + +编译方法 +""""""""""""" + +PaddlePaddle需要使用Docker环境完成编译,这样可以免去单独安装编译依赖的步骤,可选的不同编译环境Docker镜像可以在 `这里 `_ 找到。 + + +**I. 编译CPU-Only版本的PaddlePaddle,需要执行:** + +.. code-block:: bash + + # 1. 获取源码 + git clone https://github.com/PaddlePaddle/Paddle.git + cd Paddle + # 2. 执行如下命令下载最新版本的docker镜像 + docker run --name paddle-test -v $PWD:/paddle --network=host -it docker.paddlepaddlehub.com/paddle:latest-dev /bin/bash + # 3. 进入docker内执行如下命令编译CPU-Only的二进制安装包 + mkdir -p /paddle/build && cd /paddle/build + cmake .. -DWITH_FLUID_ONLY=ON -DWITH_GPU=OFF -DWITH_TESTING=OFF + make -j$(nproc) + +**II. 编译GPU版本的PaddlePaddle,需要执行:** + +.. code-block:: bash + + # 1. 获取源码 + git clone https://github.com/PaddlePaddle/Paddle.git + cd Paddle + # 2. 安装nvidia-docker + apt-get install nvidia-docker + # 3. 执行如下命令下载支持GPU运行的docker容器 + nvidia-docker run --name paddle-test-gpu -v $PWD:/paddle --network=host -it docker.paddlepaddlehub.com/paddle:latest-dev /bin/bash + # 4. 进入docker内执行如下命令编译GPU版本的PaddlePaddle + mkdir -p /paddle/build && cd /paddle/build + cmake .. -DWITH_FLUID_ONLY=ON -DWITH_GPU=ON -DWITH_TESTING=OFF + make -j$(nproc) + +**注意事项:** + +* 上述有关 :code:`docker` 的命令把当前目录(源码树根目录)映射为 container 里的 :code:`/paddle` 目录。 +* 进入 :code:`docker` 后执行 :code:`cmake` 命令,若是出现 :code:`patchelf not found, please install it.` 错误,则执行 :code:`apt-get install -y patchelf` 命令即可解决问题。 +* 若您在使用Docker编译PaddlePaddle遇到问题时, `这个issue `_ 可能会对您有所帮助。 + + +.. _source: +源码编译(不使用Docker镜像) +========== + +如果您选择不使用Docker镜像,则需要在本机安装下面章节列出的 `附录:编译依赖`_ 之后才能开始编译的步骤。 + +.. _build_step: + +编译方法 +""""""""""""" + +在本机上编译CPU-Only版本的PaddlePaddle,需要执行如下命令: + +.. code-block:: bash + + # 1. 使用virtualenvwrapper创建python虚环境并将工作空间切换到虚环境 [可选] + mkvirtualenv paddle-venv + workon paddle-venv + # 2. 获取源码 + git clone https://github.com/PaddlePaddle/Paddle.git + cd Paddle + # 3. 执行下面的命令编译CPU-Only的二进制 + mkdir build && cd build + cmake .. -DWITH_FLUID_ONLY=ON -DWITH_GPU=OFF -DWITH_TESTING=OFF + make -j4 # 根据机器配备CPU的核心数开启相应的多线程进行编译 + + +**注意事项:** + +* MacOS系统下因为默认安装了cblas库,所以编译时可能会遇到 :code:`use of undeclared identifier 'openblas_set_num_threads'` 错误。因此,在执行cmake命令时需要指定所使用openblas库的头文件路径,具体操作如下: + + .. code-block:: bash + + cd Paddle/build && rm -rf * + cmake .. -DWITH_FLUID_ONLY=ON -DWITH_GPU=OFF -DWITH_TESTING=OFF -DOPENBLAS_INC_DIR=/usr/local/Cellar/openblas/[本机所安装的openblas版本号]/include/ + make -j4 # 根据机器配备CPU的核心数开启相应的多线程进行编译 +* 若您在MacOS系统下从源码编译PaddlePaddle遇到问题时, `这个issue `_ 可能会对您有所帮助。 + +编译完成后会在build/python/dist目录下生成输出的whl包,可以选在在当前机器安装也可以拷贝到目标机器安装: + +.. code-block:: bash + + pip install build/python/dist/*.whl + +如果机器中已经安装过PaddlePaddle,有两种方法: + +.. code-block:: bash + + 1. 先卸载之前的版本,再重新安装 + pip uninstall paddlepaddle + pip install build/python/dist/*.whl + + 2. 直接升级到更新的版本 + pip install build/python/dist/*.whl -U + +.. _run_test: + +执行单元测试 +""""""""""""" + +如果您期望在编译完成后立即执行所有的单元测试,可以按照下面的方法: + +设置 :code:`RUN_TEST=ON` 和 :code:`WITH_TESTING=ON` 就会在完成编译之后,立即执行单元测试。 +开启 :code:`WITH_GPU=ON` 可以指定同时执行GPU上的单元测试。 + +.. code-block:: bash + + docker run -it -v $PWD:/paddle -e "WITH_GPU=OFF" -e "WITH_TESTING=ON" -e "RUN_TEST=ON" docker.paddlepaddlehub.com/paddle:latest-dev bash -x /paddle/paddle/scripts/paddle_build.sh build + +如果期望执行其中一个单元测试,(比如 :code:`test_sum_op` ): + +.. 
code-block:: bash + + docker run -it -v $PWD:/paddle -e "WITH_GPU=OFF" -e "WITH_TESTING=ON" -e "RUN_TEST=OFF" docker.paddlepaddlehub.com/paddle:latest-dev bash -x /paddle/paddle/scripts/paddle_build.sh build + cd /paddle/build + ctest -R test_sum_op -V + +.. _faq_docker: + +常见问题 +""""""""""""" + +- 什么是 Docker? + + 如果您没有听说 Docker,可以把它想象为一个类似 virtualenv 的系统,但是虚拟的不仅仅是 Python 的运行环境。 + +- Docker 还是虚拟机? + + 有人用虚拟机来类比 Docker。需要强调的是:Docker 不会虚拟任何硬件,Docker container 里运行的编译工具实际上都是在本机的 CPU 和操作系统上直接运行的,性能和把编译工具安装在本机运行一样。 + +- 为什么用 Docker? + + 把工具和配置都安装在一个 Docker image 里可以标准化编译环境。这样如果遇到问题,其他人可以复现问题以便帮助。 + + 另外,对于习惯使用Windows和MacOS的开发者来说,使用Docker就不用配置交叉编译环境了。 + +- 可以选择不用Docker吗? + + 当然可以。大家可以用把开发工具安装进入 Docker image 一样的方式,把这些工具安装到本机。这篇文档介绍基于 Docker 的开发流程,是因为这个流程比其他方法都更简便。 + +- 学习 Docker 有多难? + + 理解 Docker 并不难,大概花十分钟看一下 `这篇文章 `_。 + 这可以帮您省掉花一小时安装和配置各种开发工具,以及切换机器时需要新安装的辛苦。别忘了 PaddlePaddle 更新可能导致需要新的开发工具。更别提简化问题复现带来的好处了。 + +- 可以用 IDE 吗? + + 当然可以,因为源码就在本机上。IDE 默认调用 make 之类的程序来编译源码,我们只需要配置 IDE 来调用 Docker 命令编译源码即可。 + + 很多 PaddlePaddle 开发者使用 Emacs。他们在自己的 `~/.emacs` 配置文件里加两行 + + .. code-block:: bash + + (global-set-key "\C-cc" 'compile) + (setq compile-command + "docker run --rm -it -v $(git rev-parse --show-toplevel):/paddle paddle:dev") + + 就可以按 `Ctrl-C` 和 `c` 键来启动编译了。 + +- 可以并行编译吗? + + 是的。我们的 Docker image 运行一个 `Bash 脚本 `_。这个脚本调用 :code:`make -j$(nproc)` 来启动和 CPU 核一样多的进程来并行编译。 + +- Docker 需要 sudo + + 如果用自己的电脑开发,自然也就有管理员权限(sudo)了。如果用公用的电脑开发,需要请管理员安装和配置好 Docker。此外,PaddlePaddle 项目在努力开始支持其他不需要 sudo 的集装箱技术,比如 rkt。 + +- 在 Windows/MacOS 上编译很慢 + + Docker 在 Windows 和 MacOS 都可以运行。不过实际上是运行在一个 Linux 虚拟机上。可能需要注意给这个虚拟机多分配一些 CPU 和内存,以保证编译高效。具体做法请参考 `这个issue `_。 + +- 磁盘不够 + + 本文中的例子里, :code:`docker run` 命令里都用了 :code:`--rm` 参数,这样保证运行结束之后的 containers 不会保留在磁盘上。可以用 :code:`docker ps -a` 命令看到停止后但是没有删除的 containers。 :code:`docker build` 命令有时候会产生一些中间结果,是没有名字的 images,也会占用磁盘。可以参考 `这篇文章 `_ 来清理这些内容。 + + +.. _compile_deps: + +附录:编译依赖 +""""""""""""" + +PaddlePaddle编译需要使用到下面的依赖(包含但不限于),其他的依赖软件,会自动在编译时下载。 + +.. csv-table:: PaddlePaddle编译依赖 + :header: "依赖", "版本", "说明" + :widths: 10, 15, 30 + + "CMake", "3.4", "" + "GCC", "4.8.2", "推荐使用CentOS的devtools2" + "Python", "2.7.x", "依赖libpython2.7.so" + "SWIG", ">=2.0", "" + "wget","","" + "openblas","","" + "pip", ">=9.0", "" + "numpy", "", "" + "protobuf","3.1.0","" + "wheel","","" + "Go", ">=1.8", "可选" + + +.. _build_options: + +附录:编译选项 +""""""""""""" + +PaddlePaddle的编译选项,包括生成CPU/GPU二进制文件、链接何种BLAS库等。 +用户可在调用cmake的时候设置它们,详细的cmake使用方法可以参考 +`官方文档 `_ 。 + +在cmake的命令行中,通过使用 ``-D`` 命令设置该类编译选项,例如: + +.. code-block:: bash + + cmake .. -DWITH_GPU=OFF + +.. 
csv-table:: 编译选项说明 + :header: "选项", "说明", "默认值" + :widths: 1, 7, 2 + + "WITH_GPU", "是否支持GPU", "ON" + "WITH_C_API", "是否仅编译CAPI", "OFF" + "WITH_DOUBLE", "是否使用双精度浮点数", "OFF" + "WITH_DSO", "是否运行时动态加载CUDA动态库,而非静态加载CUDA动态库。", "ON" + "WITH_AVX", "是否编译含有AVX指令集的PaddlePaddle二进制文件", "ON" + "WITH_PYTHON", "是否内嵌PYTHON解释器", "ON" + "WITH_STYLE_CHECK", "是否编译时进行代码风格检查", "ON" + "WITH_TESTING", "是否开启单元测试", "OFF" + "WITH_DOC", "是否编译中英文文档", "OFF" + "WITH_SWIG_PY", "是否编译PYTHON的SWIG接口,该接口可用于预测和定制化训练", "Auto" + "WITH_GOLANG", "是否编译go语言的可容错parameter server", "OFF" + "WITH_MKL", "是否使用MKL数学库,如果为否则是用OpenBLAS", "ON" + +BLAS ++++++ + +PaddlePaddle支持 `MKL `_ 和 +`OpenBlAS `_ 两种BLAS库。默认使用MKL。如果使用MKL并且机器含有AVX2指令集, +还会下载MKL-DNN数学库,详细参考 `这里 `_ 。 + +如果关闭MKL,则会使用OpenBLAS作为BLAS库。 + +CUDA/cuDNN ++++++++++++ + +PaddlePaddle在编译时/运行时会自动找到系统中安装的CUDA和cuDNN库进行编译和执行。 +使用参数 :code:`-DCUDA_ARCH_NAME=Auto` 可以指定开启自动检测SM架构,加速编译。 + +PaddlePaddle可以使用cuDNN v5.1之后的任何一个版本来编译运行,但尽量请保持编译和运行使用的cuDNN是同一个版本。 +我们推荐使用最新版本的cuDNN。 + +编译选项的设置 +++++++++++++++ + +PaddePaddle通过编译时指定路径来实现引用各种BLAS/CUDA/cuDNN库。cmake编译时,首先在系统路径( :code:`/usr/lib:/usr/local/lib` )中搜索这几个库,同时也会读取相关路径变量来进行搜索。 通过使用 ``-D`` 命令可以设置,例如 + +.. code-block:: bash + + cmake .. -DWITH_GPU=ON -DWITH_TESTING=OFF -DCUDNN_ROOT=/opt/cudnnv5 + +注意:这几个编译选项的设置,只在第一次cmake的时候有效。如果之后想要重新设置,推荐清理整个编译目录( :code:`rm -rf` )后,再指定。 + +.. _install_docker: + +使用Docker安装运行 +================== + +使用Docker安装和运行PaddlePaddle可以无需考虑依赖环境。 +您可以在 `Docker官网 `_ +获得基本的Docker安装和使用方法。 + +在了解Docker的基本使用方法之后,即可开始下面的步骤: + +.. _docker_pull: + +获取PaddlePaddle的Docker镜像 +"""""""""""""""""""""""""""" + +执行下面的命令获取最新的PaddlePaddle Docker镜像,版本为cpu_avx_mkl: + + .. code-block:: bash + + docker pull paddlepaddle/paddle + +对于国内用户,我们提供了加速访问的镜像源: + + .. code-block:: bash + + docker pull docker.paddlepaddlehub.com/paddle + +下载GPU版本(cuda8.0_cudnn5_avx_mkl)的Docker镜像: + + .. code-block:: bash + + docker pull paddlepaddle/paddle:latest-gpu + docker pull docker.paddlepaddlehub.com/paddle:latest-gpu + +选择下载使用不同的BLAS库的Docker镜像: + + .. code-block:: bash + + # 默认是使用MKL的镜像 + docker pull paddlepaddle/paddle + # 使用OpenBLAS的镜像 + docker pull paddlepaddle/paddle:latest-openblas + +下载指定版本的Docker镜像,可以从 `DockerHub网站 `_ 获取可选的tag,并执行下面的命令: + + .. code-block:: bash + + docker pull paddlepaddle/paddle:[tag] + # 比如: + docker pull docker.paddlepaddlehub.com/paddle:0.11.0-gpu + +.. _docker_run: + +在Docker中执行PaddlePaddle训练程序 +""""""""""""""""""""""""""""""""""" + +假设您已经在当前目录(比如在/home/work)编写了一个PaddlePaddle的程序 :code:`train.py` (可以参考 +`PaddlePaddleBook `_ +编写),就可以使用下面的命令开始执行训练: + + .. code-block:: bash + + cd /home/work + docker run -it -v $PWD:/work paddlepaddle/paddle /work/train.py + +上述命令中, :code:`-it` 参数说明容器已交互式运行; :code:`-v $PWD:/work` +指定将当前路径(Linux中$PWD变量会展开为当前路径的绝对路径)挂载到容器内部的 :code:`/work` +目录; :code:`paddlepaddle/paddle` 指定需要使用的容器; 最后 :code:`/work/train.py` +为容器内执行的命令,即运行训练程序。 + +当然,您也可以进入到Docker容器中,以交互式的方式执行或调试您的代码: + + .. code-block:: bash + docker run -it -v $PWD:/work paddlepaddle/paddle /bin/bash + cd /work + python train.py + +**注:PaddlePaddle Docker镜像为了减小体积,默认没有安装vim,您可以在容器中执行** :code:`apt-get install -y vim` **安装后,在容器中编辑代码。** + +.. _docker_run_book: + +使用Docker启动PaddlePaddle Book教程 +"""""""""""""""""""""""""""""""""""" + +使用Docker可以快速在本地启动一个包含了PaddlePaddle官方Book教程的Jupyter Notebook,可以通过网页浏览。 +PaddlePaddle Book是为用户和开发者制作的一个交互式的Jupyter Notebook。 +如果您想要更深入了解deep learning,PaddlePaddle Book一定是您最好的选择。 +大家可以通过它阅读教程,或者制作和分享带有代码、公式、图表、文字的交互式文档。 + +我们提供可以直接运行PaddlePaddle Book的Docker镜像,直接运行: + + .. 
code-block:: bash + + docker run -p 8888:8888 paddlepaddle/book + +国内用户可以使用下面的镜像源来加速访问: + + .. code-block: bash + + docker run -p 8888:8888 docker.paddlepaddlehub.com/book + +然后在浏览器中输入以下网址: + + .. code-block:: text + + http://localhost:8888/ + +就这么简单,享受您的旅程! + +.. _docker_run_gpu: + +使用Docker执行GPU训练 +"""""""""""""""""""""""""""" + +为了保证GPU驱动能够在镜像里面正常运行,我们推荐使用 +`nvidia-docker `_ 来运行镜像。 +请不要忘记提前在物理机上安装GPU最新驱动。 + + .. code-block:: bash + + nvidia-docker run -it -v $PWD:/work paddlepaddle/paddle:latest-gpu /bin/bash + +**注: 如果没有安装nvidia-docker,可以尝试以下的方法,将CUDA库和Linux设备挂载到Docker容器内:** + + .. code-block:: bash + + export CUDA_SO="$(\ls /usr/lib64/libcuda* | xargs -I{} echo '-v {}:{}') $(\ls /usr/lib64/libnvidia* | xargs -I{} echo '-v {}:{}')" + export DEVICES=$(\ls /dev/nvidia* | xargs -I{} echo '--device {}:{}') + docker run ${CUDA_SO} ${DEVICES} -it paddlepaddle/paddle:latest-gpu + +**关于AVX:** + +AVX是一种CPU指令集,可以加速PaddlePaddle的计算。最新的PaddlePaddle Docker镜像默认 +是开启AVX编译的,所以,如果您的电脑不支持AVX,需要单独 +`编译 <./build_from_source_cn.html>`_ PaddlePaddle为no-avx版本。 + +以下指令能检查Linux电脑是否支持AVX: + + .. code-block:: bash + + if cat /proc/cpuinfo | grep -i avx; then echo Yes; else echo No; fi + +如果输出是No,就需要选择使用no-AVX的镜像 diff --git a/doc/fluid/new_docs/beginners_guide/install/paddleci.png b/doc/fluid/new_docs/beginners_guide/install/paddleci.png new file mode 100644 index 0000000000..16087ce059 Binary files /dev/null and b/doc/fluid/new_docs/beginners_guide/install/paddleci.png differ diff --git a/doc/fluid/new_docs/beginners_guide/quick_start/fit_a_line/README.cn.md b/doc/fluid/new_docs/beginners_guide/quick_start/fit_a_line/README.cn.md new file mode 100644 index 0000000000..ba43ada510 --- /dev/null +++ b/doc/fluid/new_docs/beginners_guide/quick_start/fit_a_line/README.cn.md @@ -0,0 +1,329 @@ +```eval_rst +.. _quick_start_fit_a_line: +``` +# 线性回归 +让我们从经典的线性回归(Linear Regression \[[1](#参考文献)\])模型开始这份教程。在这一章里,你将使用真实的数据集建立起一个房价预测模型,并且了解到机器学习中的若干重要概念。 + +本教程源代码目录在[book/fit_a_line](https://github.com/PaddlePaddle/book/tree/develop/01.fit_a_line), 初次使用请参考PaddlePaddle[安装教程](https://github.com/PaddlePaddle/book/blob/develop/README.cn.md#运行这本书)。 + +## 背景介绍 +给定一个大小为`$n$`的数据集 `${\{y_{i}, x_{i1}, ..., x_{id}\}}_{i=1}^{n}$`,其中`$x_{i1}, \ldots, x_{id}$`是第`$i$`个样本`$d$`个属性上的取值,`$y_i$`是该样本待预测的目标。线性回归模型假设目标`$y_i$`可以被属性间的线性组合描述,即 + +$$y_i = \omega_1x_{i1} + \omega_2x_{i2} + \ldots + \omega_dx_{id} + b, i=1,\ldots,n$$ + +例如,在我们将要建模的房价预测问题里,`$x_{ij}$`是描述房子`$i$`的各种属性(比如房间的个数、周围学校和医院的个数、交通状况等),而 `$y_i$`是房屋的价格。 + +初看起来,这个假设实在过于简单了,变量间的真实关系很难是线性的。但由于线性回归模型有形式简单和易于建模分析的优点,它在实际问题中得到了大量的应用。很多经典的统计学习、机器学习书籍\[[2,3,4](#参考文献)\]也选择对线性模型独立成章重点讲解。 + +## 效果展示 +我们使用从[UCI Housing Data Set](https://archive.ics.uci.edu/ml/datasets/Housing)获得的波士顿房价数据集进行模型的训练和预测。下面的散点图展示了使用模型对部分房屋价格进行的预测。其中,每个点的横坐标表示同一类房屋真实价格的中位数,纵坐标表示线性回归模型根据特征预测的结果,当二者值完全相等的时候就会落在虚线上。所以模型预测得越准确,则点离虚线越近。 + +![BostonHousePricePredictions](./image/predictions.png) +

图1. 预测值 vs. 真实值
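+图1这类"预测值 vs. 真实值"散点图可以用如下示意代码绘制。其中 `y_true` 与 `y_pred` 为假设的真实房价与模型预测数组,仅为演示画法:
+
+```python
+import numpy as np
+import matplotlib.pyplot as plt
+
+# 占位数据:真实价格中位数,以及加了噪声的"预测值"
+y_true = np.random.uniform(10, 50, 100)
+y_pred = y_true + np.random.normal(0, 3, 100)
+
+plt.scatter(y_true, y_pred)
+lims = [y_true.min(), y_true.max()]
+plt.plot(lims, lims, linestyle='--')  # 虚线 y = x:点越靠近虚线,预测越准
+plt.xlabel('True price')
+plt.ylabel('Predicted price')
+plt.show()
+```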

+ +## 模型概览 + +### 模型定义 + +在波士顿房价数据集中,和房屋相关的值共有14个:前13个用来描述房屋相关的各种信息,即模型中的 `$x_i$`;最后一个值为我们要预测的该类房屋价格的中位数,即模型中的 `$y_i$`。因此,我们的模型就可以表示成: + +$$\hat{Y} = \omega_1X_{1} + \omega_2X_{2} + \ldots + \omega_{13}X_{13} + b$$ + +`$\hat{Y}$` 表示模型的预测结果,用来和真实值`$Y$`区分。模型要学习的参数即:`$\omega_1, \ldots, \omega_{13}, b$`。 + +建立模型后,我们需要给模型一个优化目标,使得学到的参数能够让预测值`$\hat{Y}$`尽可能地接近真实值`$Y$`。这里我们引入损失函数([Loss Function](https://en.wikipedia.org/wiki/Loss_function),或Cost Function)这个概念。 输入任意一个数据样本的目标值`$y_{i}$`和模型给出的预测值`$\hat{y_{i}}$`,损失函数输出一个非负的实值。这个实值通常用来反映模型误差的大小。 + +对于线性回归模型来讲,最常见的损失函数就是均方误差(Mean Squared Error, [MSE](https://en.wikipedia.org/wiki/Mean_squared_error))了,它的形式是: + +$$MSE=\frac{1}{n}\sum_{i=1}^{n}{(\hat{Y_i}-Y_i)}^2$$ + +即对于一个大小为`$n$`的测试集,`$MSE$`是`$n$`个数据预测结果误差平方的均值。 + +### 训练过程 + +定义好模型结构之后,我们要通过以下几个步骤进行模型训练 +1. 初始化参数,其中包括权重`$\omega_i$`和偏置`$b$`,对其进行初始化(如0均值,1方差)。 +2. 网络正向传播计算网络输出和损失函数。 +3. 根据损失函数进行反向误差传播 ([backpropagation](https://en.wikipedia.org/wiki/Backpropagation)),将网络误差从输出层依次向前传递, 并更新网络中的参数。 +4. 重复2~3步骤,直至网络训练误差达到规定的程度或训练轮次达到设定值。 + +## 数据集 + +### 数据集介绍 +这份数据集共506行,每行包含了波士顿郊区的一类房屋的相关信息及该类房屋价格的中位数。其各维属性的意义如下: + +

+| 属性名 | 解释 | 类型 |
+| --- | --- | --- |
+| CRIM | 该镇的人均犯罪率 | 连续值 |
+| ZN | 占地面积超过25,000平方呎的住宅用地比例 | 连续值 |
+| INDUS | 非零售商业用地比例 | 连续值 |
+| CHAS | 是否邻近 Charles River | 离散值,1=邻近;0=不邻近 |
+| NOX | 一氧化氮浓度 | 连续值 |
+| RM | 每栋房屋的平均客房数 | 连续值 |
+| AGE | 1940年之前建成的自用单位比例 | 连续值 |
+| DIS | 到波士顿5个就业中心的加权距离 | 连续值 |
+| RAD | 到径向公路的可达性指数 | 连续值 |
+| TAX | 全值财产税率 | 连续值 |
+| PTRATIO | 学生与教师的比例 | 连续值 |
+| B | 1000(BK - 0.63)^2,其中BK为黑人占比 | 连续值 |
+| LSTAT | 低收入人群占比 | 连续值 |
+| MEDV | 同类房屋价格的中位数 | 连续值 |
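+上表中的前13个属性对应模型输入`$x_i$`,最后的 MEDV 即要预测的`$y_i$`。下面的示意代码用后文训练时将用到的 `paddle.dataset.uci_housing` 读取一条样本(假设已安装 PaddlePaddle,且该读取器如后文所示每次产出一条"13维特征 + 房价"的样本):
+
+```python
+import paddle
+
+# train() 返回一个 reader 创建函数,调用它得到逐条产出样本的 generator
+reader = paddle.dataset.uci_housing.train()
+features, price = next(reader())
+print(len(features))  # 13:对应上表前13个属性
+print(price)          # MEDV:同类房屋价格的中位数
+```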

+ +### 数据预处理 +#### 连续值与离散值 +观察一下数据,我们的第一个发现是:所有的13维属性中,有12维的连续值和1维的离散值(CHAS)。离散值虽然也常使用类似0、1、2这样的数字表示,但是其含义与连续值是不同的,因为这里的差值没有实际意义。例如,我们用0、1、2来分别表示红色、绿色和蓝色的话,我们并不能因此说“蓝色和红色”比“绿色和红色”的距离更远。所以通常对一个有`$d$`个可能取值的离散属性,我们会将它们转为`$d$`个取值为0或1的二值属性或者将每个可能取值映射为一个多维向量。不过就这里而言,因为CHAS本身就是一个二值属性,就省去了这个麻烦。 + +#### 属性的归一化 +另外一个稍加观察即可发现的事实是,各维属性的取值范围差别很大(如图2所示)。例如,属性B的取值范围是[0.32, 396.90],而属性NOX的取值范围是[0.3850, 0.8170]。这里就要用到一个常见的操作-归一化(normalization)了。归一化的目标是把各位属性的取值范围放缩到差不多的区间,例如[-0.5,0.5]。这里我们使用一种很常见的操作方法:减掉均值,然后除以原取值范围。 + +做归一化(或 [Feature scaling](https://en.wikipedia.org/wiki/Feature_scaling))至少有以下3个理由: +- 过大或过小的数值范围会导致计算时的浮点上溢或下溢。 +- 不同的数值范围会导致不同属性对模型的重要性不同(至少在训练的初始阶段如此),而这个隐含的假设常常是不合理的。这会对优化的过程造成困难,使训练时间大大的加长。 +- 很多的机器学习技巧/模型(例如L1,L2正则项,向量空间模型-Vector Space Model)都基于这样的假设:所有的属性取值都差不多是以0为均值且取值范围相近的。 + +![featureScale](./image/ranges.png) +

图2. 各维属性的取值范围
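+上文描述的"减掉均值,然后除以原取值范围"用 numpy 只需几行即可示意(`data` 为假设的样本矩阵,每列对应一维属性;实际加载数据时 `paddle.dataset.uci_housing` 已自动完成类似预处理):
+
+```python
+import numpy as np
+
+# 假设的原始特征矩阵:506条样本,13维属性
+data = np.random.uniform(0, 400, (506, 13))
+
+mean = data.mean(axis=0)
+value_range = data.max(axis=0) - data.min(axis=0)
+normalized = (data - mean) / value_range  # 减掉均值,再除以原取值范围
+
+print(normalized.mean(axis=0))  # 归一化后各维均值近似为0
+```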

+ +#### 整理训练集与测试集 +我们将数据集分割为两份:一份用于调整模型的参数,即进行模型的训练,模型在这份数据集上的误差被称为**训练误差**;另外一份被用来测试,模型在这份数据集上的误差被称为**测试误差**。我们训练模型的目的是为了通过从训练数据中找到规律来预测未知的新数据,所以测试误差是更能反映模型表现的指标。分割数据的比例要考虑到两个因素:更多的训练数据会降低参数估计的方差,从而得到更可信的模型;而更多的测试数据会降低测试误差的方差,从而得到更可信的测试误差。我们这个例子中设置的分割比例为`$8:2$` + + +在更复杂的模型训练过程中,我们往往还会多使用一种数据集:验证集。因为复杂的模型中常常还有一些超参数([Hyperparameter](https://en.wikipedia.org/wiki/Hyperparameter_optimization))需要调节,所以我们会尝试多种超参数的组合来分别训练多个模型,然后对比它们在验证集上的表现选择相对最好的一组超参数,最后才使用这组参数下训练的模型在测试集上评估测试误差。由于本章训练的模型比较简单,我们暂且忽略掉这个过程。 + +## 训练 + +`fit_a_line/trainer.py`演示了训练的整体过程。 + +### 配置数据提供器(Datafeeder) +首先我们引入必要的库: +```python +import paddle +import paddle.fluid as fluid +import numpy +``` + +我们通过uci_housing模块引入了数据集合[UCI Housing Data Set](https://archive.ics.uci.edu/ml/datasets/Housing) + +其中,在uci_housing模块中封装了: + +1. 数据下载的过程。下载数据保存在~/.cache/paddle/dataset/uci_housing/housing.data。 +2. [数据预处理](#数据预处理)的过程。 + +接下来我们定义了用于训练和测试的数据提供器。提供器每次读入一个大小为`BATCH_SIZE`的数据批次。如果用户希望加一些随机性,她可以同时定义一个批次大小和一个缓存大小。这样的话,每次数据提供器会从缓存中随机读取批次大小那么多的数据。 + +```python +BATCH_SIZE = 20 + +train_reader = paddle.batch( +paddle.reader.shuffle( +paddle.dataset.uci_housing.train(), buf_size=500), +batch_size=BATCH_SIZE) + +test_reader = paddle.batch( +paddle.reader.shuffle( +paddle.dataset.uci_housing.test(), buf_size=500), +batch_size=BATCH_SIZE) +``` + +### 配置训练程序 +训练程序的目的是定义一个训练模型的网络结构。对于线性回归来讲,它就是一个从输入到输出的简单的全连接层。更加复杂的结果,比如卷积神经网络,递归神经网络等会在随后的章节中介绍。训练程序必须返回`平均损失`作为第一个返回值,因为它会被后面反向传播算法所用到。 + +```python +def train_program(): +y = fluid.layers.data(name='y', shape=[1], dtype='float32') + +# feature vector of length 13 +x = fluid.layers.data(name='x', shape=[13], dtype='float32') +y_predict = fluid.layers.fc(input=x, size=1, act=None) + +loss = fluid.layers.square_error_cost(input=y_predict, label=y) +avg_loss = fluid.layers.mean(loss) + +return avg_loss +``` + +### 定义运算场所 +我们可以定义运算是发生在CPU还是GPU + +```python +use_cuda = False +place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() +``` + +### 创建训练器 +训练器会读入一个训练程序和一些必要的其他参数: + +```python +trainer = fluid.Trainer( +train_func=train_program, +place=place, +optimizer_func=fluid.optimizer.SGD(learning_rate=0.001)) +``` + +### 开始提供数据 +PaddlePaddle提供了读取数据者发生器机制来读取训练数据。读取数据者会一次提供多列数据,因此我们需要一个Python的list来定义读取顺序。 + +```python +feed_order=['x', 'y'] +``` + +除此之外,可以定义一个事件相应器来处理类似`打印训练进程`的事件: + +```python +# Specify the directory path to save the parameters +params_dirname = "fit_a_line.inference.model" + +# Plot data +from paddle.v2.plot import Ploter +train_title = "Train cost" +test_title = "Test cost" +plot_cost = Ploter(train_title, test_title) + +step = 0 + +# event_handler to print training and testing info +def event_handler_plot(event): +global step +if isinstance(event, fluid.EndStepEvent): +if event.step % 10 == 0: # every 10 batches, record a test cost +test_metrics = trainer.test( +reader=test_reader, feed_order=feed_order) + +plot_cost.append(test_title, step, test_metrics[0]) +plot_cost.plot() + +if test_metrics[0] < 10.0: +# If the accuracy is good enough, we can stop the training. +print('loss is less than 10.0, stop') +trainer.stop() + +# We can save the trained parameters for the inferences later +if params_dirname is not None: +trainer.save_params(params_dirname) + +step += 1 +``` + +### 开始训练 +我们现在可以通过调用`trainer.train()`来开始训练 + +```python +%matplotlib inline + +# The training could take up to a few minutes. 
+trainer.train( +reader=train_reader, +num_epochs=100, +event_handler=event_handler_plot, +feed_order=feed_order) +``` + +![trainTestCost](./image/train_and_test.png) + +## 预测 +提供一个`inference_program`和一个`params_dirname`来初始化预测器。`params_dirname`用来存储我们的参数。 + +### 设定预测程序 +类似于`trainer.train`,预测器需要一个预测程序来做预测。我们可以稍加修改我们的训练程序来把预测值包含进来。 + + +```python +def inference_program(): +x = fluid.layers.data(name='x', shape=[13], dtype='float32') +y_predict = fluid.layers.fc(input=x, size=1, act=None) +return y_predict +``` + +### 预测 +预测器会从`params_dirname`中读取已经训练好的模型,来对从未遇见过的数据进行预测。 + +```python +inferencer = fluid.Inferencer( +infer_func=inference_program, param_path=params_dirname, place=place) + +batch_size = 10 +tensor_x = numpy.random.uniform(0, 10, [batch_size, 13]).astype("float32") + +results = inferencer.infer({'x': tensor_x}) +print("infer results: ", results[0]) +``` + +## 总结 +在这章里,我们借助波士顿房价这一数据集,介绍了线性回归模型的基本概念,以及如何使用PaddlePaddle实现训练和测试的过程。很多的模型和技巧都是从简单的线性回归模型演化而来,因此弄清楚线性模型的原理和局限非常重要。 + + +## 参考文献 +1. https://en.wikipedia.org/wiki/Linear_regression +2. Friedman J, Hastie T, Tibshirani R. The elements of statistical learning[M]. Springer, Berlin: Springer series in statistics, 2001. +3. Murphy K P. Machine learning: a probabilistic perspective[M]. MIT press, 2012. +4. Bishop C M. Pattern recognition[J]. Machine Learning, 2006, 128. + +
+知识共享许可协议
本教程PaddlePaddle 创作,采用 知识共享 署名-相同方式共享 4.0 国际 许可协议进行许可。 diff --git a/doc/fluid/new_docs/beginners_guide/quick_start/fit_a_line/image/predictions.png b/doc/fluid/new_docs/beginners_guide/quick_start/fit_a_line/image/predictions.png new file mode 100644 index 0000000000..27e4acb131 Binary files /dev/null and b/doc/fluid/new_docs/beginners_guide/quick_start/fit_a_line/image/predictions.png differ diff --git a/doc/fluid/new_docs/beginners_guide/quick_start/fit_a_line/image/ranges.png b/doc/fluid/new_docs/beginners_guide/quick_start/fit_a_line/image/ranges.png new file mode 100644 index 0000000000..5d86b12715 Binary files /dev/null and b/doc/fluid/new_docs/beginners_guide/quick_start/fit_a_line/image/ranges.png differ diff --git a/doc/fluid/new_docs/beginners_guide/quick_start/fit_a_line/image/train_and_test.png b/doc/fluid/new_docs/beginners_guide/quick_start/fit_a_line/image/train_and_test.png new file mode 100644 index 0000000000..bcd304a6a0 Binary files /dev/null and b/doc/fluid/new_docs/beginners_guide/quick_start/fit_a_line/image/train_and_test.png differ diff --git a/doc/fluid/new_docs/beginners_guide/quick_start/index.rst b/doc/fluid/new_docs/beginners_guide/quick_start/index.rst new file mode 100644 index 0000000000..f5889ba52b --- /dev/null +++ b/doc/fluid/new_docs/beginners_guide/quick_start/index.rst @@ -0,0 +1,13 @@ +######## +快速入门 +######## + +.. todo:: + + 概述 + +.. toctree:: + :maxdepth: 2 + + fit_a_line/README.cn.md + recognize_digits/README.cn.md diff --git a/doc/fluid/new_docs/beginners_guide/quick_start/recognize_digits/README.cn.md b/doc/fluid/new_docs/beginners_guide/quick_start/recognize_digits/README.cn.md new file mode 100644 index 0000000000..c04a949a3f --- /dev/null +++ b/doc/fluid/new_docs/beginners_guide/quick_start/recognize_digits/README.cn.md @@ -0,0 +1,453 @@ +# 识别数字 + +本教程源代码目录在[book/recognize_digits](https://github.com/PaddlePaddle/book/tree/develop/02.recognize_digits), 初次使用请参考PaddlePaddle[安装教程](https://github.com/PaddlePaddle/book/blob/develop/README.cn.md#运行这本书)。 + +## 背景介绍 +当我们学习编程的时候,编写的第一个程序一般是实现打印"Hello World"。而机器学习(或深度学习)的入门教程,一般都是 [MNIST](http://yann.lecun.com/exdb/mnist/) 数据库上的手写识别问题。原因是手写识别属于典型的图像分类问题,比较简单,同时MNIST数据集也很完备。MNIST数据集作为一个简单的计算机视觉数据集,包含一系列如图1所示的手写数字图片和对应的标签。图片是28x28的像素矩阵,标签则对应着0~9的10个数字。每张图片都经过了大小归一化和居中处理。 + +![MNIST](./image/mnist_example_image.png) +

图1. MNIST图片示例
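+像图1这样的样例图片可以用几行代码查看(假设已安装 PaddlePaddle 和 matplotlib,且如后文"数据介绍"所述,读取器产出的图片为展平成784维的数组):
+
+```python
+import paddle
+import matplotlib.pyplot as plt
+
+# 取训练集第一条样本:展平的784维图片和0~9的数字标签
+image, label = next(paddle.dataset.mnist.train()())
+plt.imshow(image.reshape(28, 28), cmap='gray')
+plt.title('label: %d' % label)
+plt.show()
+```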

+ +MNIST数据集是从 [NIST](https://www.nist.gov/srd/nist-special-database-19) 的Special Database 3(SD-3)和Special Database 1(SD-1)构建而来。由于SD-3是由美国人口调查局的员工进行标注,SD-1是由美国高中生进行标注,因此SD-3比SD-1更干净也更容易识别。Yann LeCun等人从SD-1和SD-3中各取一半作为MNIST的训练集(60000条数据)和测试集(10000条数据),其中训练集来自250位不同的标注员,此外还保证了训练集和测试集的标注员是不完全相同的。 + +Yann LeCun早先在手写字符识别上做了很多研究,并在研究过程中提出了卷积神经网络(Convolutional Neural Network),大幅度地提高了手写字符的识别能力,也因此成为了深度学习领域的奠基人之一。如今的深度学习领域,卷积神经网络占据了至关重要的地位,从最早Yann LeCun提出的简单LeNet,到如今ImageNet大赛上的优胜模型VGGNet、GoogLeNet、ResNet等(请参见[图像分类](https://github.com/PaddlePaddle/book/tree/develop/03.image_classification) 教程),人们在图像分类领域,利用卷积神经网络得到了一系列惊人的结果。 + +有很多算法在MNIST上进行实验。1998年,LeCun分别用单层线性分类器、多层感知器(Multilayer Perceptron, MLP)和多层卷积神经网络LeNet进行实验,使得测试集上的误差不断下降(从12%下降到0.7%)\[[1](#参考文献)\]。此后,科学家们又基于K近邻(K-Nearest Neighbors)算法\[[2](#参考文献)\]、支持向量机(SVM)\[[3](#参考文献)\]、神经网络\[[4-7](#参考文献)\]和Boosting方法\[[8](#参考文献)\]等做了大量实验,并采用多种预处理方法(如去除歪曲、去噪、模糊等)来提高识别的准确率。 + +本教程中,我们从简单的模型Softmax回归开始,带大家入门手写字符识别,并逐步进行模型优化。 + + +## 模型概览 + +基于MNIST数据训练一个分类器,在介绍本教程使用的三个基本图像分类网络前,我们先给出一些定义: +- `$X$`是输入:MNIST图片是`$28\times28$` 的二维图像,为了进行计算,我们将其转化为`$784$`维向量,即`$X=\left ( x_0, x_1, \dots, x_{783} \right )$`。 +- `$Y$`是输出:分类器的输出是10类数字(0-9),即`$Y=\left ( y_0, y_1, \dots, y_9 \right )$`,每一维`$y_i$`代表图片分类为第`$i$`类数字的概率。 +- `$L$`是图片的真实标签:`$L=\left ( l_0, l_1, \dots, l_9 \right )$`也是10维,但只有一维为1,其他都为0。 + +### Softmax回归(Softmax Regression) + +最简单的Softmax回归模型是先将输入层经过一个全连接层得到的特征,然后直接通过softmax 函数进行多分类\[[9](#参考文献)\]。 + +输入层的数据`$X$`传到输出层,在激活操作之前,会乘以相应的权重 `$W$` ,并加上偏置变量 `$b$` ,具体如下: + +$$ y_i = \text{softmax}(\sum_j W_{i,j}x_j + b_i) $$ + +其中 `$ \text{softmax}(x_i) = \frac{e^{x_i}}{\sum_j e^{x_j}} $` + +对于有 `$N$` 个类别的多分类问题,指定 `$N$` 个输出节点,`$N$` 维结果向量经过softmax将归一化为 `$N$` 个[0,1]范围内的实数值,分别表示该样本属于这 `$N$` 个类别的概率。此处的 `$y_i$` 即对应该图片为数字 `$i$` 的预测概率。 + +在分类问题中,我们一般采用交叉熵代价损失函数(cross entropy),公式如下: + +$$ \text{crossentropy}(label, y) = -\sum_i label_ilog(y_i) $$ + +图2为softmax回归的网络图,图中权重用蓝线表示、偏置用红线表示、+1代表偏置参数的系数为1。 + +![softmaxRegression](./image/softmax_regression.png) +

图2. softmax回归网络结构图
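+作为对上面两个公式的直观补充,下面用一小段 numpy 代码示意 softmax 与交叉熵的计算(输入为假设的随机数,并非教程正文代码):
+
+```python
+import numpy as np
+
+def softmax(x):
+    e = np.exp(x - x.max())  # 先减去最大值保证数值稳定,不影响结果
+    return e / e.sum()
+
+def cross_entropy(label, y):
+    # label 为 one-hot 真实标签,y 为 softmax 输出的各类概率
+    return -np.sum(label * np.log(y))
+
+logits = np.array([2.0, 1.0, 0.1])  # 假设的全连接层输出 Wx + b
+label = np.array([1.0, 0.0, 0.0])   # 真实类别为第0类
+probs = softmax(logits)
+print(probs, cross_entropy(label, probs))
+```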

+ +### 多层感知器(Multilayer Perceptron, MLP) + +Softmax回归模型采用了最简单的两层神经网络,即只有输入层和输出层,因此其拟合能力有限。为了达到更好的识别效果,我们考虑在输入层和输出层中间加上若干个隐藏层\[[10](#参考文献)\]。 + +1. 经过第一个隐藏层,可以得到 `$ H_1 = \phi(W_1X + b_1) $`,其中`$\phi$`代表激活函数,常见的有sigmoid、tanh或ReLU等函数。 +2. 经过第二个隐藏层,可以得到 `$ H_2 = \phi(W_2H_1 + b_2) $`。 +3. 最后,再经过输出层,得到的`$Y=\text{softmax}(W_3H_2 + b_3)$`,即为最后的分类结果向量。 + + +图3为多层感知器的网络结构图,图中权重用蓝线表示、偏置用红线表示、+1代表偏置参数的系数为1。 + +![multilayerPerceptron](./image/mlp.png) +

图3. 多层感知器网络结构图
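+上面三步的前向计算可以用 numpy 直观示意如下(权重取随机小数作占位,激活函数`$\phi$`取ReLU,仅为说明公式):
+
+```python
+import numpy as np
+
+def relu(x):
+    return np.maximum(0.0, x)
+
+def softmax(x):
+    e = np.exp(x - x.max())
+    return e / e.sum()
+
+x = np.random.rand(784)  # 一张展平成784维的输入图片
+W1, b1 = np.random.randn(200, 784) * 0.01, np.zeros(200)
+W2, b2 = np.random.randn(200, 200) * 0.01, np.zeros(200)
+W3, b3 = np.random.randn(10, 200) * 0.01, np.zeros(10)
+
+h1 = relu(W1.dot(x) + b1)     # H1 = relu(W1*X + b1)
+h2 = relu(W2.dot(h1) + b2)    # H2 = relu(W2*H1 + b2)
+y = softmax(W3.dot(h2) + b3)  # Y = softmax(W3*H2 + b3)
+print(y.argmax())             # 概率最大的数字类别
+```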

+ +### 卷积神经网络(Convolutional Neural Network, CNN) + +在多层感知器模型中,将图像展开成一维向量输入到网络中,忽略了图像的位置和结构信息,而卷积神经网络能够更好的利用图像的结构信息。[LeNet-5](http://yann.lecun.com/exdb/lenet/)是一个较简单的卷积神经网络。图4显示了其结构:输入的二维图像,先经过两次卷积层到池化层,再经过全连接层,最后使用softmax分类作为输出层。下面我们主要介绍卷积层和池化层。 + +![cnnStructure](./image/cnn.png) +

图4. LeNet-5卷积神经网络结构

+ +#### 卷积层 + +卷积层是卷积神经网络的核心基石。在图像识别里我们提到的卷积是二维卷积,即离散二维滤波器(也称作卷积核)与二维图像做卷积操作,简单的讲是二维滤波器滑动到二维图像上所有位置,并在每个位置上与该像素点及其领域像素点做内积。卷积操作被广泛应用与图像处理领域,不同卷积核可以提取不同的特征,例如边沿、线性、角等特征。在深层卷积神经网络中,通过卷积操作可以提取出图像低级到复杂的特征。 + +![cnn](https://raw.githubusercontent.com/PaddlePaddle/book/develop/02.recognize_digits/image/conv_layer.png) +

图5. 卷积层图片

+ +图5给出一个卷积计算过程的示例图,输入图像大小为`$H=5,W=5,D=3$`,即`$5 \times 5$`大小的3通道(RGB,也称作深度)彩色图像。这个示例图中包含两(用`$K$`表示)组卷积核,即图中滤波器`$W_0$`和`$W_1$`。在卷积计算中,通常对不同的输入通道采用不同的卷积核,如图示例中每组卷积核包含(`$D=3$`)个`$3 \times 3$`(用`$F \times F$`表示)大小的卷积核。另外,这个示例中卷积核在图像的水平方向(`$W$`方向)和垂直方向(`$H$`方向)的滑动步长为2(用`$S$`表示);对输入图像周围各填充1(用`$P$`表示)个0,即图中输入层原始数据为蓝色部分,灰色部分是进行了大小为1的扩展,用0来进行扩展。经过卷积操作得到输出为`$3 \times 3 \times 2$`(用`$H_{o} \times W_{o} \times K$`表示)大小的特征图,即`$3 \times 3$`大小的2通道特征图,其中`$H_o$`计算公式为:`$H_o = (H - F + 2 \times P)/S + 1$`,`$W_o$`同理。 而输出特征图中的每个像素,是每组滤波器与输入图像每个特征图的内积再求和,再加上偏置`$b_o$`,偏置通常对于每个输出特征图是共享的。输出特征图`$o[:,:,0]$`中的最后一个`$-2$`计算如图5右下角公式所示。 + +在卷积操作中卷积核是可学习的参数,经过上面示例介绍,每层卷积的参数大小为`$D \times F \times F \times K$`。在多层感知器模型中,神经元通常是全部连接,参数较多。而卷积层的参数较少,这也是由卷积层的主要特性即局部连接和共享权重所决定。 + +- 局部连接:每个神经元仅与输入神经元的一块区域连接,这块局部区域称作感受野(receptive field)。在图像卷积操作中,即神经元在空间维度(spatial dimension,即上图示例H和W所在的平面)是局部连接,但在深度上是全部连接。对于二维图像本身而言,也是局部像素关联较强。这种局部连接保证了学习后的过滤器能够对于局部的输入特征有最强的响应。局部连接的思想,也是受启发于生物学里面的视觉系统结构,视觉皮层的神经元就是局部接受信息的。 + +- 权重共享:计算同一个深度切片的神经元时采用的滤波器是共享的。例如图4中计算`$o[:,:,0]$`的每个每个神经元的滤波器均相同,都为`$W_0$`,这样可以很大程度上减少参数。共享权重在一定程度上讲是有意义的,例如图片的底层边缘特征与特征在图中的具体位置无关。但是在一些场景中是无意的,比如输入的图片是人脸,眼睛和头发位于不同的位置,希望在不同的位置学到不同的特征 (参考[斯坦福大学公开课]( http://cs231n.github.io/convolutional-networks/))。请注意权重只是对于同一深度切片的神经元是共享的,在卷积层,通常采用多组卷积核提取不同特征,即对应不同深度切片的特征,不同深度切片的神经元权重是不共享。另外,偏重对同一深度切片的所有神经元都是共享的。 + +通过介绍卷积计算过程及其特性,可以看出卷积是线性操作,并具有平移不变性(shift-invariant),平移不变性即在图像每个位置执行相同的操作。卷积层的局部连接和权重共享使得需要学习的参数大大减小,这样也有利于训练较大卷积神经网络。 + +#### 池化层 + +![pooling](./image/max_pooling.png) +

图6. 池化层图片

+ +池化是非线性下采样的一种形式,主要作用是通过减少网络的参数来减小计算量,并且能够在一定程度上控制过拟合。通常在卷积层的后面会加上一个池化层。池化包括最大池化、平均池化等。其中最大池化是用不重叠的矩形框将输入层分成不同的区域,对于每个矩形框的数取最大值作为输出层,如图6所示。 + +更详细的关于卷积神经网络的具体知识可以参考[斯坦福大学公开课]( http://cs231n.github.io/convolutional-networks/ )和[图像分类](https://github.com/PaddlePaddle/book/blob/develop/image_classification/README.md)教程。 + +### 常见激活函数介绍 +- sigmoid激活函数: `$ f(x) = sigmoid(x) = \frac{1}{1+e^{-x}} $` + +- tanh激活函数: `$ f(x) = tanh(x) = \frac{e^x-e^{-x}}{e^x+e^{-x}} $` + +实际上,tanh函数只是规模变化的sigmoid函数,将sigmoid函数值放大2倍之后再向下平移1个单位:tanh(x) = 2sigmoid(2x) - 1 。 + +- ReLU激活函数: `$ f(x) = max(0, x) $` + +更详细的介绍请参考[维基百科激活函数](https://en.wikipedia.org/wiki/Activation_function)。 + +## 数据介绍 + +PaddlePaddle在API中提供了自动加载[MNIST](http://yann.lecun.com/exdb/mnist/)数据的模块`paddle.dataset.mnist`。加载后的数据位于`/home/username/.cache/paddle/dataset/mnist`下: + +

+| 文件名称 | 说明 |
+| --- | --- |
+| train-images-idx3-ubyte | 训练数据图片,60,000条数据 |
+| train-labels-idx1-ubyte | 训练数据标签,60,000条数据 |
+| t10k-images-idx3-ubyte | 测试数据图片,10,000条数据 |
+| t10k-labels-idx1-ubyte | 测试数据标签,10,000条数据 |
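+上表中的数据规模可以用几行代码验证(假设已安装 PaddlePaddle;reader 的用法与下文"数据集 Feeders 配置"一节一致):
+
+```python
+import paddle
+
+train_reader = paddle.dataset.mnist.train()
+image, label = next(train_reader())
+print(image.shape)  # (784,):展平后的28x28图片
+print(label)        # 0~9 的整数标签
+print(sum(1 for _ in train_reader()))  # 60000,与上表一致
+```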

+ +## Fluid API 概述 + +演示将使用最新的 `Fluid API`。Fluid API是最新的 PaddlePaddle API。它在不牺牲性能的情况下简化了模型配置。 +我们建议使用 Fluid API,因为它更容易学起来。 + +下面是快速的 Fluid API 概述。 +1. `inference_program`:指定如何从数据输入中获得预测的函数。 +这是指定网络流的地方。 + +1. `train_program`:指定如何从 `inference_program` 和`标签值`中获取 `loss` 的函数。 +这是指定损失计算的地方。 + +1. `optimizer_func`: “指定优化器配置的函数。优化器负责减少损失并驱动培训。Paddle 支持多种不同的优化器。 + +1. `Trainer`:PaddlePaddle Trainer 管理由 `train_program` 和 `optimizer` 指定的训练过程。 +通过 `event_handler` 回调函数,用户可以监控培训的进展。 + +1. `Inferencer`:Fluid inferencer 加载 `inference_program` 和由 Trainer 训练的参数。 +然后,它可以推断数据和返回预测。 + +在这个演示中,我们将深入了解它们。 + +## 配置说明 +加载 PaddlePaddle 的 Fluid API 包。 + +```python +import paddle +import paddle.fluid as fluid +``` + +### Program Functions 配置 + +我们需要设置“推理程序”函数。我们想用这个程序来演示三个不同的分类器,每个分类器都定义为 Python 函数。 +我们需要将图像数据馈送到分类器。Paddle 为读取数据提供了一个特殊的层 `layer.data` 层。 +让我们创建一个数据层来读取图像并将其连接到分类网络。 + +- Softmax回归:只通过一层简单的以softmax为激活函数的全连接层,就可以得到分类的结果。 + +```python +def softmax_regression(): +img = fluid.layers.data(name='img', shape=[1, 28, 28], dtype='float32') +predict = fluid.layers.fc( +input=img, size=10, act='softmax') +return predict +``` + +- 多层感知器:下面代码实现了一个含有两个隐藏层(即全连接层)的多层感知器。其中两个隐藏层的激活函数均采用ReLU,输出层的激活函数用Softmax。 + +```python +def multilayer_perceptron(): +img = fluid.layers.data(name='img', shape=[1, 28, 28], dtype='float32') +# 第一个全连接层,激活函数为ReLU +hidden = fluid.layers.fc(input=img, size=200, act='relu') +# 第二个全连接层,激活函数为ReLU +hidden = fluid.layers.fc(input=hidden, size=200, act='relu') +# 以softmax为激活函数的全连接输出层,输出层的大小必须为数字的个数10 +prediction = fluid.layers.fc(input=hidden, size=10, act='softmax') +return prediction +``` + +- 卷积神经网络LeNet-5: 输入的二维图像,首先经过两次卷积层到池化层,再经过全连接层,最后使用以softmax为激活函数的全连接层作为输出层。 + +```python +def convolutional_neural_network(): +img = fluid.layers.data(name='img', shape=[1, 28, 28], dtype='float32') +# 第一个卷积-池化层 +conv_pool_1 = fluid.nets.simple_img_conv_pool( +input=img, +filter_size=5, +num_filters=20, +pool_size=2, +pool_stride=2, +act="relu") +conv_pool_1 = fluid.layers.batch_norm(conv_pool_1) +# 第二个卷积-池化层 +conv_pool_2 = fluid.nets.simple_img_conv_pool( +input=conv_pool_1, +filter_size=5, +num_filters=50, +pool_size=2, +pool_stride=2, +act="relu") +# 以softmax为激活函数的全连接输出层,输出层的大小必须为数字的个数10 +prediction = fluid.layers.fc(input=conv_pool_2, size=10, act='softmax') +return prediction +``` + +#### Train Program 配置 +然后我们需要设置训练程序 `train_program`。它首先从分类器中进行预测。 +在训练期间,它将从预测中计算 `avg_cost`。 + +**注意:** 训练程序应该返回一个数组,第一个返回参数必须是 `avg_cost`。训练器使用它来计算梯度。 + +请随意修改代码,测试 Softmax 回归 `softmax_regression`, `MLP` 和 卷积神经网络 `convolutional neural network` 分类器之间的不同结果。 + +```python +def train_program(): +label = fluid.layers.data(name='label', shape=[1], dtype='int64') + +# predict = softmax_regression() # uncomment for Softmax回归 +# predict = multilayer_perceptron() # uncomment for 多层感知器 +predict = convolutional_neural_network() # uncomment for LeNet5卷积神经网络 +cost = fluid.layers.cross_entropy(input=predict, label=label) +avg_cost = fluid.layers.mean(cost) +acc = fluid.layers.accuracy(input=predict, label=label) +return [avg_cost, acc] + + +# 该模型运行在单个CPU上 +``` + +#### Optimizer Function 配置 + +在下面的 `Adam optimizer`,`learning_rate` 是训练的速度,与网络的训练收敛速度有关系。 + +```python +def optimizer_program(): +return fluid.optimizer.Adam(learning_rate=0.001) +``` + +### 数据集 Feeders 配置 + +下一步,我们开始训练过程。`paddle.dataset.movielens.train()`和`paddle.dataset.movielens.test()`分别做训练和测试数据集。这两个函数各自返回一个reader——PaddlePaddle中的reader是一个Python函数,每次调用的时候返回一个Python yield generator。 + +下面`shuffle`是一个reader decorator,它接受一个reader A,返回另一个reader B —— reader B 
每次读入`buffer_size`条训练数据到一个buffer里,然后随机打乱其顺序,并且逐条输出。 + +`batch`是一个特殊的decorator,它的输入是一个reader,输出是一个batched reader —— 在PaddlePaddle里,一个reader每次yield一条训练数据,而一个batched reader每次yield一个minibatch。 + +```python +train_reader = paddle.batch( +paddle.reader.shuffle( +paddle.dataset.mnist.train(), buf_size=500), +batch_size=64) + +test_reader = paddle.batch( +paddle.dataset.mnist.test(), batch_size=64) +``` + +### Trainer 配置 + +现在,我们需要配置 `Trainer`。`Trainer` 需要接受训练程序 `train_program`, `place` 和优化器 `optimizer`。 + +```python +# 该模型运行在单个CPU上 +use_cuda = False # set to True if training with GPU +place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() + +trainer = fluid.Trainer( +train_func=train_program, place=place, optimizer_func=optimizer_program) +``` + +#### Event Handler 配置 + +Fluid API 在训练期间为回调函数提供了一个钩子。用户能够通过机制监控培训进度。 +我们将在这里演示两个 `event_handler` 程序。请随意修改 Jupyter 笔记本 ,看看有什么不同。 + +`event_handler` 用来在训练过程中输出训练结果 + +```python +# Save the parameter into a directory. The Inferencer can load the parameters from it to do infer +params_dirname = "recognize_digits_network.inference.model" +lists = [] +def event_handler(event): +if isinstance(event, fluid.EndStepEvent): +if event.step % 100 == 0: +# event.metrics maps with train program return arguments. +# event.metrics[0] will yeild avg_cost and event.metrics[1] will yeild acc in this example. +print "Pass %d, Batch %d, Cost %f" % ( +event.step, event.epoch, event.metrics[0]) + +if isinstance(event, fluid.EndEpochEvent): +avg_cost, acc = trainer.test( +reader=test_reader, feed_order=['img', 'label']) + +print("Test with Epoch %d, avg_cost: %s, acc: %s" % (event.epoch, avg_cost, acc)) + +# save parameters +trainer.save_params(params_dirname) +lists.append((event.epoch, avg_cost, acc)) +``` + +`event_handler_plot` 可以用来在训练过程中画图如下: + +![png](./image/train_and_test.png) + +```python +from paddle.v2.plot import Ploter + +train_title = "Train cost" +test_title = "Test cost" +cost_ploter = Ploter(train_title, test_title) +step = 0 +lists = [] + +# event_handler to plot a figure +def event_handler_plot(event): +global step +if isinstance(event, fluid.EndStepEvent): +if step % 100 == 0: +# event.metrics maps with train program return arguments. +# event.metrics[0] will yeild avg_cost and event.metrics[1] will yeild acc in this example. 
+cost_ploter.append(train_title, step, event.metrics[0]) +cost_ploter.plot() +step += 1 +if isinstance(event, fluid.EndEpochEvent): +# save parameters +trainer.save_params(params_dirname) + +avg_cost, acc = trainer.test( +reader=test_reader, feed_order=['img', 'label']) +cost_ploter.append(test_title, step, avg_cost) +lists.append((event.epoch, avg_cost, acc)) +``` + +#### 开始训练 + +既然我们设置了 `event_handler` 和 `data reader`,我们就可以开始训练模型了。 + +`feed_order` 用于将数据目录映射到 `train_program` + +```python +trainer.train( +num_epochs=5, +event_handler=event_handler, +reader=train_reader, +feed_order=['img', 'label']) +``` + +训练过程是完全自动的,event_handler里打印的日志类似如下所示: + +``` +Pass 0, Batch 0, Cost 0.125650 +Pass 100, Batch 0, Cost 0.161387 +Pass 200, Batch 0, Cost 0.040036 +Pass 300, Batch 0, Cost 0.023391 +Pass 400, Batch 0, Cost 0.005856 +Pass 500, Batch 0, Cost 0.003315 +Pass 600, Batch 0, Cost 0.009977 +Pass 700, Batch 0, Cost 0.020959 +Pass 800, Batch 0, Cost 0.105560 +Pass 900, Batch 0, Cost 0.239809 +Test with Epoch 0, avg_cost: 0.053097883707459624, acc: 0.9822850318471338 +``` + +训练之后,检查模型的预测准确度。用 MNIST 训练的时候,一般 softmax回归模型的分类准确率为约为 92.34%,多层感知器为97.66%,卷积神经网络可以达到 99.20%。 + + +## 应用模型 + +可以使用训练好的模型对手写体数字图片进行分类,下面程序展示了如何使用 `fluid.Inferencer` 接口进行推断。 + +### Inference 配置 + +`Inference` 需要一个 `infer_func` 和 `param_path` 来设置网络和经过训练的参数。 +我们可以简单地插入在此之前定义的分类器。 + +```python +inferencer = fluid.Inferencer( +# infer_func=softmax_regression, # uncomment for softmax regression +# infer_func=multilayer_perceptron, # uncomment for MLP +infer_func=convolutional_neural_network, # uncomment for LeNet5 +param_path=params_dirname, +place=place) +``` + +### 生成预测输入数据 + +`infer_3.png` 是数字 3 的一个示例图像。把它变成一个 numpy 数组以匹配数据馈送格式。 + +```python +# Prepare the test image +import os +import numpy as np +from PIL import Image +def load_image(file): +im = Image.open(file).convert('L') +im = im.resize((28, 28), Image.ANTIALIAS) +im = np.array(im).reshape(1, 1, 28, 28).astype(np.float32) +im = im / 255.0 * 2.0 - 1.0 +return im + +cur_dir = cur_dir = os.getcwd() +img = load_image(cur_dir + '/image/infer_3.png') +``` + +### 预测 + +现在我们准备做预测。 + +```python +results = inferencer.infer({'img': img}) +lab = np.argsort(results) # probs and lab are the results of one batch data +print "Label of image/infer_3.png is: %d" % lab[0][0][-1] +``` + +## 总结 + +本教程的softmax回归、多层感知器和卷积神经网络是最基础的深度学习模型,后续章节中复杂的神经网络都是从它们衍生出来的,因此这几个模型对之后的学习大有裨益。同时,我们也观察到从最简单的softmax回归变换到稍复杂的卷积神经网络的时候,MNIST数据集上的识别准确率有了大幅度的提升,原因是卷积层具有局部连接和共享权重的特性。在之后学习新模型的时候,希望大家也要深入到新模型相比原模型带来效果提升的关键之处。此外,本教程还介绍了PaddlePaddle模型搭建的基本流程,从dataprovider的编写、网络层的构建,到最后的训练和预测。对这个流程熟悉以后,大家就可以用自己的数据,定义自己的网络模型,并完成自己的训练和预测任务了。 + +## 参考文献 + +1. LeCun, Yann, Léon Bottou, Yoshua Bengio, and Patrick Haffner. ["Gradient-based learning applied to document recognition."](http://ieeexplore.ieee.org/abstract/document/726791/) Proceedings of the IEEE 86, no. 11 (1998): 2278-2324. +2. Wejéus, Samuel. ["A Neural Network Approach to Arbitrary SymbolRecognition on Modern Smartphones."](http://www.diva-portal.org/smash/record.jsf?pid=diva2%3A753279&dswid=-434) (2014). +3. Decoste, Dennis, and Bernhard Schölkopf. ["Training invariant support vector machines."](http://link.springer.com/article/10.1023/A:1012454411458) Machine learning 46, no. 1-3 (2002): 161-190. +4. Simard, Patrice Y., David Steinkraus, and John C. Platt. ["Best Practices for Convolutional Neural Networks Applied to Visual Document Analysis."](http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.160.8494&rep=rep1&type=pdf) In ICDAR, vol. 3, pp. 958-962. 2003. 
+5. Salakhutdinov, Ruslan, and Geoffrey E. Hinton. ["Learning a Nonlinear Embedding by Preserving Class Neighbourhood Structure."](http://www.jmlr.org/proceedings/papers/v2/salakhutdinov07a/salakhutdinov07a.pdf) In AISTATS, vol. 11. 2007. +6. Cireşan, Dan Claudiu, Ueli Meier, Luca Maria Gambardella, and Jürgen Schmidhuber. ["Deep, big, simple neural nets for handwritten digit recognition."](http://www.mitpressjournals.org/doi/abs/10.1162/NECO_a_00052) Neural computation 22, no. 12 (2010): 3207-3220. +7. Deng, Li, Michael L. Seltzer, Dong Yu, Alex Acero, Abdel-rahman Mohamed, and Geoffrey E. Hinton. ["Binary coding of speech spectrograms using a deep auto-encoder."](http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.185.1908&rep=rep1&type=pdf) In Interspeech, pp. 1692-1695. 2010. +8. Kégl, Balázs, and Róbert Busa-Fekete. ["Boosting products of base classifiers."](http://dl.acm.org/citation.cfm?id=1553439) In Proceedings of the 26th Annual International Conference on Machine Learning, pp. 497-504. ACM, 2009. +9. Rosenblatt, Frank. ["The perceptron: A probabilistic model for information storage and organization in the brain."](http://psycnet.apa.org/journals/rev/65/6/386/) Psychological review 65, no. 6 (1958): 386. +10. Bishop, Christopher M. ["Pattern recognition."](http://users.isr.ist.utl.pt/~wurmd/Livros/school/Bishop%20-%20Pattern%20Recognition%20And%20Machine%20Learning%20-%20Springer%20%202006.pdf) Machine Learning 128 (2006): 1-58. + +
+知识共享许可协议
本教程PaddlePaddle 创作,采用 知识共享 署名-相同方式共享 4.0 国际 许可协议进行许可。 diff --git a/doc/fluid/new_docs/beginners_guide/quick_start/recognize_digits/image/cnn.png b/doc/fluid/new_docs/beginners_guide/quick_start/recognize_digits/image/cnn.png new file mode 100644 index 0000000000..3f5cdaacdc Binary files /dev/null and b/doc/fluid/new_docs/beginners_guide/quick_start/recognize_digits/image/cnn.png differ diff --git a/doc/fluid/new_docs/beginners_guide/quick_start/recognize_digits/image/cnn_train_log.png b/doc/fluid/new_docs/beginners_guide/quick_start/recognize_digits/image/cnn_train_log.png new file mode 100644 index 0000000000..65bd17eacd Binary files /dev/null and b/doc/fluid/new_docs/beginners_guide/quick_start/recognize_digits/image/cnn_train_log.png differ diff --git a/doc/fluid/new_docs/beginners_guide/quick_start/recognize_digits/image/infer_3.png b/doc/fluid/new_docs/beginners_guide/quick_start/recognize_digits/image/infer_3.png new file mode 100644 index 0000000000..030cd60d3b Binary files /dev/null and b/doc/fluid/new_docs/beginners_guide/quick_start/recognize_digits/image/infer_3.png differ diff --git a/doc/fluid/new_docs/beginners_guide/quick_start/recognize_digits/image/max_pooling.png b/doc/fluid/new_docs/beginners_guide/quick_start/recognize_digits/image/max_pooling.png new file mode 100644 index 0000000000..90b02fa2a7 Binary files /dev/null and b/doc/fluid/new_docs/beginners_guide/quick_start/recognize_digits/image/max_pooling.png differ diff --git a/doc/fluid/new_docs/beginners_guide/quick_start/recognize_digits/image/mlp.png b/doc/fluid/new_docs/beginners_guide/quick_start/recognize_digits/image/mlp.png new file mode 100644 index 0000000000..9f4d26cd8d Binary files /dev/null and b/doc/fluid/new_docs/beginners_guide/quick_start/recognize_digits/image/mlp.png differ diff --git a/doc/fluid/new_docs/beginners_guide/quick_start/recognize_digits/image/mlp_train_log.png b/doc/fluid/new_docs/beginners_guide/quick_start/recognize_digits/image/mlp_train_log.png new file mode 100644 index 0000000000..f5a478fdc2 Binary files /dev/null and b/doc/fluid/new_docs/beginners_guide/quick_start/recognize_digits/image/mlp_train_log.png differ diff --git a/doc/fluid/new_docs/beginners_guide/quick_start/recognize_digits/image/mnist_example_image.png b/doc/fluid/new_docs/beginners_guide/quick_start/recognize_digits/image/mnist_example_image.png new file mode 100644 index 0000000000..4edd7cabf8 Binary files /dev/null and b/doc/fluid/new_docs/beginners_guide/quick_start/recognize_digits/image/mnist_example_image.png differ diff --git a/doc/fluid/new_docs/beginners_guide/quick_start/recognize_digits/image/softmax_regression.png b/doc/fluid/new_docs/beginners_guide/quick_start/recognize_digits/image/softmax_regression.png new file mode 100644 index 0000000000..40b9829828 Binary files /dev/null and b/doc/fluid/new_docs/beginners_guide/quick_start/recognize_digits/image/softmax_regression.png differ diff --git a/doc/fluid/new_docs/beginners_guide/quick_start/recognize_digits/image/softmax_train_log.png b/doc/fluid/new_docs/beginners_guide/quick_start/recognize_digits/image/softmax_train_log.png new file mode 100644 index 0000000000..47204941af Binary files /dev/null and b/doc/fluid/new_docs/beginners_guide/quick_start/recognize_digits/image/softmax_train_log.png differ diff --git a/doc/fluid/new_docs/beginners_guide/quick_start/recognize_digits/image/train_and_test.png b/doc/fluid/new_docs/beginners_guide/quick_start/recognize_digits/image/train_and_test.png new file mode 100644 index 0000000000..5cb87b450d Binary 
files /dev/null and b/doc/fluid/new_docs/beginners_guide/quick_start/recognize_digits/image/train_and_test.png differ
diff --git a/doc/fluid/new_docs/faq/faq.rst b/doc/fluid/new_docs/faq/faq.rst
new file mode 100644
index 0000000000..3b4bd4f895
--- /dev/null
+++ b/doc/fluid/new_docs/faq/faq.rst
@@ -0,0 +1,12 @@
+###################
+编译安装与单元测试
+###################
+
+1. 通过pip安装的PaddlePaddle在 :code:`import paddle.fluid` 报找不到 :code:`libmkldnn.so` 或 :code:`libmklml_intel.so`
+------------------------------------------------------------------------------------------
+出现这种问题的原因是在导入 :code:`paddle.fluid` 时需要加载 :code:`libmkldnn.so` 和 :code:`libmklml_intel.so`,
+但是系统没有找到该文件。一般通过pip安装PaddlePaddle时会将 :code:`libmkldnn.so` 和 :code:`libmklml_intel.so`
+拷贝到 :code:`/usr/local/lib` 路径下,所以解决办法是将该路径加到 :code:`LD_LIBRARY_PATH` 环境变量下,
+即: :code:`export LD_LIBRARY_PATH=/usr/local/lib:$LD_LIBRARY_PATH` 。
+
+**注意**:如果是在虚拟环境中安装PaddlePaddle, :code:`libmkldnn.so` 和 :code:`libmklml_intel.so` 可能不在 :code:`/usr/local/lib` 路径下。
diff --git a/doc/fluid/new_docs/faq/index_cn.rst b/doc/fluid/new_docs/faq/index_cn.rst
new file mode 100644
index 0000000000..bb2ed99217
--- /dev/null
+++ b/doc/fluid/new_docs/faq/index_cn.rst
@@ -0,0 +1,9 @@
+FAQ
+====
+
+本文档对关于PaddlePaddle的一些常见问题提供了解答。如果您的问题未在此处列出,请您到 `PaddlePaddle社区 `_ 查找答案或直接提 `issue `_ ,我们会及时进行回复。
+
+.. toctree::
+   :maxdepth: 1
+
+   faq.rst
diff --git a/doc/fluid/new_docs/user_guides/howto/basic_concept/fluid_basic_concept.rst b/doc/fluid/new_docs/user_guides/howto/basic_concept/fluid_basic_concept.rst
new file mode 100644
index 0000000000..55c3c761f9
--- /dev/null
+++ b/doc/fluid/new_docs/user_guides/howto/basic_concept/fluid_basic_concept.rst
@@ -0,0 +1,392 @@
+================================
+PaddleFluid设计思想和基本使用概念
+================================
+
+
+
+Paddle Fluid 让用户能够像使用 PyTorch 和 TensorFlow Eager Execution 那样执行程序。
+在这些系统中,不再有模型这个概念,应用也不再包含一个用于描述 Operator 图或者一系列层的符号描述,
+而是像通用程序那样描述训练或者预测的过程。
+
+
+深度学习平台的演化
+================
+
+时至今日,深度学习已成为事实上最流行的机器学习技术。学术界多年研究加上工业界的长期实践,提出了若干有效的基本建模单元:
+全连接,卷积,循环神经网络等;设计了各类训练技巧:初始化方法,跨层连接,各类 norm 技术等;
+发明了各种新的优化算法:Adadelta,Adam 等;
+各类固定的网络结构:highway, residual, attention 等纷纷涌现,不胜枚举。
+学术界、工业界多年的付出共同促成了深度学习方法今日的影响力。
+
+学术研究和生产实践中积累了大量的知识,能够很好地解释神经网络中基本模块各自独立的学习能力和特性。
+基本模块和训练技术的组合能够搭建出千变万化的神经网络模型。
+基本模块和训练技术是有限的,但它们的组合却是千变万化的,这是深度学习方法的魅力所在,也是难度所在。
+
+正是基于这样高度模块化的特性,研究者和工程师们都在努力避免重复造轮子,以提高研究和生产的效率,
+这进一步催生了深度学习平台技术的发展,深度学习框架已演变成为 AI 基础设施中重要的一部分。
+从 Theano,到 DistBelief,到 TensorFlow;从 Caffe 到 Caffe2;
+从 Torch 到 PyTorch;从 PaddlePaddle 到 PaddleFluid,
+深度学习平台技术也经历了两代的演化,并向着第三代平台技术迈进。
+
+站在历史发展的今天,当我们准备尝试切换到一个新的深度学习平台,作为支持自己学习和研究的工具时,
+平台技术都发生了哪些演化,能够为我们带来什么便利呢?
+
+先让我们来看看深度学习框架解决的三大问题:
+
+- 如何描述计算以支持未来可能出现的新模型?
+- 如何高效利用异构设备最大化算力?
+- 如何利用网络中的计算机进行分布式计算来处理千万亿级别的数据?
+
+以上三个问题中的第一个和使用者、研究者最为密切相关。
+这篇文章中,我们通过分析 PaddleFluid 的设计理念,
+来了解一个深度学习框架如何抽象深度学习模型,来看看我们的使用经验如何在不同深度学习平台之间过渡和迁移。
+
+如何描述计算
+=============
+
+让我们首先来看看 PaddleFluid 如何描述机器学习模型。
+
+
+PaddleFluid之 :code:`Program`
+
+如何描述计算很大程度上决定了一个神经网络框架计算功能的完备性。
+深度学习模型和方法历经二十多年的发展:“依次执行一组计算的前向,
+再以和前向计算相反的顺序执行反向计算,中间无分支无交互”,
+这样的模型结构已经无法满足研究者和千千万万框架使用者的想象力。
+
+从 `PaddleFluid 的设计目标 `_ 来看,
+在如何描述机器学习模型这一核心问题上,PaddleFluid 的目标是:
+创造一种新的计算描述方式,不但能够描述至今为止人们已知的主流神经网络模型,并且能够支持未来会出现的任意模型。
+
+PaddleFluid 是如何做到支持未来出现的新模型这一目标的呢?PaddleFluid 的设计选择是:
+对用户来说,用一段 :code:`Program` (在 PaddleFluid 内部会被转化为一种叫作 :code:`ProgramDesc` 的描述语言),
+而不是用计算图来描述机器学习模型。 :code:`Program` 用符合用户使用直觉的方式,
+提供一种新的描述语言,能够描述任意复杂的机器学习模型。
+
+对所有计算机专业同学来说,学习编程语言的第一课一定是建立对“程序语言的三种执行结构:顺序执行,条件选择和循环执行”的认识。
+计算机世界的所有可计算逻辑都是由这三种执行结构表示的,用这三种结构描述的逻辑是可计算的。那么同样道理,
+对一个神经网络框架来说,如果可以和程序语言一样提供对这三种执行结构的支持,那么将可以描述任意复杂的、
+可被计算机计算的机器学习模型。PaddleFluid 通过提供对这三种执行结构的支持,来做到对任意复杂模型的描述。
+
+具体来说:
+
+1. Fluid 的核心设计理念都可以类比到程序语言,如果已经有写程序的经验,那么使用 Fluid 构建神经网络模型的体验,将非常接近写程序;
+
+2. 在 PaddleFluid 中,用户不会显式地感知“计算图”这样的概念,一个机器学习模型被描述为一个 Fluid :code:`Program` (Fluid 内部称之为 :code:`ProgramDesc` );
+
+- 一个 Fluid :code:`Program` 由一组嵌套的 :code:`Block` 构成。 :code:`Block` 的概念可以类比到 C++ 或是 Java 中的一对大括号,或是 Python 语言中的一个缩进块;
+- :code:`Block` 中的计算由顺序执行、条件选择或者循环执行三种方式组合,构成复杂的计算逻辑。
+
+3. Fluid :code:`Program` 中包含对计算和计算对象的描述。计算的描述称之为 Operator;计算作用的对象(或者说 Operator 的输入和输出)被统一为 Tensor。
+
+在描述计算和计算的作用对象这一问题上,各个深度学习框架的选择是相同的,如果有一个平台的使用经验,那么将非常容易在各个平台之间进行迁移。
+
+核心使用概念
+=============
+
+下面,我们将更详细地了解核心使用概念在 PaddlePaddle 中的使用方法。
+
+数据表示和计算的对象:Tensor
+--------------------------
+
+Tensor 是向量矩阵概念的扩展,是神经网络模型计算操作的基本对象。这是今天所有主流深度学习平台的共同选择。
+
+可以简单地将 Tensor 理解为一个 N 维向量,它可以有任意多的维度。一个 Tensor 具有两个基本特征:
+
+1. 数据类型:每个 Tensor 的所有元素具有同样的、已知的数据类型;
+
+2. 大小(或者说形状):即维度的个数(rank,阶)以及各维度的长度。
+
+Tensor 某些维度的长度在定义模型阶段可能是未知的,在实际算法执行时才能确定。例如一个 mini-batch 中包含的样本数目(batch size),或者是一个 mini-batch 中序列的最大长度。
+
+PaddleFluid中的Tensor
+""""""""""""""""""""""
+
+PaddleFluid 中也使用 Tensor 作为神经网络中输入输出数据的统一表示。Tensor 的概念在今天主流的深度学习平台中都是完全相同的,可以在各个深度学习框架之间直接无缝迁移。
+
+在 Fluid 中也同样存在三种特殊的 Tensor:
+
+1. 模型中的可学习参数
+
+模型中的可学习参数生存期和整个训练任务一样长,会接受优化算法的更新。在 PaddleFluid 中同样以 :code:`Variable` 表示;
+用户在绝大多数情况下都不需要自己来创建网络中的可学习参数,Fluid 为几乎所有常见的神经网络基本计算模块都提供了封装。
+以最简单的全连接模型为例,下面的代码片段会直接为全连接层创建连接权值 :math:`W` 和偏置( :code:`bias` )两个可学习参数,
+无需显式地调用 variable 相关接口创建可学习参数。
+
+
+::
+
+   import paddle.fluid as fluid
+
+   y = fluid.layers.fc(input=x, size=128, bias_attr=True)
+
+2. 输入输出Tensor
+
+整个神经网络的输入数据也是一个特殊的 Tensor,在这个 Tensor 中,
+一些维度的大小在定义模型时无法确定(通常包括:batch size;
+如果 mini-batch 之间数据可变,也会包括序列的最大长度,图片的宽度和高度等),在定义模型时需要占位;
+PaddleFluid 中使用 :code:`fluid.layers.data` 来接入输入数据, :code:`fluid.layers.data` 需要提供输入 Tensor 的形状信息,
+当遇到无法确定的维度时,相应维度指定为 None ,如下面的代码片段所示:
+
+::
+
+   import paddle.fluid as fluid
+
+   x = fluid.layers.data(name="x", shape=[2, None, 3], dtype="int64")
+
+3. 常量 Tensor:在 PaddleFluid 中需要通过组合 Tensor 和 :code:`fluid.layers.assign` 来实现。
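+
+下面是一个最小示意(这里假设 :code:`fluid.layers.assign` 可以直接接收一个 numpy 数组,并在网络中产生一个取值固定的常量 Tensor):
+
+::
+
+   import numpy
+   import paddle.fluid as fluid
+
+   # 将一个全 1 的 numpy 数组作为常量 Tensor 引入网络(示意用法)
+   ones = fluid.layers.assign(numpy.ones(shape=[3, 2], dtype='float32'))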
+
+
+计算原语:Operation/Operator
+----------------------------
+
+Tensor 是今天所有主流深度学习框架的统一数据表示(输入、输出、中间计算结果、模型的可学习参数都是 Tensor)。
+另一方面,对数据的操作,在主流深度学习框架中也高度统一为:Operator/Operation。
+在中文中,通常我们会习惯将其称之为算子。
+
+注:在 PaddleFluid 中使用 Operator 称呼对 Tensor 的操作。
+
+Operation/Operator 接受多个 Tensor 作为输入,输出若干个 Tensor,表示了从输入到输出的变换。
+
+PaddleFluid中的Operator
+""""""""""""""""""""""""
+
+PaddleFluid 支持的所有算子,可以在 `API 帮助文档 `_ 中查看。
+
+为了便于用户使用,在 Python 端,Fluid 中的 Operator 被进一步封装入 :code:`paddle.fluid.layers` ,
+:code:`paddle.fluid.networks` 等模块。这是因为:一些常见的对 Tensor 的操作可能是由更多基础操作构成的,
+例如:l2 norm 内部由 reduce、elementwise_add、scale 等多个 Operator 组合完成计算逻辑,
+为了提高使用的便利性,框架内部对基础 Operator 进行了一些封装,包括创建 Operator 所依赖的可学习参数,
+可学习参数的初始化细节等,减少用户重复开发的成本。
+
+所有深度学习框架都面临同样的封装问题:在绝大多数情况下,用户很少会与框架底层的 Operator 直接打交道,而是使用框架提供的 layers,networks 等模块,降低开发的代码量。不论是什么样的概念,它们在各框架之间的本质和作用都是相同的:对 Tensor 的变换。
+
+总结
+"""""
+
+不论叫作 Operation、Operator 还是 layers,它们在各深度学习平台中的含义和作用都是相同的:对 Tensor 的变换,是一个深度学习平台提供的基础计算能力,可以在每个平台各自的 API 帮助文档中查到。
+
+在各个深度学习平台都已加入 ONNX 项目的今天,每个深度学习平台提供给大家的基本算子都已趋同;与此同时,每个平台也各有其特点,会提供一些独特的算子,方便某一类任务的开发。
+
+构建模型并执行
+--------------
+
+整个训练任务运行方法如下:
+
+Fluid中的Program和Executor
+"""""""""""""""""""""""""""
+
+1. Fluid 使用 :code:`Program` 描述神经网络模型,对用户来说,并没有计算图的概念。
+用户定义的所有 Tensor 以及对 Tensor 的操作:Operator 都会被加入一段 :code:`Program` 中;
+
+一段 Program 由嵌套的 :code:`Block` 构成,但用户无需显式地创建 :code:`Block` 或是显式地注意到 :code:`Block` 的存在;
+在 Fluid 程序中, :code:`Block` 是在调用 :code:`while_op` , :code:`if_op` , :code:`parallel_do` 等特殊 :code:`Operator` 时,由这些 :code:`Operator` 来创建的;
+对用户使用来说,只需要知道自己正在向一段 Fluid Program 中添加变量( :code:`Tensor` )和操作( :code:`Operator` )即可。
+
+2. Fluid 利用 :code:`Executor` 来执行一段 Fluid :code:`Program` 。
+
+为进一步理解 Fluid 中 :code:`Executor` 的作用,需要先解释一下 Fluid 程序的执行流程。下图展示单机上 Fluid 程序的执行流程:
+
+.. figure:: fluid_local_train.jpeg
+   :scale: 50%
+   :align: center
+
+   Figure.1
+
+   Fluid本地训练任务执行流程图
+
+1. Fluid 设计思想和灵感非常类似于程序设计语言,和用 C++/Java 等编译型语言编写程序的过程非常类似,Fluid 程序执行分为两个重要阶段:编译时和运行时;
+
+2. 编译期,用户通过调用 Fluid 提供的算子,向一段 :code:`Program` 中添加变量(Tensor)以及对变量的操作(Operators 或者 Layers)。用户只需要描述核心的前向计算,不需要关心反向计算,以及分布式、异构设备下如何计算;
+
+3. 原始的 :code:`Program` 在平台内部转换为中间描述语言: :code:`ProgramDesc` ;
+
+4. 编译期最重要的一个功能模块是 Transpiler。Transpiler 接受一段 :code:`ProgramDesc` ,输出一段变换后的 :code:`ProgramDesc` ,作为后端 Executor 最终需要执行的 :code:`Fluid Program` ;
+
+最为常用的 Transpiler 包括:
+
+1. 内存优化 Transpiler:通过对变量读写依赖关系的分析,插入内存回收 Operator 以维持运行过程中较小的内存开销;
+
+2. 分布式环境下的 Transpiler:接受用户定义的 local Program ,生成 Parameter Client 和 Parameter Server 执行的两段 :code:`Program` 。
+
+3. 后端 Executor 接受 Transpiler 输出的这段 :code:`Program` ,依次执行其中的 Operator(可以类比为程序语言中的指令),在执行过程中会为 Operator 创建所需的输入输出并进行管理。
+
+从上面的过程中可以看到,Fluid 程序的执行过程分为:编译期定义 :code:`Program` ,和运行时创建 :code:`Executor` 运行 :code:`Program` 。
+ :code:`Executor` 执行一段 :code:`Program` 的过程是不可交互和不可中断的。
+
+在 Fluid 中,可以创建多于一段 :code:`Program` 。默认情况下,一个 PaddleFluid 程序中存在 2 段 Program:
+
+1. :code:`fluid.framework.default_startup_program` :其中定义了创建模型参数,输入输出,以及模型中可学习参数的初始化等各种操作;
+
+- :code:`default_startup_program` 可以由框架自动生成,使用时无需显式地创建;
+- 如果调用修改了参数的默认初始化方式,框架会自动地将相关的修改加入 :code:`default_startup_program` 。
+
+2. :code:`fluid.framework.default_main_program` :定义了神经网络模型,前向反向计算,以及优化算法对网络中可学习参数的更新;
+
+- 使用 Fluid 的核心就是构建起 :code:`default_main_program` 。
+
+3. PaddleFluid 中的 :code:`Scope` 类似于 TensorFlow 中的 collection 这一概念,但在 Fluid 中 :code:`Scope` 是框架后端概念,用户无法直接操作。因此,在使用框架时无需关心。
+
+总结
+"""""
+
+Fluid 中通过 Executor 来执行一段用户定义的 Fluid :code:`Program` 。
+
+1. Executor 连接了 Fluid 的前端和后端;
+
+2. Executor 接受用户定义的原始模型(一段 :code:`Program` ),通过调用系统中不同功能的 :code:`Transpiler` 完成对原始 :code:`Program` 的变换和优化。
+
+完整实例:如何完成一个机器学习模型的训练
+===================================
+
+
+
+这一节,我们以 MNIST 手写数字识别问题(机器学习任务的“Hello World”问题和数据)为例,通过一个可以运行的完整实例,来学习上文介绍的概念如何在 PaddleFluid 平台中使用。
+
+步骤1:定义数据
+----------------
+
+PaddleFluid 中以 :code:`fluid.layers.data` 来接收输入数据。
+
+::
+
+   import numpy as np
+
+   import paddle.fluid as fluid
+   import paddle.v2 as paddle
+
+   # define the input layers for the network.
+   x = fluid.layers.data(name="img", shape=[1, 28, 28], dtype="float32")
+   y_ = fluid.layers.data(name="label", shape=[1], dtype="int64")
+
+Fluid 中 Tensor 的第 0 维度固定为 batch size。在上面代码段中,图像输入 :code:`x` 的形状为:[1, 28, 28]。这三个维度的含义分别是:channel 数目,图像的高度和宽度。
+
+实际上 Fluid 框架内部,一幅图像输入是一个 4-D Tensor,所有 Tensor 的第 0 维固定为 batch size。框架内部会自动为 batch size 进行填充占位,用户无需指定。
+
+除去 batch size(第 0 维度)外,如果 Tensor 某一维度的大小只能在运行时确定,可以在该位置上直接指定 :code:`None` 进行占位。
+
+步骤2:定义模型
+--------------
+
+通过调用 Fluid 提供的算子定义网络结构。Fluid 模型分为模型结构和优化方法两部分。这一点与 TensorFlow 程序十分相似,使用概念可以直接对应进行迁移。
+
+::
+
+   # define the network topology.
+   y = fluid.layers.fc(input=x, size=10, act="softmax")
+   loss = fluid.layers.cross_entropy(input=y, label=y_)
+   avg_loss = fluid.layers.mean(loss)
+
+   # define the optimization algorithm.
+   optimizer = fluid.optimizer.Adam(learning_rate=1e-3)
+   optimizer.minimize(avg_loss)
+
+Fluid 使用 Program 而不是计算图描述模型,一般情况下,用户无需关心 Program 的细节。当调用以上 layers 时,会向一个全局的 Program: :code:`fluid.framework.default_main_program` 中插入变量(Tensor)和对变量的操作(上述代码段中的 layers 和 optimizer)。
+
+步骤3:参数初始化
+----------------
+
+如上文介绍,Fluid 程序中的 Executor 是连接 Fluid 前端和后端的接口。
+
+默认情况下,一个 Fluid 模型中至少存在两段 Program。用于初始化网络中的可学习参数的那一段 :code:`Program` 叫作 :code:`fluid.default_startup_program()` 。
+
+只有执行器 executor 可以执行 Fluid Program,因此,在初始化网络中的可学习参数之前,需要首先创建一个 Fluid executor。
+
+::
+
+   # define the executor.
+   place = fluid.CPUPlace()
+   exe = fluid.Executor(place)
+   exe.run(fluid.default_startup_program())
+
+在以上代码段中, :code:`place` 用于告诉 executor 一段 Fluid Program 在何种设备上执行,
+常见的有 :code:`fluid.CPUPlace()` 和 :code:`fluid.CUDAPlace()` 。
+
+步骤4:数据输入 + 执行模型训练
+----------------------------
+
+我们在步骤 2 中定义的神经网络模型最终被插入一段叫做 :code:`fluid.framework.default_main_program` 的 Fluid Program 中。
+
+网络可学习参数初始化之后,可以通过让执行器 Executor 执行这段 :code:`fluid.framework.default_main_program` 来进行训练。
+
+::
+
+   train_reader = paddle.batch(
+       paddle.reader.shuffle(paddle.dataset.mnist.train(), buf_size=5000),
+       batch_size=BATCH_SIZE)
+   feeder = fluid.DataFeeder(place=place, feed_list=[x, y_])
+
+   for pass_id in range(100):
+       for batch_id, data in enumerate(train_reader()):
+           loss = exe.run(
+               fluid.framework.default_main_program(),
+               feed=feeder.feed(data),
+               fetch_list=[avg_loss])
+           print("Cur Cost : %f" % (np.array(loss[0])[0]))
+
+从上面的代码片段中可以看到,Fluid 程序的训练过程和 TensorFlow 程序的训练过程非常接近,
+都放在一个 :code:`for` 循环中,循环读取一个 mini-batch 数据,
+调用执行器执行 Fluid :code:`default_main_program` :接收 mini-batch 输入,在其上进行前向,反向和参数更新计算。
+
+`注:上面程序使用了 Fluid 内置的 MNIST 数据,和我们提供给 TensorFlow 示例程序的 MNIST 数据完全一样。`
+
+步骤5:观察模型效果
+-----------------
+
+以上步骤已经构成了完整的 PaddleFluid 模型训练程序,每个 batch 观察一次 loss,可以直观看到模型的迭代效果:
+
+.. figure:: fluid_mnist.png
+   :scale: 40%
+   :align: center
+
+   Figure.2
+
+   Fluid MNIST手写数字识别任务代价下降曲线
+
+附:完整代码
+------------
+
+::
+
+   import numpy as np
+
+   import paddle.fluid as fluid
+   import paddle.v2 as paddle
+
+
+   def main():
+       BATCH_SIZE = 128
+
+       # define the input layers for the network.
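+       # each image is a single-channel 28x28 tensor; the batch dimension (dim 0) is appended automatically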
+ x = fluid.layers.data(name="img", shape=[1, 28, 28], dtype="float32") + y_ = fluid.layers.data(name="label", shape=[1], dtype="int64") + + # define the network topology. + y = fluid.layers.fc(input=x, size=10, act="softmax") + loss = fluid.layers.cross_entropy(input=y, label=y_) + avg_loss = fluid.layers.mean(loss) + + optimizer = fluid.optimizer.Adam(learning_rate=5e-3) + optimizer.minimize(avg_loss) + + # define the executor. + place = fluid.CPUPlace() + exe = fluid.Executor(place) + exe.run(fluid.default_startup_program()) + + train_reader = paddle.batch( + paddle.reader.shuffle(paddle.dataset.mnist.train(), buf_size=5000), + batch_size=BATCH_SIZE) + feeder = fluid.DataFeeder(place=place, feed_list=[x, y_]) + + for pass_id in range(100): + for batch_id, data in enumerate(train_reader()): + loss = exe.run( + fluid.framework.default_main_program(), + feed=feeder.feed(data), + fetch_list=[avg_loss]) + print("Cur Cost : %f" % (np.array(loss[0])[0])) + + if __name__ == "__main__": + main() diff --git a/doc/fluid/new_docs/user_guides/howto/basic_concept/fluid_local_train.jpeg b/doc/fluid/new_docs/user_guides/howto/basic_concept/fluid_local_train.jpeg new file mode 100644 index 0000000000..0a495901fa Binary files /dev/null and b/doc/fluid/new_docs/user_guides/howto/basic_concept/fluid_local_train.jpeg differ diff --git a/doc/fluid/new_docs/user_guides/howto/basic_concept/fluid_mnist.png b/doc/fluid/new_docs/user_guides/howto/basic_concept/fluid_mnist.png new file mode 100644 index 0000000000..e5ad0ba058 Binary files /dev/null and b/doc/fluid/new_docs/user_guides/howto/basic_concept/fluid_mnist.png differ diff --git a/doc/fluid/new_docs/user_guides/howto/configure_simple_model/index.rst b/doc/fluid/new_docs/user_guides/howto/configure_simple_model/index.rst new file mode 100644 index 0000000000..5946a2ccb7 --- /dev/null +++ b/doc/fluid/new_docs/user_guides/howto/configure_simple_model/index.rst @@ -0,0 +1,88 @@ +.. 
_user_guide_configure_simple_model:
+
+##############
+配置简单的网络
+##############
+
+在解决实际问题时,可以先从逻辑层面对问题进行建模,明确模型所需要的 **输入数据类型**、**计算逻辑**、**求解目标** 以及 **优化算法**。PaddlePaddle提供了丰富的算子来实现模型逻辑。下面以一个简单回归任务举例说明如何使用PaddlePaddle构建模型。该例子完整代码参见 `fit_a_line `_。
+
+问题描述及定义
+##############
+
+问题描述:给定一组数据 :math:`<X, Y>`,求解出函数 :math:`f`,使得 :math:`y=f(x)`,其中 :math:`x \in X` 表示一条样本的特征,为 :math:`13` 维的实数向量;:math:`y \in Y` 为一实数,表示该样本对应的值。
+
+我们可以尝试用回归模型来对问题建模。回归问题的损失函数有很多,这里选择常用的均方误差。为简化问题,这里假定 :math:`f` 为简单的线性变换函数,同时选用随机梯度下降算法来求解模型。
+
++----------------+----------------------------------------------+
+| 输入数据类型   | 样本特征: 13 维 实数                         |
++                +----------------------------------------------+
+|                | 样本标签: 1 维 实数                          |
++----------------+----------------------------------------------+
+| 计算逻辑       | 使用线性模型,产生 1维实数作为模型的预测输出 |
++----------------+----------------------------------------------+
+| 求解目标       | 最小化模型预测输出与样本标签间的均方误差     |
++----------------+----------------------------------------------+
+| 优化算法       | 随机梯度下降                                 |
++----------------+----------------------------------------------+
+
+使用PaddlePaddle建模
+###################
+
+从逻辑层面明确了输入数据格式、模型结构、损失函数以及优化算法后,需要使用PaddlePaddle提供的API及算子来实现模型逻辑。一个典型的模型主要包含4个部分,分别是:输入数据格式定义,模型前向计算逻辑,损失函数以及优化算法。
+
+数据层
+------
+
+PaddlePaddle提供了 :code:`fluid.layers.data()` 算子来描述输入数据的格式。
+
+:code:`fluid.layers.data()` 算子的输出是一个Variable。这个Variable的实际类型是Tensor。Tensor具有强大的表征能力,可以表示多维数据。为了精确描述数据结构,通常需要指定数据shape以及数值类型type。其中shape为一个整数向量,type为一个表示数值类型的字符串。目前支持的数据类型参考 :ref:`user_guide_paddle_support_data_types` 。模型训练一般会使用batch的方式读取数据,而batch的size在训练过程中可能不固定。data算子会依据实际数据来推断batch size,所以这里提供shape时不用关心batch size,只需关心一条样本的shape即可,更高级用法请参考 :ref:`user_guide_customize_batch_size_rank`。由上可知,:math:`x` 为 :math:`13` 维的实数向量,:math:`y` 为实数,可使用下面代码定义数据层:
+
+.. code-block:: python
+
+    x = fluid.layers.data(name='x', shape=[13], dtype='float32')
+    y = fluid.layers.data(name='y', shape=[1], dtype='float32')
+
+该模型使用的数据比较简单,事实上data算子还可以描述变长的、嵌套的序列数据。也可以使用 :code:`open_files` 打开文件进行训练。更详细的文档可参照 :ref:`user_guide_prepare_data`。
+
+前向计算逻辑
+------------
+
+实现一个模型最重要的部分是实现计算逻辑,PaddlePaddle提供了丰富的算子。这些算子的封装粒度不同,通常对应一种或一组变换逻辑。算子输出即为对输入数据执行变换后的结果。用户可以灵活使用算子来完成复杂的模型逻辑。比如图像相关任务中会使用较多的卷积算子、序列任务中会使用LSTM/GRU等算子。复杂模型通常会组合多种算子,以完成复杂的变换。PaddlePaddle提供了非常自然的方式来组合算子,一般地可以使用下面的方式:
+
+.. code-block:: python
+
+    op_1_out = fluid.layers.op_1(input=op_1_in, ...)
+    op_2_out = fluid.layers.op_2(input=op_1_out, ...)
+    ...
+
+其中op_1和op_2表示算子类型,可以是fc来执行线性变换(全连接),也可以是conv来执行卷积变换等。通过算子的输入输出的连接来定义算子的计算顺序以及数据流方向。上面的例子中,op_1的输出是op_2的输入,那么在执行计算时,会先计算op_1,然后计算op_2。更复杂的模型可能需要使用控制流算子,依据输入数据来动态执行,针对这种情况,PaddlePaddle提供了IfElseOp和WhileOp等。算子的文档可参考 :code:`fluid.layers`。具体到这个任务,我们使用一个fc算子:
+
+.. code-block:: python
+
+    y_predict = fluid.layers.fc(input=x, size=1, act=None)
+
+损失函数
+--------
+
+损失函数对应求解目标,我们可以通过最小化损失来求解模型。大多数模型使用的损失函数,输出是一个实数值。但是PaddlePaddle提供的损失算子一般是针对一条样本计算。当输入一个batch的数据时,损失算子的输出有多个值,每个值对应一条样本的损失,所以通常会在损失算子后面使用mean等算子,来对损失做归约。模型在一次前向迭代后会得到一个损失值,PaddlePaddle会自动执行链式求导法则计算模型里面每个参数和变量对应的梯度值。这里使用均方误差损失:
+
+.. code-block:: python
+
+    cost = fluid.layers.square_error_cost(input=y_predict, label=y)
+    avg_cost = fluid.layers.mean(cost)
+
+优化方法
+--------
+
+确定损失函数后,可以通过前向计算得到损失值,然后通过链式求导法则得到参数的梯度值。获取梯度值后需要更新参数,最简单的算法是随机梯度下降法: :math:`w=w - \eta \cdot g` 。但是普通的随机梯度下降算法存在一些问题:比如收敛不稳定等。为了改善模型的训练速度以及效果,学术界先后提出了很多优化算法,包括: :code:`Momentum`、:code:`RMSProp`、:code:`Adam` 等。这些优化算法采用不同的策略来更新模型参数,一般可以针对具体任务和具体模型来选择优化算法。不管使用何种优化算法,学习率一般是一个需要指定的比较重要的超参数,需要通过实验仔细调整。这里采用随机梯度下降算法:
+.. code-block:: python
+
+    sgd_optimizer = fluid.optimizer.SGD(learning_rate=0.001)
+    sgd_optimizer.minimize(avg_cost)
+
+更多优化算子可以参考 :code:`fluid.optimizer` 。
+
+下一步做什么?
+##############
+
+使用PaddlePaddle实现模型时需要关注 **数据层**、**前向计算逻辑**、**损失函数** 和 **优化方法**。不同的任务需要的数据格式不同,涉及的计算逻辑不同,损失函数不同,优化方法也不同。PaddlePaddle提供了丰富的模型示例,可以以这些示例为参考来构建自己的模型结构。用户可以访问 `模型库 `_ 查看官方提供的示例。
diff --git a/doc/fluid/new_docs/user_guides/howto/debug/index.rst b/doc/fluid/new_docs/user_guides/howto/debug/index.rst
new file mode 100644
index 0000000000..0878e17b40
--- /dev/null
+++ b/doc/fluid/new_docs/user_guides/howto/debug/index.rst
@@ -0,0 +1,10 @@
+############
+Debug 工具
+############
+
+PaddlePaddle 提供了如下方式,方便 Debug 训练情况:
+
+.. toctree::
+   :maxdepth: 2
+
+   visualdl.md
diff --git a/doc/fluid/new_docs/user_guides/howto/debug/visualdl.md b/doc/fluid/new_docs/user_guides/howto/debug/visualdl.md
new file mode 100644
index 0000000000..a2f30823a6
--- /dev/null
+++ b/doc/fluid/new_docs/user_guides/howto/debug/visualdl.md
@@ -0,0 +1,218 @@
+# VisualDL (Visualize the Deep Learning)
+


+
+## 介绍
+VisualDL是一个面向深度学习任务设计的可视化工具,包含了scalar、参数分布、模型结构、图像可视化等功能,项目正处于高速迭代中,新的组件会不断加入。
+
+目前大多数DNN平台均使用Python作为配置语言,VisualDL原生支持Python,
+通过在模型的Python配置中添加几行代码,便可以为训练过程提供丰富的可视化支持。
+
+除了Python SDK之外,VisualDL底层采用C++编写,其暴露的C++ SDK也可以集成到其他平台中,
+实现原生的性能和定制效果。
+
+## 组件
+VisualDL 目前支持 4 种组件:
+
+- graph
+- scalar
+- image
+- histogram
+
+### Graph
+兼容 [ONNX](https://github.com/onnx/onnx)(Open Neural Network Exchange),通过与 Python SDK 的结合,VisualDL 可以兼容包括 PaddlePaddle、PyTorch、MXNet 在内的大部分主流 DNN 平台。
+
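+以模型结构可视化为例,下面是一个示意用法:假设当前目录下已有一个 ONNX 格式的模型文件 model.onnx(文件名仅为示意),可以通过下文“启动Board”一节介绍的 `--model_pb` 参数将其载入并查看:
+
+```
+# 启动 board 并载入 ONNX 模型文件,浏览器访问对应端口即可查看模型结构(示意)
+visualDL --logdir=scratch_log --port=8080 --model_pb=model.onnx
+```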


+ +### Scalar +可以用于展示训练测试的误差趋势 + +


+ +### Image +可以用于可视化任何tensor,或模型生成的图片 + +


+ +### Histogram + +用于可视化任何tensor中元素分布的变化趋势 + +


+
+## 快速尝试
+请使用下面的命令,来快速测试 VisualDL。
+
+```
+# 安装,建议是在虚拟环境或anaconda下。
+pip install --upgrade visualdl
+
+# 运行一个例子,vdl_create_scratch_log 将创建测试日志
+vdl_create_scratch_log
+visualDL --logdir=scratch_log --port=8080
+
+# 访问 http://127.0.0.1:8080
+```
+
+如果以上步骤出现问题,很可能是因为 python 或 pip 的版本、位置不同所致,以下安装方法能够解决。
+
+## 使用 virtualenv 安装
+
+[Virtualenv](https://virtualenv.pypa.io/en/stable/) 能创建独立Python环境,也能确保Python和pip的相对位置正确。
+
+在macOS上,安装pip和virtualenv如下:
+```
+sudo easy_install pip
+pip install --upgrade virtualenv
+```
+
+在Linux上,安装pip和virtualenv如下:
+```
+sudo apt-get install python3-pip python3-dev python-virtualenv
+```
+
+然后创建一个虚拟环境:
+```
+virtualenv ~/vdl  # for Python2.7
+virtualenv -p python3 ~/vdl  # for Python 3.x
+```
+
+```~/vdl``` 是你的 Virtualenv 目录,你也可以选择任一目录。
+
+激活虚拟环境如下:
+```
+source ~/vdl/bin/activate
+```
+
+现在再安装 VisualDL 和运行范例:
+
+```
+pip install --upgrade visualdl
+
+# 运行一个例子,vdl_create_scratch_log 将创建测试日志
+vdl_create_scratch_log
+visualDL --logdir=scratch_log --port=8080
+
+# 访问 http://127.0.0.1:8080
+```
+
+如果在虚拟环境下仍然遇到安装问题,请尝试以下方法。
+
+
+## 使用 Anaconda 安装
+
+Anaconda是一个用于科学计算的Python发行版,提供了包管理与环境管理的功能,可以很方便地解决多版本python并存、切换以及各种第三方包安装问题。
+
+请根据[Anaconda下载网站](https://www.anaconda.com/download) 的指示去下载和安装Anaconda,
+并下载 Python 3.6 版本的 command-line installer。
+
+创建名为 ```vdl``` (或其他任何名字)的 conda 环境:
+```
+conda create -n vdl pip python=2.7 # or python=3.3, etc.
+```
+
+激活conda环境如下:
+```
+source activate vdl
+```
+
+现在再安装 VisualDL 和运行范例:
+
+```
+pip install --upgrade visualdl
+
+# 运行一个例子,vdl_create_scratch_log 将创建测试日志
+vdl_create_scratch_log
+visualDL --logdir=scratch_log --port=8080
+
+# 访问 http://127.0.0.1:8080
+```
+
+如果仍然遇到安装问题,请尝试以下用源代码安装的方法。
+
+### 使用源代码安装
+```
+# 建议是在虚拟环境或anaconda下。
+git clone https://github.com/PaddlePaddle/VisualDL.git
+cd VisualDL
+
+python setup.py bdist_wheel
+pip install --upgrade dist/visualdl-*.whl
+```
+
+如果打包和安装遇到其他问题,或者不想安装、只想运行 Visual DL,可以看[这里](https://github.com/PaddlePaddle/VisualDL/blob/develop/docs/how_to_dev_frontend_en.md)
+
+
+## SDK
+VisualDL 同时提供了 Python SDK 和 C++ SDK,以支持不同的使用方式。
+
+### Python SDK
+VisualDL 现在支持 Python 2 和 Python 3。
+
+以最简单的Scalar组件为例,尝试创建一个scalar组件并插入多个时间步的数据:
+
+```python
+import random
+from visualdl import LogWriter
+
+logdir = "./tmp"
+logger = LogWriter(logdir, sync_cycle=10000)
+
+# mark the components with 'train' label.
+with logger.mode("train"):
+    # create a scalar component called 'scalars/scalar0'
+    scalar0 = logger.scalar("scalars/scalar0")
+
+# add some records during DL model running.
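+# each record is a pair of (global step, scalar value)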
+for step in range(100):
+    scalar0.add_record(step, random.random())
+```
+
+### C++ SDK
+与上面 Python SDK 代码完全一致的 C++ SDK 用法如下:
+```c++
+#include <cstdlib>
+#include <string>
+#include "visualdl/sdk.h"
+
+namespace vs = visualdl;
+namespace cp = visualdl::components;
+
+int main() {
+  const std::string dir = "./tmp";
+  vs::LogWriter logger(dir, 10000);
+
+  logger.SetMode("train");
+  auto tablet = logger.AddTablet("scalars/scalar0");
+
+  cp::Scalar scalar0(tablet);
+
+  for (int step = 0; step < 1000; step++) {
+    float v = (float)std::rand() / RAND_MAX;
+    scalar0.AddRecord(step, v);
+  }
+
+  return 0;
+}
+```
+## 启动Board
+训练过程中产生了日志数据后,就可以启动 board 进行可视化信息的实时预览:
+
+```
+visualDL --logdir <日志目录>
+```
+
+board 还支持以下参数来实现远程访问:
+
+- `--host` 设定IP
+- `--port` 设定端口
+- `--model_pb` 指定 ONNX 格式的模型文件
diff --git a/doc/fluid/new_docs/user_guides/howto/evaluation/index.rst b/doc/fluid/new_docs/user_guides/howto/evaluation/index.rst
new file mode 100644
index 0000000000..6f6698cadc
--- /dev/null
+++ b/doc/fluid/new_docs/user_guides/howto/evaluation/index.rst
@@ -0,0 +1,10 @@
+############
+模型评估和调试
+############
+
+PaddlePaddle Fluid提供了常用的模型评估指标,并提供了VisualDL工具可视化模型效果。
+
+.. toctree::
+   :maxdepth: 2
+
+   metrics
diff --git a/doc/fluid/new_docs/user_guides/howto/evaluation/metrics.rst b/doc/fluid/new_docs/user_guides/howto/evaluation/metrics.rst
new file mode 100644
index 0000000000..f37968a503
--- /dev/null
+++ b/doc/fluid/new_docs/user_guides/howto/evaluation/metrics.rst
@@ -0,0 +1,62 @@
+############
+模型评估
+############
+
+模型评估是用指标来反映模型在预期目标下的精度。观察哪些指标由模型任务决定,它们是在训练中调整超参数、评估模型效果的重要依据。
+metric函数的输入为当前模型的预测值preds和标签labels,输出是自定义的。metric函数和loss函数非常相似,但metric并不是模型训练网络的组成部分。
+
+用户可以通过训练网络得到当前的预测preds和labels,在Python端定制metric函数;也可以通过定制C++ Operator的方式,在GPU上加速metric计算。
+
+paddle.fluid.metrics模块提供了该功能。
+
+
+常用指标
+############
+
+metric函数的指标构建方法因模型任务不同而异。
+
+回归类型任务的labels是实数,因此loss和metric函数的构建相同,可参考MSE的方法。
+分类任务常用指标为分类指标,本文提到的一般是二分类指标,多分类和多标签任务需要查看对应的API文档。例如排序指标AUC:多分类任务可以转化为0-1分类任务来处理,此时AUC指标仍然适用。
+Fluid中包含了常用分类指标,例如Precision, Recall, Accuracy等,更多请阅读API文档。以 :ref:`Precision` 为例,具体方法为
+
+.. code-block:: python
+
+   >>> import paddle.fluid as fluid
+   >>> labels = fluid.layers.data(name="label", shape=[1], dtype="int32")
+   >>> data = fluid.layers.data(name="data", shape=[32, 32], dtype="int32")
+   >>> pred = fluid.layers.fc(input=data, size=1000, act="tanh")
+   >>> acc = fluid.metrics.Precision()
+   >>> for pass_id in range(PASSES):
+   >>>     acc.reset()
+   >>>     for data in train_reader():
+   >>>         loss, preds, labels = exe.run(fetch_list=[cost, preds, labels])
+   >>>         acc.update(preds=preds, labels=labels)
+   >>>     numpy_acc = acc.eval()
+
+
+其他任务例如MultiTask Learning、Metric Learning、Learning To Rank的各种指标构造方法请参考API文档。
+
+自定义指标
+############
+Fluid支持自定义指标,灵活支持各类计算任务。下文通过一个简单的计数器metric函数,实现对模型的评估。
+其中preds是模型预测值,labels是给定的标签。
+
+..
code-block:: python + + >>> class MyMetric(MetricBase): + >>> def __init__(self, name=None): + >>> super(MyMetric, self).__init__(name) + >>> self.counter = 0 # simple counter + + >>> def reset(self): + >>> self.counter = 0 + + >>> def update(self, preds, labels): + >>> if not _is_numpy_(preds): + >>> raise ValueError("The 'preds' must be a numpy ndarray.") + >>> if not _is_numpy_(labels): + >>> raise ValueError("The 'labels' must be a numpy ndarray.") + >>> self.counter += sum(preds == labels) + + >>> def eval(self): + >>> return self.counter diff --git a/doc/fluid/new_docs/user_guides/howto/modification/foo.rst b/doc/fluid/new_docs/user_guides/howto/modification/foo.rst new file mode 100644 index 0000000000..9d43c91a85 --- /dev/null +++ b/doc/fluid/new_docs/user_guides/howto/modification/foo.rst @@ -0,0 +1,3 @@ +### +FAQ +### diff --git a/doc/fluid/new_docs/user_guides/howto/prepare_data/feeding_data.rst b/doc/fluid/new_docs/user_guides/howto/prepare_data/feeding_data.rst new file mode 100644 index 0000000000..c3bf033bb8 --- /dev/null +++ b/doc/fluid/new_docs/user_guides/howto/prepare_data/feeding_data.rst @@ -0,0 +1,169 @@ +.. _user_guide_use_numpy_array_as_train_data: + +########################### +使用Numpy Array作为训练数据 +########################### + +PaddlePaddle Fluid支持使用 :code:`fluid.layers.data()` 配置数据层; +再使用 Numpy Array 或者直接使用Python创建C++的 +:code:`fluid.LoDTensor` , 通过 :code:`Executor.run(feed=...)` 传给 +:code:`fluid.Executor` 或 :code:`fluid.ParallelExecutor` 。 + +数据层配置 +########## + +通过 :code:`fluid.layers.data()` 可以配置神经网络中需要的数据层。具体方法为: + +.. code-block:: python + + import paddle.fluid as fluid + + image = fluid.layers.data(name="image", shape=[3, 224, 224]) + label = fluid.layers.data(name="label", shape=[1], dtype="int64") + + # use image/label as layer input + prediction = fluid.layers.fc(input=image, size=1000, act="softmax") + loss = fluid.layers.cross_entropy(input=prediction, label=label) + ... + +上段代码中,:code:`image` 和 :code:`label` 是通过 :code:`fluid.layers.data` +创建的两个输入数据层。其中 :code:`image` 是 :code:`[3, 224, 224]` 维度的浮点数据; +:code:`label` 是 :code:`[1]` 维度的整数数据。这里需要注意的是: + +1. Fluid中默认使用 :code:`-1` 表示 batch size 维度,默认情况下会在 :code:`shape` + 的第一个维度添加 :code:`-1` 。 所以 上段代码中, 我们可以接受将一个 + :code:`[32, 3, 224, 224]` 的numpy array传给 :code:`image` 。 如果想自定义batch size + 维度的位置的话,请设置 :code:`fluid.layers.data(append_batch_size=False)` 。 + 请参考进阶使用中的 :ref:`user_guide_customize_batch_size_rank` 。 + + +2. Fluid中用来做类别标签的数据类型是 :code:`int64`,并且标签从0开始。可用数据类型请参考 :ref:`user_guide_paddle_support_data_types`。 + +.. _user_guide_feed_data_to_executor: + +传递训练数据给执行器 +#################### + +:code:`Executor.run` 和 :code:`ParallelExecutor.run` 都接受一个 :code:`feed` 参数。 +这个参数是一个Python的字典。它的键是数据层的名字,例如上文代码中的 :code:`image`。 +它的值是对应的numpy array。 + +例如: + +.. code-block:: python + + exe = fluid.Executor(fluid.CPUPlace()) + exe.run(feed={ + "image": numpy.random.random(size=(32, 3, 224, 224)).astype('float32'), + "label": numpy.random.random(size=(32, 1)).astype('int64') + }) + +进阶使用 +######## + +如何传入序列数据 +---------------- + +序列数据是PaddlePaddle Fluid支持的特殊数据类型,可以使用 :code:`LoDTensor` 作为 +输入数据类型。它需要用户: 1. 传入一个mini-batch需要被训练的所有数据; +2.每个序列的长度信息。 +用户可以使用 :code:`fluid.create_lod_tensor` 来创建 :code:`LoDTensor`。 + +传入序列信息的时候,需要设置序列嵌套深度,:code:`lod_level`。 +例如训练数据是词汇组成的句子,:code:`lod_level=1`;训练数据是 词汇先组成了句子, +句子再组成了段落,那么 :code:`lod_level=2`。 + +例如: + +.. code-block:: python + + sentence = fluid.layers.data(name="sentence", dtype="int64", shape=[1], lod_level=1) + + ... 
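+    # 下面构造一个含 3 个序列(长度分别为 4、1、2)的 LoDTensor 作为输入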
+
+    exe.run(feed={
+        "sentence": create_lod_tensor(
+            data=numpy.array([1, 3, 4, 5, 3, 6, 8], dtype='int64').reshape(-1, 1),
+            lod=[4, 1, 2],
+            place=fluid.CPUPlace()
+        )
+    })
+
+训练数据 :code:`sentence` 包含三个样本,它们的长度分别是 :code:`4, 1, 2`。
+它们分别是 :code:`data[0:4]`, :code:`data[4:5]` 和 :code:`data[5:7]`。
+
+如何分别设置ParallelExecutor中每个设备的训练数据
+------------------------------------------------
+
+用户使用 :code:`ParallelExecutor.run(feed=...)` 传递训练数据时,
+可以显式指定每一个训练设备(例如GPU)上的数据。
+用户需要将一个列表传递给 :code:`feed` 参数,列表中的每一个元素都是一个字典。
+这个字典的键是数据层的名字,值是数据层的值。
+
+例如:
+
+.. code-block:: python
+
+    parallel_executor = fluid.ParallelExecutor()
+    parallel_executor.run(
+        feed=[
+            {
+                "image": numpy.random.random(size=(32, 3, 224, 224)).astype('float32'),
+                "label": numpy.random.random(size=(32, 1)).astype('int64')
+            },
+            {
+                "image": numpy.random.random(size=(16, 3, 224, 224)).astype('float32'),
+                "label": numpy.random.random(size=(16, 1)).astype('int64')
+            },
+        ]
+    )
+
+上述代码中,GPU0会训练 32 个样本,而 GPU1训练 16 个样本。
+
+
+.. _user_guide_customize_batch_size_rank:
+
+自定义BatchSize维度
+-------------------
+
+PaddlePaddle Fluid默认batch size是数据的第一维度,以 :code:`-1` 表示。但是在高级
+使用中,batch_size 可以固定,也可以用其他维度或者多个维度来表示。这都需要设置
+:code:`fluid.layers.data(append_batch_size=False)` 来完成。
+
+1. 固定batch size维度
+
+   .. code-block:: python
+
+      image = fluid.layers.data(name="image", shape=[32, 784], append_batch_size=False)
+
+   这里,:code:`image` 永远是一个 :code:`[32, 784]` 大小的矩阵。
+
+2. 使用其他维度表示batch size
+
+   .. code-block:: python
+
+      sentence = fluid.layers.data(name="sentence",
+                                   shape=[80, -1, 1],
+                                   append_batch_size=False,
+                                   dtype="int64")
+
+   这里 :code:`sentence` 的中间维度是batch size。这种数据排布会用在定长的循环神经
+   网络中。
+
+
+.. _user_guide_paddle_support_data_types:
+
+Fluid目前支持的数据类型
+-----------------------
+
+PaddlePaddle Fluid目前支持的数据类型包括:
+
+   * float16: 部分操作支持
+   * float32: 主要实数类型
+   * float64: 次要实数类型,支持大部分操作
+   * int32: 次要标签类型
+   * int64: 主要标签类型
+   * uint64: 次要标签类型
+   * bool: 控制流数据类型
+   * int16: 次要标签类型
+   * uint8: 输入数据类型,可用于图像像素
\ No newline at end of file
diff --git a/doc/fluid/new_docs/user_guides/howto/prepare_data/index.rst b/doc/fluid/new_docs/user_guides/howto/prepare_data/index.rst
new file mode 100644
index 0000000000..56fa928029
--- /dev/null
+++ b/doc/fluid/new_docs/user_guides/howto/prepare_data/index.rst
@@ -0,0 +1,52 @@
+.. _user_guide_prepare_data:
+
+########
+准备数据
+########
+
+PaddlePaddle Fluid支持两种传入数据的方式:
+
+1. 用户需要使用 :code:`fluid.layers.data`
+配置数据输入层,并在 :code:`fluid.Executor` 或 :code:`fluid.ParallelExecutor`
+中,使用 :code:`executor.run(feed=...)` 传入训练数据。
+
+2. 用户需要先将训练数据
+转换成 Paddle 识别的 :code:`fluid.recordio_writer` 格式,再使用
+:code:`fluid.layers.open_files` 以及 :code:`fluid.layers.reader` 配置数据读取。
+
+这两种准备数据方法的比较如下:
+
+..
_user_guide_prepare_data_comparision: + ++------------+----------------------------------+---------------------------------------+ +| | Feed数据 | 使用Reader | ++============+==================================+=======================================+ +| API接口 | :code:`executor.run(feed=...)` | :code:`fluid.layers.reader` | ++------------+----------------------------------+---------------------------------------+ +| 数据格式 | Numpy Array | :code:`fluid.recordio_writer` | ++------------+----------------------------------+---------------------------------------+ +| 数据增强 | Python端使用其他库完成 | 使用Fluid中的Operator 完成 | ++------------+----------------------------------+---------------------------------------+ +| 速度 | 慢 | 快 | ++------------+----------------------------------+---------------------------------------+ +| 推荐用途 | 调试模型 | 工业训练 | ++------------+----------------------------------+---------------------------------------+ + +这些准备数据的详细使用方法,请参考: + +.. toctree:: + :maxdepth: 2 + + feeding_data + use_recordio_reader + +Python Reader +############# + +为了方便用户在Python中定义数据处理流程,PaddlePaddle Fluid支持 Python Reader, +具体请参考: + +.. toctree:: + :maxdepth: 2 + + reader.md diff --git a/doc/fluid/new_docs/user_guides/howto/prepare_data/reader.md b/doc/fluid/new_docs/user_guides/howto/prepare_data/reader.md new file mode 100644 index 0000000000..aa50e4d261 --- /dev/null +++ b/doc/fluid/new_docs/user_guides/howto/prepare_data/reader.md @@ -0,0 +1,210 @@ +```eval_rst +.. _user_guide_reader: +``` + +# Python Reader + +During the training and testing phases, PaddlePaddle programs need to read data. To help the users write code that performs reading input data, we define the following: + +- A *reader*: A function that reads data (from file, network, random number generator, etc) and yields the data items. +- A *reader creator*: A function that returns a reader function. +- A *reader decorator*: A function, which takes in one or more readers, and returns a reader. +- A *batch reader*: A function that reads data (from *reader*, file, network, random number generator, etc) and yields a batch of data items. + +and also provide a function which can convert a reader to a batch reader, frequently used reader creators and reader decorators. + +## Data Reader Interface + +*Data reader* doesn't have to be a function that reads and yields data items. It can just be any function without any parameters that creates an iterable (anything can be used in `for x in iterable`) as follows: + +``` +iterable = data_reader() +``` + +The item produced from the iterable should be a **single** entry of data and **not** a mini batch. The entry of data could be a single item or a tuple of items. Item should be of one of the [supported types](http://www.paddlepaddle.org/doc/ui/data_provider/pydataprovider2.html?highlight=dense_vector#input-types) (e.g., numpy 1d array of float32, int, list of int etc.) 
+An example implementation for single item data reader creator is as follows:
+
+```python
+def reader_creator_random_image(width, height):
+    def reader():
+        while True:
+            yield numpy.random.uniform(-1, 1, size=width*height)
+    return reader
+```
+
+An example implementation for multiple item data reader creator is as follows:
+```python
+def reader_creator_random_image_and_label(width, height, label):
+    def reader():
+        while True:
+            yield numpy.random.uniform(-1, 1, size=width*height), label
+    return reader
+```
+
+## Batch Reader Interface
+
+*Batch reader* can be any function without any parameters that creates an iterable (anything can be used in `for x in iterable`). The output of the iterable should be a batch (list) of data items. Each item inside the list should be a tuple.
+
+Here are some valid outputs:
+
+```python
+# a mini batch of three data items. Each data item consists of three columns of data, each of which is 1.
+[(1, 1, 1),
+(2, 2, 2),
+(3, 3, 3)]
+
+# a mini batch of three data items, each data item is a list (single column).
+[([1,1,1],),
+([2,2,2],),
+([3,3,3],)]
+```
+
+Please note that each item inside the list must be a tuple, below is an invalid output:
+```python
+ # wrong, [1,1,1] needs to be inside a tuple: ([1,1,1],).
+ # Otherwise it is ambiguous whether [1,1,1] means a single column of data [1, 1, 1],
+ # or three columns of data, each of which is 1.
+[[1,1,1],
+[2,2,2],
+[3,3,3]]
+```
+
+It is easy to convert from a reader to a batch reader:
+
+```python
+mnist_train = paddle.dataset.mnist.train()
+mnist_train_batch_reader = paddle.batch(mnist_train, 128)
+```
+
+It is also straightforward to create a custom batch reader:
+
+```python
+def custom_batch_reader():
+    while True:
+        batch = []
+        for i in xrange(128):
+            batch.append((numpy.random.uniform(-1, 1, 28*28),)) # note that it's a tuple being appended.
+        yield batch
+
+mnist_random_image_batch_reader = custom_batch_reader
+```
+
+## Usage
+
+Following is how we can use the reader with PaddlePaddle:
+The batch reader, a mapping from item(s) to data layer, the batch size and the number of total passes will be passed into `paddle.train` as follows:
+
+```python
+# two data layers are created:
+image_layer = paddle.layer.data("image", ...)
+label_layer = paddle.layer.data("label", ...)
+
+# ...
+batch_reader = paddle.batch(paddle.dataset.mnist.train(), 128)
+paddle.train(batch_reader, {"image":0, "label":1}, 128, 10, ...)
+```
+
+## Data Reader Decorator
+
+The *Data reader decorator* takes in a single reader or multiple data readers and returns a new data reader. It is similar to a [python decorator](https://wiki.python.org/moin/PythonDecorators), but it does not use `@` in the syntax.
+
+Since we have a strict interface for data readers (no parameters and return a single data item), a data reader can be used in a flexible way using data reader decorators. Following are a few examples:
+
+### Prefetch Data
+
+Since reading data may take some time and training cannot proceed without data, it is generally a good idea to prefetch the data.
+
+Use `paddle.reader.buffered` to prefetch data:
+
+```python
+buffered_reader = paddle.reader.buffered(paddle.dataset.mnist.train(), 100)
+```
+
+`buffered_reader` will try to buffer (prefetch) `100` data entries.
+
+### Compose Multiple Data Readers
+
+For example, if we want to use a source of real images (say reusing mnist dataset), and a source of random images as input for [Generative Adversarial Networks](https://arxiv.org/abs/1406.2661).
+We can do the following:
+
+```python
+def reader_creator_random_image(width, height):
+    def reader():
+        while True:
+            yield numpy.random.uniform(-1, 1, size=width*height)
+    return reader
+
+def reader_creator_bool(t):
+    def reader():
+        while True:
+            yield t
+    return reader
+
+true_reader = reader_creator_bool(True)
+false_reader = reader_creator_bool(False)
+
+reader = paddle.reader.compose(paddle.dataset.mnist.train(), reader_creator_random_image(20, 20), true_reader, false_reader)
+# Skipped 1 because paddle.dataset.mnist.train() produces two items per data entry.
+# And we don't care about the second item at this time.
+paddle.train(paddle.batch(reader, 128), {"true_image":0, "fake_image": 2, "true_label": 3, "false_label": 4}, ...)
+```
+
+### Shuffle
+
+Given the shuffle buffer size `n`, `paddle.reader.shuffle` returns a data reader that buffers `n` data entries and shuffles them before a data entry is read.
+
+Example:
+```python
+reader = paddle.reader.shuffle(paddle.dataset.mnist.train(), 512)
+```
+
+## Q & A
+
+### Why does a reader return only a single entry, and not a mini batch?
+
+Returning a single entry makes reusing existing data readers much easier (for example, if an existing reader returns 3 entries instead of a single entry, the training code will be more complicated because it needs to handle cases like a batch size of 2).
+
+We provide a function, `paddle.batch`, to turn a (single entry) reader into a batch reader.
+
+### Why do we need a batch reader, isn't it sufficient to give the reader and batch_size as arguments during training?
+
+In most of the cases, it would be sufficient to give the reader and batch_size as arguments to the train method. However, sometimes the user wants to customize the order of data entries inside a mini batch, or even change the batch size dynamically. For these cases using a batch reader is very efficient and helpful.
+
+### Why use a dictionary instead of a list to provide mapping?
+
+Using a dictionary (`{"image":0, "label":1}`) instead of a list (`["image", "label"]`) gives the advantage that the user can easily reuse the items (e.g., using `{"image_a":0, "image_b":0, "label":1}`) or even skip an item (e.g., using `{"image_a":0, "label":2}`).
+
+### How to create a custom data reader creator?
+
+```python
+def image_reader_creator(image_path, label_path, n):
+    def reader():
+        f = open(image_path)
+        l = open(label_path)
+        images = numpy.fromfile(
+            f, 'ubyte', count=n * 28 * 28).reshape((n, 28 * 28)).astype('float32')
+        images = images / 255.0 * 2.0 - 1.0
+        labels = numpy.fromfile(l, 'ubyte', count=n).astype("int")
+        for i in xrange(n):
+            yield images[i, :], labels[i] # a single entry of data is created each time
+        f.close()
+        l.close()
+    return reader
+
+# images_reader_creator creates a reader
+reader = image_reader_creator("/path/to/image_file", "/path/to/label_file", 1024)
+paddle.train(paddle.batch(reader, 128), {"image":0, "label":1}, ...)
+```
+
+### How is `paddle.train` implemented
+
+An example implementation of paddle.train is:
+
+```python
+def train(batch_reader, mapping, batch_size, total_pass):
+    for pass_idx in range(total_pass):
+        for mini_batch in batch_reader(): # this loop will never end in online learning.
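+            # feed one mini batch to the network using the given column mapping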
+            do_forward_backward(mini_batch, mapping)
+```
diff --git a/doc/fluid/new_docs/user_guides/howto/prepare_data/use_recordio_reader.rst b/doc/fluid/new_docs/user_guides/howto/prepare_data/use_recordio_reader.rst
new file mode 100644
index 0000000000..dfda33f1b0
--- /dev/null
+++ b/doc/fluid/new_docs/user_guides/howto/prepare_data/use_recordio_reader.rst
@@ -0,0 +1,167 @@
+.. _user_guide_use_recordio_as_train_data:
+
+############################
+使用RecordIO文件作为训练数据
+############################
+
+相比于 :ref:`user_guide_use_numpy_array_as_train_data`,
+:ref:`user_guide_use_recordio_as_train_data` 的性能更好;
+但是用户需要先将训练数据集转换成RecordIO文件格式,再使用
+:code:`fluid.layers.open_files()` 层在神经网络配置中导入 RecordIO 文件。
+用户还可以使用 :code:`fluid.layers.double_buffer()` 加速数据从内存到显存的拷贝,
+使用 :code:`fluid.layers.Preprocessor` 工具进行数据增强。
+
+将训练数据转换成RecordIO文件格式
+################################
+
+:code:`fluid.recordio_writer` 中,每个记录都是一个
+:code:`vector<LoDTensor>`, 即一个支持序列信息的Tensor数组。这个数组包括训练所需
+的所有特征。例如对于图像分类来说,这个数组可以包含图片和分类标签。
+
+用户可以使用 :code:`fluid.recordio_writer.convert_reader_to_recordio_file()` 将
+:ref:`user_guide_reader` 转换成一个RecordIO文件。或者可以使用
+:code:`fluid.recordio_writer.convert_reader_to_recordio_files()` 将一个
+:ref:`user_guide_reader` 转换成多个RecordIO文件。
+
+具体使用方法为:
+
+.. code-block:: python
+
+   import paddle.fluid as fluid
+   import numpy
+
+   def reader_creator():
+       def __impl__():
+           for i in range(1000):
+               yield [
+                   numpy.random.random(size=[3, 224, 224]).astype("float32"),
+                   numpy.random.random(size=[1]).astype("int64")
+               ]
+       return __impl__
+
+   img = fluid.layers.data(name="image", shape=[3, 224, 224])
+   label = fluid.layers.data(name="label", shape=[1], dtype="int64")
+   feeder = fluid.DataFeeder(feed_list=[img, label], place=fluid.CPUPlace())
+
+   BATCH_SIZE = 32
+   reader = paddle.batch(reader_creator(), batch_size=BATCH_SIZE)
+   fluid.recordio_writer.convert_reader_to_recordio_file(
+       "train.recordio", feeder=feeder, reader_creator=reader)
+
+其中 :code:`reader_creator` 创建了一个 :code:`Reader`。
+:ref:`api_fluid_data_feeder_DataFeeder`
+是将 :code:`Reader` 转换成 :code:`LoDTensor` 的工具。详细请参考
+:ref:`user_guide_reader` 。
+
+上述程序将 :code:`reader_creator` 的数据转换成了 :code:`train.recordio` 文件,
+其中每一个 record 含有 32 条样本。如果 batch size 会在训练过程中调整,
+用户可以将每一个 Record 的样本数设置成 1,并参考
+:ref:`user_guide_use_recordio_as_train_data_use_op_create_batch`。
+
+
+配置神经网络,打开RecordIO文件
+##############################
+
+RecordIO文件转换好之后,用户可以使用 :code:`fluid.layers.open_files()`
+打开文件,并使用 :code:`fluid.layers.read_file` 读取文件内容。
+简单使用方法如下:
+
+.. code-block:: python
+
+   import paddle.fluid as fluid
+
+   file_obj = fluid.layers.open_files(
+       filenames=["train.recordio"],
+       shape=[[3, 224, 224], [1]],
+       lod_levels=[0, 0],
+       dtypes=["float32", "int64"],
+       pass_num=100
+   )
+
+   image, label = fluid.layers.read_file(file_obj)
+
+其中如果设置了 :code:`pass_num` ,那么当所有数据读完后,会重新读取数据,
+直到读取了 :code:`pass_num` 遍。
+
+
+
+进阶使用
+########
+
+
+使用 :code:`fluid.layers.double_buffer()`
+------------------------------------------
+
+:code:`Double buffer` 使用双缓冲技术,将训练数据从内存中复制到显存中。配置双缓冲
+需要使用 :code:`fluid.layers.double_buffer()` 修饰文件对象。例如:
+
+.. code-block:: python
+
+   import paddle.fluid as fluid
+   file_obj = fluid.layers.open_files(...)
+   file_obj = fluid.layers.double_buffer(file_obj)
+
+   image, label = fluid.layers.read_file(file_obj)
+
+双缓冲技术可以参考
+`Multiple buffering `_ 。
+
+配置数据增强
+------------
+
+使用 :code:`fluid.layers.Preprocessor` 可以为数据文件配置数据增强方法。例如:
+
+.. code-block:: python
+
+   import paddle.fluid as fluid
+   file_obj = fluid.layers.open_files(...)
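+   # 以上面打开的文件对象作为 Preprocessor 的输入,在其 block 中定义数据增强操作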
+   preprocessor = fluid.layers.Preprocessor(reader=file_obj)
+   with preprocessor.block():
+       image, label = preprocessor.inputs()
+       image = image / 2
+       label = label + 1
+       preprocessor.outputs(image, label)
+
+如上代码所示,使用 :code:`Preprocessor` 定义了一个数据增强模块,并在
+:code:`with preprocessor.block()` 中定义了数据增强的具体操作。用户通过配置
+:code:`preprocessor.inputs()` 获得数据文件中的各个字段,并用
+:code:`preprocessor.outputs()` 标记预处理后的输出。
+
+.. _user_guide_use_recordio_as_train_data_use_op_create_batch:
+
+使用Op组batch
+-------------
+
+使用 :code:`fluid.layers.batch()` 可以在训练的过程中动态地组batch。例如
+
+.. code-block:: python
+
+   import paddle.fluid as fluid
+   file_obj = fluid.layers.open_files(...)
+   file_obj = fluid.layers.batch(file_obj, batch_size=32)
+
+   img, label = fluid.layers.read_file(file_obj)
+
+需要注意的是,如果数据集中的最后几个样本不能组成 :code:`batch_size` 大小的批量数据,
+那么这几个样本会直接组成一个批量数据进行训练。
+
+读入数据的shuffle
+-----------------
+
+使用 :code:`fluid.layers.shuffle()` 可以在训练过程中动态重排训练数据。例如
+
+.. code-block:: python
+
+   import paddle.fluid as fluid
+   file_obj = fluid.layers.open_files(...)
+   file_obj = fluid.layers.shuffle(file_obj, buffer_size=8192)
+
+   img, label = fluid.layers.read_file(file_obj)
+
+需要注意的是:
+
+1. :code:`shuffle` 的实现方法是:
+先读入 :code:`buffer_size` 条样本,再随机地选出样本进行训练。
+
+2. :code:`shuffle` 中 :code:`buffer_size` 会占用训练内存,需要确定训练过程中内存
+足够支持缓存 :code:`buffer_size` 条数据。
diff --git a/doc/fluid/new_docs/user_guides/howto/training/checkpoint_doc_cn.md b/doc/fluid/new_docs/user_guides/howto/training/checkpoint_doc_cn.md
new file mode 100644
index 0000000000..c4afd536c6
--- /dev/null
+++ b/doc/fluid/new_docs/user_guides/howto/training/checkpoint_doc_cn.md
@@ -0,0 +1,60 @@
+# Checkpoint功能使用指南
+
+## 背景
+单机/多机在训练过程中会由于软件/硬件的问题出现异常,导致训练中断,进而导致训练无结果或结果不可用,浪费大量时间和机器性能。
+
+## 目的
+Checkpoint功能能够在训练中途保存训练的中间数据,出现异常后恢复训练时能够加载中途保存的数据继续训练,实现单机/多机的容错训练功能。
+
+## 说明
+### 目前已实现的参数保存:
+1. 基于Trainer 0 实现训练过程中的参数保存
+2. 基于PServer 实现了```Distribute Lookup Table```相关参数保存
+### Fluid Checkpoint 保存数据目录结构:
+
+```
+checkpoint_dir (用户定义的checkpoint目录)
+├── checkpoint_0 (第一次保存)
+│   ├── __lockup_table__ (Distribute Lookup Table 目录)
+│   │   ├── table_pserver_0 (Pserver 0 号保存的lookup table 数据)
+│   │   └── table_pserver_1
+│   ├── __model__ (model 目录)
+│   │   └── var.w_1
+│   └── trainer_0 (trainer 自有数据保存)
+│       ├── epoch_id
+│       └── step_id
+└── checkpoint_1 (第二次保存)
+```
+
+## 使用方法
+### 声明Fluid.CheckpointConfig
+用户对checkpoint功能的配置,主要是配置 ```Fluid``` 中的 ```CheckpointConfig``` 对象。
+
+```CheckpointConfig``` 包括4个参数:
+
+| 参数 | 类型 | 说明 |
+| - | :-: | - |
+| checkpoint_dir | str | checkpoint存储目录 |
+| max_num_checkpoints | int | 最大保存的checkpoint副本数 |
+| epoch_interval | int | 每隔 epoch_interval 个 epoch 保存一次 |
+| step_interval | int | 每隔 step_interval 个 step 保存一次 |
+
+### 在Fluid.Trainer对象的声明中加入Fluid.CheckpointConfig的声明
+Trainer的__init__方法的参数中包含了 ```CheckpointConfig```,需要传入在声明Trainer前声明的 ```CheckpointConfig``` 对象。
+如:
+```python
+config = CheckpointConfig(
+    checkpoint_dir = "/tmp/ckpt", max_num_checkpoints = 2,
+    epoch_interval = 2, step_interval = 10)
+trainer = Trainer(..., checkpoint_config=config)
+```
+定义和声明完成后,训练在运行过程中就会在指定的step和epoch处进行保存,出现异常时,就会自动从最新的checkpoint目录进行参数恢复!
+
+## 相关API
+[Trainer API 说明](https://github.com/PaddlePaddle/Paddle/blob/develop/python/paddle/fluid/trainer.py)
+
+## 注意
+1. 保证每个训练的```checkpoint_dir``` 与其他训练独立。
+2. 最大副本数量```max_num_checkpoints```需要根据磁盘容量以及模型的大小进行调整,保证磁盘的可用性。
+3. ```epoch_interval``` 和 ```step_interval``` 不宜过小,频繁地进行checkpoint会拖慢训练速度。
+4. **分布式训练**的过程中:每个Trainer都会在```checkpoint_dir```目录中保存当前Trainer的参数(只有Trainer 0会保存模型的参数),需要**分布式文件系统(HDFS等)**将同一```checkpoint_dir```目录的数据进行合并才能得到完整的数据,恢复训练的时候需要用完整的数据进行恢复。
diff --git a/doc/fluid/new_docs/user_guides/howto/training/checkpoint_doc_en.md b/doc/fluid/new_docs/user_guides/howto/training/checkpoint_doc_en.md
new file mode 100644
index 0000000000..14d37246ca
--- /dev/null
+++ b/doc/fluid/new_docs/user_guides/howto/training/checkpoint_doc_en.md
@@ -0,0 +1,62 @@
+# Checkpoint User Guide
+
+## Background
+In many cases, stand-alone training and distributed training can be aborted by software or hardware problems. More seriously, we may waste a lot of time and machine resources and still get nothing, which is frustrating and forces us to restart training from scratch.
+
+## Purpose
+The ```Checkpoint``` feature saves intermediate model variables, lookup table variables, and other needed data in the checkpoint directory. When an exception occurs, we can load these variables from the checkpoint directory immediately.
+## Introduction
+### Currently Implemented Features:
+1. Trainer 0 saves model variables during training.
+2. Each Trainer saves the arguments it needs.
+3. Each Parameter Server saves ```Distribute Lookup Table``` variables during training.
+### Fluid Checkpoint directory structure:
+
+```
+checkpoint_dir (the checkpoint directory the user defines)
+├── checkpoint_0 (the first save directory)
+│   ├── __lockup_table__ (Distribute Lookup Table directory)
+│   │   ├── table_pserver_0 (Lookup table's data about Pserver 0)
+│   │   └── table_pserver_1
+│   ├── __model__ (model directory)
+│   │   └── var.w_1
+│   └── trainer_0 (each trainer will save its own data)
+│       ├── epoch_id
+│       └── step_id
+└── checkpoint_1 (the second save directory)
+```
+
+## Usage
+### Fluid.CheckpointConfig construction
+When the user wants to use the ```Checkpoint``` feature, the main thing to do is to declare and construct a ```CheckpointConfig```.
+
+```CheckpointConfig``` has 4 member variables that need to be initialized:
+
+| Member Variable | Type | Comment |
+| - | :-: | - |
+| checkpoint_dir | str | checkpoint directory |
+| max_num_checkpoints | int | maximum number of checkpoint copies |
+| epoch_interval | int | save every epoch_interval epochs |
+| step_interval | int | save every step_interval steps |
+
+### Add Fluid.CheckpointConfig's declaration in Fluid.Trainer
+Because the initialization of Trainer needs an instance of ```CheckpointConfig```, we should declare the ```CheckpointConfig``` object before declaring the Trainer.
+
+For example:
+```python
+config = CheckpointConfig(
+    checkpoint_dir = "/tmp/ckpt", max_num_checkpoints = 2,
+    epoch_interval = 2, step_interval = 10)
+trainer = Trainer(..., checkpoint_config=config)
+```
+
+After this is done, training will save checkpoints at the specified epochs and steps. When training is aborted, the user can restart it, and training will restore from the latest checkpoint copy.
+
+## Related API
+[Related Trainer API](https://github.com/PaddlePaddle/Paddle/blob/develop/python/paddle/fluid/trainer.py)
+
+## Attention
+1. Make sure the ```checkpoint_dir``` is used by only one training job.
+2. The ```max_num_checkpoints``` needs to be adjusted according to disk size and model size, to keep the disk available.
+3. Too small ```epoch_interval``` and ```step_interval``` are not suitable, because checkpointing too frequently slows down training.
+4. **In distributed training**, each Trainer saves arguments in its own ```checkpoint_dir``` (only Trainer 0 saves model variables). We need a **distributed file system (HDFS, etc.)** to merge all the ```checkpoint_dir``` directories to get the whole data.
diff --git a/doc/fluid/new_docs/user_guides/howto/training/cluster_howto.rst b/doc/fluid/new_docs/user_guides/howto/training/cluster_howto.rst
new file mode 100644
index 0000000000..00ec9e819c
--- /dev/null
+++ b/doc/fluid/new_docs/user_guides/howto/training/cluster_howto.rst
@@ -0,0 +1,160 @@
+.. _cluster_howto:
+
+Fluid分布式训练使用手册
+====================
+
+分布式训练基本思想
+---------------
+
+分布式深度学习训练通常分为两种并行化方法:数据并行和模型并行,参考下图:
+
+.. image:: src/parallelism.png
+
+在模型并行方式下,模型的层和参数将被分布在多个节点上,模型在一个mini-batch的前向和反向训练中,将经过多次跨\
+节点之间的通信,每个节点只保存整个模型的一部分;在数据并行方式下,每个节点保存有完整的模型的层和参数,每个节点\
+独自完成前向和反向计算,然后完成梯度的聚合并同步地更新所有节点上的参数。Fluid目前版本仅提供数据并行方式,另外\
+诸如模型并行的特例实现(超大稀疏模型训练)功能将在后续的文档中予以说明。
+
+在数据并行模式的训练中,Fluid使用了两种通信模式,用于应对不同训练任务对分布式训练的要求,分别为RPC通信和Collective
+通信。其中RPC通信方式使用 `gRPC `_ ,Collective通信方式使用
+`NCCL2 `_ 。
+
+.. csv-table:: 下面是一个RPC通信和Collective通信的横向对比:
+   :header: "Feature", "Collective", "RPC"
+
+   "Ring-Based通信", "Yes", "No"
+   "异步训练", "Yes", "Yes"
+   "分布式模型", "No", "Yes"
+   "容错训练", "No", "Yes"
+   "性能", "Faster", "Fast"
+
+- RPC通信方式的结构:
+
+  .. image:: src/dist_train_pserver.png
+
+  使用RPC通信方式的数据并行分布式训练,会启动多个pserver进程和多个trainer进程,每个pserver进程\
+  会保存一部分模型参数,并负责接收从trainer发送的梯度并更新这些模型参数;每个trainer进程会保存一份\
+  完整的模型,并使用一部分数据进行训练,然后向pserver发送梯度,最后从pserver拉取更新后的参数。
+
+  pserver进程可以在和trainer完全不同的计算节点上,也可以和trainer共用节点。一个分布式任务所需要的\
+  pserver进程个数通常需要根据实际情况调整,以达到最佳的性能,然而通常来说pserver的进程不会比trainer\
+  更多。
+
+  在使用GPU训练时,pserver可以选择使用GPU或只使用CPU,如果pserver也使用GPU,则会增加一次从CPU拷贝\
+  接收到的梯度数据到GPU的开销,在某些情况下会导致整体训练性能降低。
+
+- NCCL2通信方式的结构:
+
+  .. image:: src/dist_train_nccl2.png
+
+  使用NCCL2(Collective通信方式)进行分布式训练,不需要启动pserver进程,每个trainer进程都保存\
+  一份完整的模型参数,在完成计算梯度之后通过trainer之间的相互通信,Reduce梯度数据到所有节点的所有设备,\
+  然后每个节点各自完成参数更新。
+
+使用parameter server方式的训练
+------------------------------
+
+使用 :code:`trainer` API,程序可以自动地通过识别环境变量决定是否以分布式方式执行。
+
+.. csv-table:: 需要在您的分布式环境中配置的环境变量包括:
+   :header: "环境变量", "说明"
+
+   "PADDLE_TRAINING_ROLE", "当前进程的角色,可以是PSERVER或TRAINER"
+   "PADDLE_PSERVER_PORT", "parameter server使用的端口"
+   "PADDLE_PSERVER_IPS", "parameter server的IP地址列表,用逗号分开"
+   "PADDLE_TRAINERS", "分布式任务中trainer节点的个数"
+   "PADDLE_CURRENT_IP", "当前节点的IP"
+   "PADDLE_TRAINER_ID", "trainer节点的id,从0~n-1,不能有重复"
+
+使用更加底层的 :code:`transpiler` API可以提供自定义的分布式训练的方法,比如可以在同一台机器上,
+启动多个pserver和trainer进行训练,使用底层API的方法可以参考下面的样例代码:
+
+.. code-block:: python
+
+   role = "PSERVER"
+   trainer_id = 0
+   pserver_endpoints = "127.0.0.1:6170,127.0.0.1:6171"
+   current_endpoint = "127.0.0.1:6170"
+   trainers = 4
+   t = fluid.DistributeTranspiler()
+   t.transpile(trainer_id, pservers=pserver_endpoints, trainers=trainers)
+   if role == "PSERVER":
+       pserver_prog = t.get_pserver_program(current_endpoint)
+       pserver_startup = t.get_startup_program(current_endpoint,
+                                               pserver_prog)
+       exe.run(pserver_startup)
+       exe.run(pserver_prog)
+   elif role == "TRAINER":
+       train_loop(t.get_trainer_program())
+
+
+选择同步或异步训练
+++++++++++++++++++
+
+Fluid分布式任务可以支持同步训练或异步训练。在同步训练方式下,所有的trainer节点会在每个mini-batch\
+同步地合并所有节点的梯度数据并发送给parameter server完成更新;在异步训练方式下,每个trainer没有相互\
+同步等待的过程,可以独立地更新parameter server上的参数。通常情况下,使用异步训练方式,可以在trainer节点\
+较多的时候比同步训练方式有更高的总体吞吐量。
+
+在调用 :code:`transpile` 函数时,默认会生成同步训练的分布式程序,通过指定 :code:`sync_mode=False`
+参数即可生成异步训练的程序:
+.. code-block:: python
+
+   t.transpile(trainer_id, pservers=pserver_endpoints, trainers=trainers, sync_mode=False)
+
+
+选择参数分布方法
+++++++++++++++++
+
+参数 :code:`split_method` 可以指定参数在parameter server上的分布方式。
+
+Fluid默认使用 `RoundRobin `_
+方式将参数分布在多个parameter server上。此方式在默认未关闭参数切分的情况下,参数会较平均地分布在所有的
+parameter server上。如果需要使用其他方式,可以传入其他的方法,目前可选的方法有: :code:`RoundRobin` 和
+:code:`HashName` 。也可以使用自定义的分布方式,只需要参考
+`这里 `_
+编写自定义的分布函数。
+
+
+关闭切分参数
+++++++++++++
+
+参数 :code:`slice_var_up` 指定是否将较大(大于8192个元素)的参数切分到多个parameter server以均衡计算负载,默认为开启。
+
+当模型中的可训练参数体积比较均匀,或者使用自定义的参数分布方法使参数均匀分布在多个parameter server上时,
+可以选择关闭切分参数,这样可以降低切分和重组带来的计算和拷贝开销:
+
+.. code-block:: python
+
+   t.transpile(trainer_id, pservers=pserver_endpoints, trainers=trainers, slice_var_up=False)
+
+
+使用NCCL2通信方式的训练
+--------------------
+
+注:NCCL2模式目前仅支持trainer API,NCCL2方式并没有很多可选项,也没有"transpiler",所以并没有底层API。
+使用NCCL2方式同样需要配置每个节点的环境变量,此处与parameter server模式有所不同,并不需要启动独立的\
+parameter server进程,只需要启动多个trainer进程即可。
+
+
+.. csv-table:: NCCL2模式环境变量说明:
+   :header: "环境变量", "说明"
+
+   "PADDLE_TRAINER_IPS", "所有Trainer节点的IP列表,用逗号分隔"
+   "PADDLE_TRAINER_ID", "trainer节点的id,从0~n-1,不能有重复"
+   "PADDLE_PSERVER_PORT", "一个端口,用于在NCCL2初始化时,广播NCCL ID"
+   "PADDLE_CURRENT_IP", "当前节点的IP"
+
+目前使用NCCL2进行分布式训练仅支持同步训练方式。使用NCCL2方式的分布式训练,更适合模型体积较大、并需要使用\
+同步训练和GPU训练的场景,如果硬件设备支持RDMA和GPU Direct,可以达到很高的分布式训练性能。
+
+注意:如果系统中有多个网络设备,需要手动指定NCCL2使用的设备,
+假设需要使用 :code:`eth2` 为通信设备,需要设定如下环境变量:
+
+.. code-block:: bash
+
+   export NCCL_SOCKET_IFNAME=eth2
+
+另外NCCL2提供了其他的开关环境变量,比如指定是否开启GPU Direct,是否使用RDMA等,详情可以参考
+`ncclknobs `_ 。
diff --git a/doc/fluid/new_docs/user_guides/howto/training/cluster_quick_start.rst b/doc/fluid/new_docs/user_guides/howto/training/cluster_quick_start.rst
new file mode 100644
index 0000000000..6131c92d6f
--- /dev/null
+++ b/doc/fluid/new_docs/user_guides/howto/training/cluster_quick_start.rst
@@ -0,0 +1,143 @@
+.. _cluster_quick_start:
+
+分布式训练快速开始
+==================
+
+准备工作
+--------
+
+在本篇文章中,我们将介绍如何快速在一个集群中启动一个 PaddlePaddle
+的分布式训练任务。在开始之前,请按如下步骤做些准备工作:
+
+1. 准备一个至少4个节点的集群,并且保证网络可以连通,在本文中我们使用
+   ``*.paddlepaddle.com`` 来表示每个节点的主机名称,您可以根据集群的实际情况来修改它。
+
+2. 确保已经阅读过 :ref:`how_to_install`
+   并且可以在集群的所有节点上正常运行 PaddlePaddle。
+
+启动集群训练任务
+----------------
+
+在启动集群训练脚本时,需要在不同的节点上指定不同的环境变量,具体如下:
+
++-----------------+-----------------+-----------------+---------------------+
+| 环境变量        | 数据类型        | 样例            | 描述                |
++=================+=================+=================+=====================+
+| PADDLE_TRAINING | str             | PSERVER,TRAINER | 训练节点的角色      |
+| _ROLE           |                 |                 |                     |
++-----------------+-----------------+-----------------+---------------------+
+| PADDLE_PSERVER_ | str             | ps0.paddlepaddl | 所有 pserver        |
+| IPS             |                 | e.com,ps1.paddl | 节点的 IP           |
+|                 |                 | epaddle.com…    | 地址或              |
+|                 |                 |                 | hostname,          |
+|                 |                 |                 | 用“,”分隔           |
++-----------------+-----------------+-----------------+---------------------+
+| PADDLE_PSERVER_ | int             | 6174            | pserver             |
+| PORT            |                 |                 | 节点监听的端口      |
++-----------------+-----------------+-----------------+---------------------+
+| PADDLE_TRAINERS | int             | 2               | 训练任务中          |
+|                 |                 |                 | trainer             |
+|                 |                 |                 | 节点的数量          |
++-----------------+-----------------+-----------------+---------------------+
+| PADDLE_CURRENT_ | str             | ps0.paddlepaddl | 当前 pserver        |
+| IP              |                 | e.com           | 节点的 IP           |
+|                 |                 |                 | 地址或 hostname     |
++-----------------+-----------------+-----------------+---------------------+
+| PADDLE_TRAINER_ | int             | 0               | 当前 trainer        |
+| ID              |                 |                 | 节点的唯一 ID,     |
+|                 |                 |                 | 取值范围为从0开始到 |
+|                 |                 |                 | PADDLE_TRAINERS-1   |
++-----------------+-----------------+-----------------+---------------------+
+
+样例代码
+~~~~~~~~
+
+将下面程序代码保存为 ``fluid_dist.py``
+
+.. code:: python
+
+    import paddle
+    import paddle.fluid as fluid
+
+    # train reader
+    BATCH_SIZE = 20
+
+    train_reader = paddle.batch(
+        paddle.reader.shuffle(
+            paddle.dataset.uci_housing.train(), buf_size=500),
+        batch_size=BATCH_SIZE)
+
+    test_reader = paddle.batch(
+        paddle.reader.shuffle(
+            paddle.dataset.uci_housing.test(), buf_size=500),
+        batch_size=BATCH_SIZE)
+
+
+    def train_program():
+        y = fluid.layers.data(name='y', shape=[1], dtype='float32')
+        x = fluid.layers.data(name='x', shape=[13], dtype='float32')
+        y_predict = fluid.layers.fc(input=x, size=1, act=None)
+
+        loss = fluid.layers.square_error_cost(input=y_predict, label=y)
+        avg_loss = fluid.layers.mean(loss)
+
+        return avg_loss
+
+    def optimizer_func():
+        return fluid.optimizer.SGD(learning_rate=0.001)
+
+    def train(use_cuda, train_program):
+        place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
+
+        trainer = fluid.Trainer(
+            train_func=train_program, place=place, optimizer_func=optimizer_func)
+
+        def event_handler(event):
+            if isinstance(event, fluid.EndStepEvent):
+                if event.step == 10:
+                    test_metrics = trainer.test(
+                        reader=test_reader, feed_order=['x', 'y'])
+                    print("step {0}, loss: {1}".format(event.step, test_metrics))
+                    trainer.stop()
+
+        trainer.train(
+            reader=train_reader,
+            num_epochs=100,
+            event_handler=event_handler,
+            feed_order=['x', 'y'])
+
+    train(False, train_program)
+
+启动trainer节点和pserver节点
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+..
list-table::
+   :header-rows: 1
+
+   * - 启动节点
+     - 启动命令
+     - 说明
+   * - ps0.paddlepaddle.com
+     - :code:`PADDLE_TRAINING_ROLE=PSERVER PADDLE_CURRENT_IP=ps0.paddlepaddle.com PADDLE_PSERVER_IPS=ps0.paddlepaddle.com,ps1.paddlepaddle.com PADDLE_TRAINERS=2 PADDLE_PSERVER_PORT=6174 python fluid_dist.py`
+     - 启动 pserver 节点
+   * - ps1.paddlepaddle.com
+     - :code:`PADDLE_TRAINING_ROLE=PSERVER PADDLE_CURRENT_IP=ps1.paddlepaddle.com PADDLE_PSERVER_IPS=ps0.paddlepaddle.com,ps1.paddlepaddle.com PADDLE_TRAINERS=2 PADDLE_PSERVER_PORT=6174 python fluid_dist.py`
+     - 启动 pserver 节点
+   * - trainer0.paddlepaddle.com
+     - :code:`PADDLE_TRAINING_ROLE=TRAINER PADDLE_PSERVER_IPS=ps0.paddlepaddle.com,ps1.paddlepaddle.com PADDLE_TRAINERS=2 PADDLE_TRAINER_ID=0 PADDLE_PSERVER_PORT=6174 python fluid_dist.py`
+     - 启动第0号 trainer 节点
+   * - trainer1.paddlepaddle.com
+     - :code:`PADDLE_TRAINING_ROLE=TRAINER PADDLE_PSERVER_IPS=ps0.paddlepaddle.com,ps1.paddlepaddle.com PADDLE_TRAINERS=2 PADDLE_TRAINER_ID=1 PADDLE_PSERVER_PORT=6174 python fluid_dist.py`
+     - 启动第1号 trainer 节点
+
+**注意**
+
+- 需要先启动pserver节点,再启动trainer节点
+- 看到trainer节点输出如下日志表示训练任务执行成功
+
+  .. code:: bash
+
+     step 10, loss: [258.2326202392578]
diff --git a/doc/fluid/new_docs/user_guides/howto/training/index.rst b/doc/fluid/new_docs/user_guides/howto/training/index.rst
new file mode 100644
index 0000000000..68475101e2
--- /dev/null
+++ b/doc/fluid/new_docs/user_guides/howto/training/index.rst
@@ -0,0 +1,12 @@
+############
+训练神经网络
+############
+
+PaddlePaddle Fluid支持单机训练和多节点训练。每种训练模式下,都支持多种训练方法。
+
+.. toctree::
+   :maxdepth: 2
+
+   single_node
+   multi_node
+   save_load_variables
diff --git a/doc/fluid/new_docs/user_guides/howto/training/multi_node.rst b/doc/fluid/new_docs/user_guides/howto/training/multi_node.rst
new file mode 100644
index 0000000000..24316f0be0
--- /dev/null
+++ b/doc/fluid/new_docs/user_guides/howto/training/multi_node.rst
@@ -0,0 +1,9 @@
+########
+多机训练
+########
+
+.. toctree::
+   :maxdepth: 2
+
+   cluster_quick_start.rst
+   cluster_howto.rst
diff --git a/doc/fluid/new_docs/user_guides/howto/training/save_load_variables.rst b/doc/fluid/new_docs/user_guides/howto/training/save_load_variables.rst
new file mode 100644
index 0000000000..a96776f4a1
--- /dev/null
+++ b/doc/fluid/new_docs/user_guides/howto/training/save_load_variables.rst
@@ -0,0 +1,172 @@
+.. _user_guide_save_load_vars:
+
+##################
+保存与载入模型变量
+##################
+
+模型变量分类
+############
+
+在PaddlePaddle Fluid中,所有的模型变量都以 :code:`fluid.Variable()` 作为基类表示。
+在该基类之下,模型变量主要可以分为以下几种类别:
+
+1. 模型参数
+   模型参数是深度学习模型中被训练和学习的变量,在训练过程中,训练框架根据反向传播算法计算出每一个模型参数当前的梯度,
+   并用优化器根据梯度对参数进行更新。模型的训练过程本质上可以看做是模型参数不断迭代更新的过程。
+   在PaddlePaddle Fluid中,模型参数用 :code:`fluid.framework.Parameter` 来表示,
+   这是一个 :code:`fluid.Variable()` 的派生类,除了 :code:`fluid.Variable()` 具有的各项性质以外,
+   :code:`fluid.framework.Parameter` 还可以配置自身的初始化方法、更新率等属性。
+
+2. 长期变量
+   长期变量指的是在整个训练过程中持续存在、不会因为一个迭代的结束而被销毁的变量,例如动态调节的全局学习率等。
+   在PaddlePaddle Fluid中,长期变量通过将 :code:`fluid.Variable()` 的 :code:`persistable`
+   属性设置为 :code:`True` 来表示。所有的模型参数都是长期变量,但并非所有的长期变量都是模型参数。
+
+3. 
临时变量
+   不属于上面两个类别的所有模型变量都是临时变量,这种类型的变量只在一个训练迭代中存在,在每一个迭代结束后,
+   所有的临时变量都会被销毁,然后在下一个迭代开始之前,又会先构造出新的临时变量供本轮迭代使用。
+   一般情况下模型中的大部分变量都属于这一类别,例如输入的训练数据、一个普通的layer的输出等等。
+
+
+
+如何保存模型变量
+################
+
+根据用途的不同,我们需要保存的模型变量也是不同的。例如,如果我们只是想保存模型用来进行以后的预测,
+那么只保存模型参数就够用了。但如果我们需要保存一个checkpoint以备将来恢复训练,
+那么我们应该将各种长期变量都保存下来,甚至还需要记录一下当前的epoch和step的id。
+因为一些模型变量虽然不是参数,但对于模型的训练依然必不可少。
+
+因此,根据需求的不同,我们提供了两套API来分别进行模型的参数和checkpoint的保存。
+
+保存模型用于对新样本的预测
+==========================
+
+如果我们保存模型的目的是用于对新样本的预测,那么只保存模型参数就足够了。我们可以使用
+:code:`fluid.io.save_params()` 接口来进行模型参数的保存。
+
+例如:
+
+.. code-block:: python
+
+    import paddle.fluid as fluid
+
+    exe = fluid.Executor(fluid.CPUPlace())
+    param_path = "./my_paddle_model"
+    prog = fluid.default_main_program()
+    fluid.io.save_params(executor=exe, dirname=param_path, main_program=None)
+
+上面的例子中,通过调用 :code:`fluid.io.save_params` 函数,PaddlePaddle Fluid会对默认
+:code:`fluid.Program` 也就是 :code:`prog` 中的所有模型变量进行扫描,
+筛选出其中所有的模型参数,并将这些模型参数保存到指定的 :code:`param_path` 之中。
+
+
+保存checkpoint用于将来恢复训练
+==============================
+
+在训练过程中,我们可能希望在一些时间节点上将当前的训练状态保存下来,
+以便在将来需要的时候恢复训练环境继续进行训练。这一般被称作“checkpoint”。
+想要保存checkpoint,可以使用 :code:`fluid.io.save_checkpoint()` 接口。
+
+例如:
+
+.. code-block:: python
+
+    import paddle.fluid as fluid
+
+    exe = fluid.Executor(fluid.CPUPlace())
+    path = "./checkpoints"
+    prog = fluid.default_main_program()
+    trainer_args = {"epoch_id": 200,
+                    "step_id": 20} # just an example
+    fluid.io.save_checkpoint(executor=exe,
+                             checkpoint_dir=path,
+                             trainer_id=0,
+                             trainer_args=trainer_args,
+                             main_program=prog,
+                             max_num_checkpoints=3)
+
+上面的例子中,通过调用 :code:`fluid.io.save_checkpoint` 函数,PaddlePaddle Fluid会对默认
+:code:`fluid.Program` 也就是 :code:`prog` 中的所有模型变量进行扫描,
+根据一系列内置的规则自动筛选出其中所有需要保存的变量,并将它们保存到指定的 :code:`path` 目录下。
+
+:code:`fluid.io.save_checkpoint` 的各个参数中, :code:`trainer_id` 在单机情况下设置为0即可; :code:`trainer_args`
+为一个Python dict,用于给定当前的epoch_id和step_id;
+:code:`max_num_checkpoints` 用于指定最多保留的checkpoint数量,
+如果目录中已经存在的checkpoint数量超过这个值,那最早的checkpoint将被删除。
+
+如何载入模型变量
+################
+
+与模型变量的保存相对应,我们提供了两套API来分别载入模型的参数和载入模型的checkpoint。
+
+载入模型用于对新样本的预测
+==========================
+
+对于通过 :code:`fluid.io.save_params` 保存的模型,可以使用 :code:`fluid.io.load_params`
+来进行载入。
+
+例如:
+
+.. code-block:: python
+
+    import paddle.fluid as fluid
+
+    exe = fluid.Executor(fluid.CPUPlace())
+    param_path = "./my_paddle_model"
+    prog = fluid.default_main_program()
+    fluid.io.load_params(executor=exe, dirname=param_path,
+                         main_program=prog)
+
+上面的例子中,通过调用 :code:`fluid.io.load_params` 函数,PaddlePaddle Fluid会对
+:code:`prog` 中的所有模型变量进行扫描,筛选出其中所有的模型参数,
+并尝试从 :code:`param_path` 之中读取加载它们。
+
+需要格外注意的是,这里的 :code:`prog` 必须和调用 :code:`fluid.io.save_params`
+时所用的 :code:`prog` 中的前向部分完全一致,且不能包含任何参数更新的操作。如果两者存在不一致,
+那么可能会导致一些变量未被正确加载;如果错误地包含了参数更新操作,那可能会导致正常预测过程中参数被更改。
+这两个 :code:`fluid.Program` 之间的关系类似于训练 :code:`fluid.Program`
+和测试 :code:`fluid.Program` 之间的关系,详见: :ref:`user_guide_test_while_training`。
+
+另外,需特别注意:运行 :code:`fluid.default_startup_program()` 必须在调用 :code:`fluid.io.load_params`
+之前。如果在之后运行,可能会覆盖已加载的模型参数导致错误。
+
+
+载入checkpoint用于恢复训练
+==========================
+
+对于通过 :code:`fluid.io.save_checkpoint` 保存的模型,可以使用 :code:`fluid.io.load_checkpoint`
+来进行载入。
+
+例如:
+
+.. 
code-block:: python
+
+    import paddle.fluid as fluid
+
+    exe = fluid.Executor(fluid.CPUPlace())
+    path = "./checkpoints"
+    prog = fluid.default_main_program()
+    fluid.io.load_checkpoint(executor=exe, checkpoint_dir=path,
+                             serial=9, main_program=prog)
+
+上面的例子中,通过调用 :code:`fluid.io.load_checkpoint` 函数,PaddlePaddle Fluid会对
+:code:`prog` 中的所有模型变量进行扫描,根据内置规则自动筛选出需要加载的变量,
+并尝试从 :code:`path` 之中加载它们。
+
+参数 :code:`serial` 用来标记具体要加载的checkpoint的版本号。在保存checkpoint的时候,
+一个checkpoint会被保存在一个子目录中,并在目录名上体现出自己的版本号。
+一般越大的版本号表示这个checkpoint越新。
+
+这里的 :code:`prog` 必须和调用 :code:`fluid.io.save_checkpoint` 时所用的 :code:`prog`
+完全一致,否则会导致变量加载错误或者未加载。另外,与 :code:`fluid.io.save_params` 类似,
+运行 :code:`fluid.default_startup_program()` 也必须在 :code:`fluid.io.load_checkpoint`
+之前进行。
+
+多机checkpoint保存
+##################
+
+.. toctree::
+   :maxdepth: 2
+
+   checkpoint_doc_cn.md
\ No newline at end of file
diff --git a/doc/fluid/new_docs/user_guides/howto/training/single_node.rst b/doc/fluid/new_docs/user_guides/howto/training/single_node.rst
new file mode 100644
index 0000000000..23eac0f831
--- /dev/null
+++ b/doc/fluid/new_docs/user_guides/howto/training/single_node.rst
@@ -0,0 +1,119 @@
+########
+单机训练
+########
+
+准备工作
+########
+
+要进行PaddlePaddle Fluid单机训练,需要先 :ref:`user_guide_prepare_data` 和
+:ref:`user_guide_configure_simple_model` 。当\
+:ref:`user_guide_configure_simple_model` 完毕后,可以得到两个\
+:code:`fluid.Program`, :code:`startup_program` 和 :code:`main_program`。
+默认情况下,可以使用 :code:`fluid.default_startup_program()` 与\ :code:`fluid.default_main_program()` 获得全局的 :code:`fluid.Program`。
+
+例如:
+
+.. code-block:: python
+
+    import paddle.fluid as fluid
+
+    image = fluid.layers.data(name="image", shape=[784])
+    label = fluid.layers.data(name="label", shape=[1])
+    hidden = fluid.layers.fc(input=image, size=100, act='relu')
+    prediction = fluid.layers.fc(input=hidden, size=10, act='softmax')
+    loss = fluid.layers.mean(
+        fluid.layers.cross_entropy(
+            input=prediction,
+            label=label
+        )
+    )
+
+    sgd = fluid.optimizer.SGD(learning_rate=0.001)
+    sgd.minimize(loss)
+
+    # Here the fluid.default_startup_program() and fluid.default_main_program()
+    # have been constructed.
+
+在上述模型配置执行完毕后, :code:`fluid.default_startup_program()` 与\
+:code:`fluid.default_main_program()` 配置完毕了。
+
+初始化参数
+##########
+
+参数随机初始化
+==============
+
+用户配置完模型后,参数初始化操作会被写入到\
+:code:`fluid.default_startup_program()` 中。使用 :code:`fluid.Executor()` 运行
+这一程序,即可在全局 :code:`fluid.global_scope()` 中随机初始化参数。例如:
+
+.. code-block:: python
+
+    exe = fluid.Executor(fluid.CUDAPlace(0))
+    exe.run(program=fluid.default_startup_program())
+
+值得注意的是: 如果使用多GPU训练,参数需要先在GPU0上初始化,再经由\
+:code:`fluid.ParallelExecutor` 分发到多张显卡上。
+
+
+载入预定义参数
+==============
+
+在神经网络训练过程中,经常会需要载入预定义模型,进而继续进行训练。\
+如何载入预定义参数,请参考 :ref:`user_guide_save_load_vars`。
+
+
+单卡训练
+########
+
+执行单卡训练可以使用 :code:`fluid.Executor()` 中的 :code:`run()` 方法,运行训练\
+:code:`fluid.Program` 即可。在运行的时候,用户可以通过 :code:`run(feed=...)`\
+参数传入数据;通过 :code:`run(fetch_list=...)` 获取需要返回的数据。例如:\
+
+.. code-block:: python
+
+    ...
+    loss = fluid.layers.mean(...)
+
+    exe = fluid.Executor(...)
+    # the result is a numpy array
+    result = exe.run(feed={"image": ..., "label": ...}, fetch_list=[loss])
+
+这里有几点注意事项:
+
+1. feed的数据格式,请参考文章 :ref:`user_guide_feed_data_to_executor`。
+2. :code:`Executor.run` 的返回值是 :code:`fetch_list=[...]` 的variable值。被fetch\
+   的Variable必须是persistable的。 :code:`fetch_list` 可以传入Variable的列表,\
+   也可以传入Variable的名字列表。:code:`Executor.run` 返回Fetch结果列表。
+3. 
如果需要取回的数据包含序列信息,可以设置
+   :code:`exe.run(return_numpy=False, ...)` 直接返回 :code:`fluid.LoDTensor`
+   。用户可以直接访问 :code:`fluid.LoDTensor` 中的信息。
+
+多卡训练
+########
+
+执行多卡训练可以使用 :code:`fluid.ParallelExecutor` 运行训练
+:code:`fluid.Program`。例如:
+
+.. code-block:: python
+
+    train_exe = fluid.ParallelExecutor(use_cuda=True, loss_name=loss.name,
+                                       main_program=fluid.default_main_program())
+    train_exe.run(fetch_list=[loss.name], feed={...})
+
+这里有几点注意事项:
+
+1. :code:`ParallelExecutor` 的构造函数需要指明要执行的 :code:`fluid.Program` ,
+   且在执行过程中不能修改。默认值是 :code:`fluid.default_main_program()` 。
+2. :code:`ParallelExecutor` 需要明确指定是否使用 CUDA 显卡进行训练。在显卡训练\
+   模式下会占用全部显卡。用户可以配置 `CUDA_VISIBLE_DEVICES `_ 来修改占用\
+   的显卡。
+
+进阶使用
+########
+
+.. toctree::
+   :maxdepth: 2
+
+   test_while_training
+   save_load_variables
diff --git a/doc/fluid/new_docs/user_guides/howto/training/src/dist_train_nccl2.graffle b/doc/fluid/new_docs/user_guides/howto/training/src/dist_train_nccl2.graffle
new file mode 100644
index 0000000000..16f6b8835c
Binary files /dev/null and b/doc/fluid/new_docs/user_guides/howto/training/src/dist_train_nccl2.graffle differ
diff --git a/doc/fluid/new_docs/user_guides/howto/training/src/dist_train_nccl2.png b/doc/fluid/new_docs/user_guides/howto/training/src/dist_train_nccl2.png
new file mode 100644
index 0000000000..587a1a48af
Binary files /dev/null and b/doc/fluid/new_docs/user_guides/howto/training/src/dist_train_nccl2.png differ
diff --git a/doc/fluid/new_docs/user_guides/howto/training/src/dist_train_pserver.graffle b/doc/fluid/new_docs/user_guides/howto/training/src/dist_train_pserver.graffle
new file mode 100644
index 0000000000..046c490323
Binary files /dev/null and b/doc/fluid/new_docs/user_guides/howto/training/src/dist_train_pserver.graffle differ
diff --git a/doc/fluid/new_docs/user_guides/howto/training/src/dist_train_pserver.png b/doc/fluid/new_docs/user_guides/howto/training/src/dist_train_pserver.png
new file mode 100644
index 0000000000..cd2f92ad1a
Binary files /dev/null and b/doc/fluid/new_docs/user_guides/howto/training/src/dist_train_pserver.png differ
diff --git a/doc/fluid/new_docs/user_guides/howto/training/src/parallelism.png b/doc/fluid/new_docs/user_guides/howto/training/src/parallelism.png
new file mode 100644
index 0000000000..6c078b5241
Binary files /dev/null and b/doc/fluid/new_docs/user_guides/howto/training/src/parallelism.png differ
diff --git a/doc/fluid/new_docs/user_guides/howto/training/test_while_training.rst b/doc/fluid/new_docs/user_guides/howto/training/test_while_training.rst
new file mode 100644
index 0000000000..37d5c0d781
--- /dev/null
+++ b/doc/fluid/new_docs/user_guides/howto/training/test_while_training.rst
@@ -0,0 +1,120 @@
+.. _user_guide_test_while_training:
+
+##################
+训练过程中评测模型
+##################
+
+模型的测试评价与训练的 :code:`fluid.Program` 不同。在测试评价中:
+
+1. 评价测试不进行反向传播,不优化更新参数。
+2. 评价测试执行的操作可以与训练不同。
+
+   * 例如 BatchNorm 操作,在训练和测试时执行不同的算法。
+
+   * 评价模型与训练相比可以是完全不同的模型。
+
+生成测试 :code:`fluid.Program`
+#################################
+
+通过克隆训练 :code:`fluid.Program` 生成测试 :code:`fluid.Program`
+=======================================================================
+
+:code:`Program.clone()` 方法可以复制出新的 :code:`fluid.Program` 。 通过设置
+:code:`Program.clone(for_test=True)` 可以复制出含有测试操作的 Program。简单的使用方法如下:
+
+.. 
code-block:: python
+
+    import paddle.fluid as fluid
+
+    img = fluid.layers.data(name="image", shape=[784])
+    prediction = fluid.layers.fc(
+        input=fluid.layers.fc(input=img, size=100, act='relu'),
+        size=10,
+        act='softmax'
+    )
+    label = fluid.layers.data(name="label", shape=[1], dtype="int64")
+    loss = fluid.layers.mean(fluid.layers.cross_entropy(input=prediction, label=label))
+    acc = fluid.layers.accuracy(input=prediction, label=label)
+
+    test_program = fluid.default_main_program().clone(for_test=True)
+
+    adam = fluid.optimizer.Adam(learning_rate=0.001)
+    adam.minimize(loss)
+
+在使用 :code:`Optimizer` 之前,将 :code:`fluid.default_main_program()` 复制\
+成一个 :code:`test_program` 。之后使用测试数据运行 :code:`test_program`,\
+就可以做到运行测试程序,而不影响训练结果。
+
+分别配置训练 :code:`fluid.Program` 和测试 :code:`fluid.Program`
+=====================================================================
+
+如果训练程序和测试程序相差较大,用户也可以通过完全定义两个不同的
+:code:`fluid.Program`,分别进行训练和测试。在PaddlePaddle Fluid中,\
+所有的参数都有名字。如果两个不同的操作,甚至两个不同的网络使用了同样名字的参数,\
+那么它们的值和内存空间都是共享的。
+
+PaddlePaddle Fluid中使用 :code:`fluid.unique_name` 包来为用户未显式命名的\
+参数自动生成唯一的名称。通过 :code:`fluid.unique_name.guard` 可以确保多次调用某函数时\
+参数初始化的名称一致。
+
+例如:
+
+.. code-block:: python
+
+    import paddle.fluid as fluid
+
+    def network(is_test):
+        file_obj = fluid.layers.open_files(filenames=["test.recordio"] if is_test else ["train.recordio"], ...)
+        img, label = fluid.layers.read_file(file_obj)
+        hidden = fluid.layers.fc(input=img, size=100, act="relu")
+        hidden = fluid.layers.batch_norm(input=hidden, is_test=is_test)
+        ...
+        return loss
+
+    with fluid.unique_name.guard():
+        train_loss = network(is_test=False)
+        sgd = fluid.optimizer.SGD(0.001)
+        sgd.minimize(train_loss)
+
+    test_program = fluid.Program()
+    with fluid.unique_name.guard():
+        with fluid.program_guard(test_program, fluid.Program()):
+            test_loss = network(is_test=True)
+
+    # fluid.default_main_program() is the train program
+    # test_program is the test program
+
+执行测试 :code:`fluid.Program`
+#################################
+
+使用 :code:`Executor` 执行测试 :code:`fluid.Program`
+=======================================================
+
+用户可以使用 :code:`Executor.run(program=...)` 来执行测试
+:code:`fluid.Program`。
+
+例如:
+
+.. code-block:: python
+
+    exe = fluid.Executor(fluid.CPUPlace())
+    test_acc = exe.run(program=test_program, feed=test_data_batch, fetch_list=[acc])
+    print 'Test accuracy is ', test_acc
+
+使用 :code:`ParallelExecutor` 执行测试 :code:`fluid.Program`
+===============================================================
+
+用户可以使用训练用的 :code:`ParallelExecutor` 与测试 :code:`fluid.Program`
+一起新建一个测试的 :code:`ParallelExecutor` ;再使用测试
+:code:`ParallelExecutor.run` 来执行测试。
+
+例如:
+
+.. code-block:: python
+
+    train_exec = fluid.ParallelExecutor(use_cuda=True, loss_name=loss.name)
+
+    test_exec = fluid.ParallelExecutor(use_cuda=True, share_vars_from=train_exec,
+                                       main_program=test_program)
+    test_acc = test_exec.run(fetch_list=[acc], ...)
+
diff --git a/doc/fluid/new_docs/user_guides/index.rst b/doc/fluid/new_docs/user_guides/index.rst
new file mode 100644
index 0000000000..453cb71cfd
--- /dev/null
+++ b/doc/fluid/new_docs/user_guides/index.rst
@@ -0,0 +1,18 @@
+########
+使用指南
+########
+
+
+.. todo::
+
+   完善导引介绍
+
+.. 
toctree::
+   :maxdepth: 2
+
+   howto/prepare_data/index
+   howto/configure_simple_model/index
+   howto/training/index
+   howto/debug/index
+   howto/evaluation/index
+   models/index.rst
diff --git a/doc/fluid/new_docs/user_guides/models/index.rst b/doc/fluid/new_docs/user_guides/models/index.rst
new file mode 100644
index 0000000000..998e95c488
--- /dev/null
+++ b/doc/fluid/new_docs/user_guides/models/index.rst
@@ -0,0 +1,137 @@
+Fluid 模型库
+============
+
+图像分类
+--------
+
+图像分类是根据图像的语义信息对不同类别图像进行区分,是计算机视觉中重要的基础问题,是物体检测、图像分割、物体跟踪、行为分析、人脸识别等其他高层视觉任务的基础,在许多领域都有着广泛的应用。如:安防领域的人脸识别和智能视频分析等,交通领域的交通场景识别,互联网领域基于内容的图像检索和相册自动归类,医学领域的图像识别等。
+
+在深度学习时代,图像分类的准确率大幅度提升。在图像分类任务中,我们向大家介绍了如何在经典的数据集ImageNet上训练常用的模型,包括AlexNet、VGG、GoogLeNet、ResNet、Inception-v4、MobileNet、DPN(Dual
+Path
+Network)、SE-ResNeXt模型,也开源了\ `训练的模型 `__\ 方便用户下载使用。同时提供了能够将Caffe模型转换为PaddlePaddle
+Fluid模型配置和参数文件的工具。
+
+- `AlexNet `__
+- `VGG `__
+- `GoogleNet `__
+- `Residual
+  Network `__
+- `Inception-v4 `__
+- `MobileNet `__
+- `Dual Path
+  Network `__
+- `SE-ResNeXt `__
+- `Caffe模型转换为Paddle
+  Fluid配置和模型文件工具 `__
+
+目标检测
+--------
+
+目标检测任务的目标是给定一张图像或是一个视频帧,让计算机找出其中所有目标的位置,并给出每个目标的具体类别。对于人类来说,目标检测是一个非常简单的任务。然而,计算机能够“看到”的是图像被编码之后的数字,很难理解图像或是视频帧中出现了人或是物体这样的高层语义概念,也就更加难以定位目标出现在图像中哪个区域。与此同时,由于目标会出现在图像或是视频帧中的任何位置,目标的形态千变万化,图像或是视频帧的背景千差万别,诸多因素都使得目标检测对计算机来说是一个具有挑战性的问题。
+
+在目标检测任务中,我们介绍了如何基于\ `PASCAL
+VOC `__\ 、\ `MS
+COCO `__\ 数据训练通用物体检测模型,当前介绍了SSD算法,SSD全称Single Shot MultiBox Detector,是目标检测领域较新且效果较好的检测算法之一,具有检测速度快且检测精度高的特点。
+
+在开放环境中检测人脸,尤其是小的、模糊的和部分遮挡的人脸,也是一个具有挑战性的任务。我们也介绍了如何基于 `WIDER FACE `_ 数据训练百度自研的人脸检测PyramidBox模型,该算法于2018年3月份在WIDER FACE的多项评测中均获得 `第一名 `_。
+
+- `Single Shot MultiBox
+  Detector `__
+- `Face Detector: PyramidBox `_
+
+图像语义分割
+------------
+
+图像语义分割顾名思义是将图像像素按照表达的语义含义的不同进行分组/分割,图像语义是指对图像内容的理解,例如,能够描绘出什么物体在哪里做了什么事情等,分割是指对图片中的每个像素点进行标注,标注属于哪一类别。近年来被用在无人车驾驶技术中分割街景以避让行人和车辆、在医疗影像分析中辅助诊断等。
+
+在图像语义分割任务中,我们介绍如何基于图像级联网络(Image Cascade
+Network,ICNet)进行语义分割,相比其他分割算法,ICNet兼顾了准确率和速度。
+
+- `ICNet `__
+
+场景文字识别
+------------
+
+许多场景图像中包含着丰富的文本信息,对理解图像信息有着重要作用,能够极大地帮助人们认知和理解场景图像的内容。场景文字识别是在图像背景复杂、分辨率低下、字体多样、分布随意等情况下,将图像信息转化为文字序列的过程,可认为是一种特别的翻译过程:将图像输入翻译为自然语言输出。场景图像文字识别技术的发展也促进了一些新型应用的产生,如通过自动识别路牌中的文字帮助街景应用获取更加准确的地址信息等。
+
+在场景文字识别任务中,我们介绍如何将基于CNN的图像特征提取和基于RNN的序列翻译技术结合,免除人工定义特征,避免字符分割,使用自动学习到的图像特征,完成端到端的无约束字符定位和识别。当前,介绍了CRNN-CTC模型,后续会引入基于注意力机制的序列到序列模型。
+
+- `CRNN-CTC模型 `__
+
+语音识别
+--------
+
+自动语音识别(Automatic Speech Recognition,
+ASR)是将人类声音中的词汇内容转录成计算机可输入的文字的技术。语音识别的相关研究经历了漫长的探索过程,在HMM/GMM模型之后其发展一直较为缓慢,随着深度学习的兴起,其迎来了春天。在多种语音识别任务中,将深度神经网络(DNN)作为声学模型,取得了比GMM更好的性能,使得
+ASR
+成为深度学习应用最为成功的领域之一。而由于识别准确率的不断提高,有越来越多的语音技术产品得以落地,例如语音输入法、以智能音箱为代表的智能家居设备等
+—— 基于语音的交互方式正在深刻地改变人类的生活。
+
+与 `DeepSpeech `__
+中深度学习模型端到端直接预测字词的分布不同,本实例更接近传统的语音识别流程,以音素为建模单元,关注语音识别中声学模型的训练,利用\ `kaldi `__\ 进行音频数据的特征提取和标签对齐,并集成
+kaldi 的解码器完成解码。
+
+- `DeepASR `__
+
+机器翻译
+--------
+
+机器翻译(Machine
+Translation)将一种自然语言(源语言)转换成一种自然语言(目标语言),是自然语言处理中非常基础和重要的研究方向。在全球化的浪潮中,机器翻译在促进跨语言文明的交流中所起的重要作用是不言而喻的。其发展经历了统计机器翻译和基于神经网络的神经机器翻译(Neural
+Machine Translation, NMT)等阶段。在 NMT
+成熟后,机器翻译才真正得以大规模应用。而早期的 NMT
+主要是基于循环神经网络 RNN
+的,其训练过程中当前时间步依赖于前一个时间步的计算,时间步之间难以并行化以提高训练速度。因此,非
+RNN 结构的 NMT 应运而生,例如基于卷积神经网络 CNN
+的结构和基于自注意力机制(Self-Attention)的结构。
+
+本实例所实现的 Transformer
+就是一个基于自注意力机制的机器翻译模型,其中不再有RNN或CNN结构,而是完全利用
+Attention 学习语言中的上下文依赖。相较于RNN/CNN,
+这种结构在单层内计算复杂度更低、易于并行化、对长程依赖更易建模,最终在多种语言之间取得了最好的翻译效果。
+
+- `Transformer `__
+
+强化学习
+--------
+
+强化学习是近年来一个愈发重要的机器学习方向,特别是与深度学习相结合而形成的深度强化学习(Deep
+Reinforcement Learning,
+DRL),取得了很多令人惊异的成就。人们所熟知的战胜人类顶级围棋职业选手的
+AlphaGo 就是 DRL
+应用的一个典型例子,除游戏领域外,其它的应用还包括机器人、自然语言处理等。
+
+深度强化学习的开山之作是在Atari视频游戏中的成功应用,
+其可直接接受视频帧这种高维输入并根据图像内容端到端地预测下一步的动作,所用到的模型被称为深度Q网络(Deep
+Q-Network, DQN)。本实例就是利用PaddlePaddle Fluid这个灵活的框架,实现了
+DQN 及其变体,并测试了它们在 Atari 游戏中的表现。
+
+- `DeepQNetwork `__
+
+中文词法分析
+------------
+
+中文分词(Word Segmentation)是将连续的自然语言文本,切分出具有语义合理性和完整性的词汇序列的过程。因为在汉语中,词是承担语义的最基本单位,切词是文本分类、情感分析、信息检索等众多自然语言处理任务的基础。 词性标注(Part-of-speech Tagging)是为自然语言文本中的每一个词汇赋予一个词性的过程,这里的词性包括名词、动词、形容词、副词等等。 命名实体识别(Named Entity Recognition,NER)又称作“专名识别”,是指识别自然语言文本中具有特定意义的实体,主要包括人名、地名、机构名、专有名词等。 我们将这三个任务统一成一个联合任务,称为词法分析任务,基于深度神经网络,利用海量标注语料进行训练,提供了一个端到端的解决方案。
+
+我们把这个联合的中文词法分析解决方案命名为LAC。LAC既可以认为是Lexical Analysis of Chinese的首字母缩写,也可以认为是LAC Analyzes Chinese的递归缩写。
+
+- `LAC `__
+
+情感倾向分析
+------------
+
+情感倾向分析针对带有主观描述的中文文本,可自动判断该文本的情感极性类别并给出相应的置信度。情感类型分为积极、消极、中性。情感倾向分析能够帮助企业理解用户消费习惯、分析热点话题和危机舆情监控,为企业提供有力的决策支持。本次我们开放 AI 开放平台中情感倾向分析采用的模型(http://ai.baidu.com/tech/nlp/sentiment_classify),提供给用户使用。
+
+- `Senta `__
+
+AnyQ
+----
+
+`AnyQ `__\ (ANswer Your Questions)
+开源项目主要包含面向FAQ集合的问答系统框架、文本语义匹配工具SimNet。
+问答系统框架采用了配置化、插件化的设计,各功能均通过插件形式加入,当前共开放了20+种插件。开发者可以使用AnyQ系统快速构建和定制适用于特定业务场景的FAQ问答系统,并加速迭代和升级。
+
+SimNet是百度自然语言处理部于2013年自主研发的语义匹配框架,该框架在百度各产品上广泛应用,主要包括BOW、CNN、RNN、MM-DNN等核心网络结构形式,同时基于该框架也集成了学术界主流的语义匹配模型,如MatchPyramid、MV-LSTM、K-NRM等模型。使用SimNet构建出的模型可以便捷地加入AnyQ系统中,增强AnyQ系统的语义匹配能力。
+
+- `SimNet in PaddlePaddle
+  Fluid `__
diff --git a/doc/mobile/CMakeLists.txt b/doc/mobile/CMakeLists.txt
index b104a6318d..7b34ba8d07 100644
--- a/doc/mobile/CMakeLists.txt
+++ b/doc/mobile/CMakeLists.txt
@@ -15,6 +15,9 @@ set(SPHINX_CACHE_DIR_EN "${CMAKE_CURRENT_BINARY_DIR}/en/_doctrees")
 # HTML output director
 set(SPHINX_HTML_DIR_EN "${CMAKE_CURRENT_BINARY_DIR}/en/html")
 
+set(IMPORT_PADDLE_STRING "")
+set(IMPORT_PADDLEV2_STRING "")
+
 configure_file(
   "${CMAKE_CURRENT_SOURCE_DIR}/../templates/conf.py.en.in"
   "${BINARY_BUILD_DIR_EN}/conf.py"
@@ -27,8 +30,6 @@ sphinx_add_target(paddle_mobile_docs
   ${CMAKE_CURRENT_SOURCE_DIR}
   ${SPHINX_HTML_DIR_EN})
 
-add_dependencies(paddle_mobile_docs gen_proto_py paddle_python)
-
 # configured documentation tools and intermediate build results
 set(BINARY_BUILD_DIR_CN "${CMAKE_CURRENT_BINARY_DIR}/cn/_build")
 
@@ -49,5 +50,3 @@ sphinx_add_target(paddle_mobile_docs_cn
   ${SPHINX_CACHE_DIR_CN}
   ${CMAKE_CURRENT_SOURCE_DIR}
   ${SPHINX_HTML_DIR_CN})
-
-add_dependencies(paddle_mobile_docs_cn gen_proto_py paddle_python)
diff --git a/doc/mobile/cross_compiling_for_android_cn.md b/doc/mobile/cross_compiling_for_android_cn.md
index cdd6917239..0607748b75 100644
--- a/doc/mobile/cross_compiling_for_android_cn.md
+++ b/doc/mobile/cross_compiling_for_android_cn.md
@@ -63,16 +63,16 @@ Android的Docker开发镜像向用户提供两个可配置的参数:
 
 - 编译`armeabi-v7a`,`Android API 21`的PaddlePaddle库
 
 ```bash
-$ docker run -it --rm -v $PWD:/paddle -e "ANDROID_ABI=armeabi-v7a" -e "ANDROID_API=21" username/paddle-android:dev
+$ docker run -it --rm -v $PWD:/paddle -w /paddle -e "ANDROID_ABI=armeabi-v7a" -e "ANDROID_API=21" username/paddle-android:dev ./paddle/scripts/paddle_build.sh build_android
 ```
 
 - 编译`arm64-v8a`,`Android API 21`的PaddlePaddle库
 
 ```bash
-$ docker run -it --rm -v $PWD:/paddle -e "ANDROID_ABI=arm64-v8a" -e "ANDROID_API=21" username/paddle-android:dev
+$ docker run -it --rm -v $PWD:/paddle -w /paddle -e "ANDROID_ABI=arm64-v8a" -e "ANDROID_API=21" username/paddle-android:dev ./paddle/scripts/paddle_build.sh build_android
 ```
 
-执行上述`docker 
run`命令时,容器默认执行[paddle/scripts/docker/build_android.sh](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/scripts/docker/build_android.sh)脚本。该脚本中记录了交叉编译Android版PaddlePaddle库常用的CMake配置,并且会根据`ANDROID_ABI`和`ANDROID_API`自动构建独立工具链、进行编译和安装。由于arm64架构要求Android API不小于21。因此当`ANDROID_ABI=arm64-v8a`,`ANDROID_API<21`时,Docker容器中将默认使用`Android API 21`的编译工具链。用户可以参考下文[配置交叉编译参数](#配置交叉编译参数)章节,根据个人的需求修改定制Docker容器所执行的脚本。编译安装结束之后,PaddlePaddle的C-API库将被安装到`$PWD/install_android`目录,所依赖的第三方库同时也被安装到`$PWD/install_android/third_party`目录。 +执行上述`docker run`命令时,容器执行[paddle/scripts/paddle_build.sh build_android](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/scripts/paddle_build.sh)脚本。该脚本中记录了交叉编译Android版PaddlePaddle库常用的CMake配置,并且会根据`ANDROID_ABI`和`ANDROID_API`自动构建独立工具链、进行编译和安装。由于arm64架构要求Android API不小于21。因此当`ANDROID_ABI=arm64-v8a`,`ANDROID_API<21`时,Docker容器中将默认使用`Android API 21`的编译工具链。用户可以参考下文[配置交叉编译参数](#配置交叉编译参数)章节,根据个人的需求修改定制Docker容器所执行的脚本。编译安装结束之后,PaddlePaddle的C-API库将被安装到`$PWD/install_android`目录,所依赖的第三方库同时也被安装到`$PWD/install_android/third_party`目录。 ## 基于Linux交叉编译环境的编译方式 本文档将以Linux x86-64平台为例,介绍交叉编译Android平台上适用的PaddlePaddle库的方法和步骤。 diff --git a/doc/mobile/cross_compiling_for_android_en.md b/doc/mobile/cross_compiling_for_android_en.md index 6af16fc114..572063e801 100644 --- a/doc/mobile/cross_compiling_for_android_en.md +++ b/doc/mobile/cross_compiling_for_android_en.md @@ -36,7 +36,7 @@ $ docker pull docker.paddlepaddlehub.com/paddle:latest-dev-android We can run the Docker image we just created to build the inference library of PaddlePaddle for Android using the command below: ```bash -$ docker run -it --rm -v $PWD:/paddle -e "ANDROID_ABI=armeabi-v7a" -e "ANDROID_API=21" paddle:dev-android +$ docker run -it --rm -v $PWD:/paddle -w /paddle -e "ANDROID_ABI=armeabi-v7a" -e "ANDROID_API=21" paddle:dev-android ./paddle/scripts/paddle_build.sh build_android ``` The Docker image accepts two arguments `ANDROID_ABI` and `ANDROID_API`: @@ -70,7 +70,7 @@ The Docker image accepts two arguments `ANDROID_ABI` and `ANDROID_API`: The ARM-64 architecture (`arm64-v8a`) requires at least level 21 of Android API. -The default entry-point of the Docker image, [`paddle/scripts/docker/build_android.sh`](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/scripts/docker/build_android.sh) generates the [Android cross-compiling standalone toolchain](https://developer.android.com/ndk/guides/standalone_toolchain.html) based on the argument: `ANDROID_ABI` or `ANDROID_API`. For information about other configuration arguments, please continue reading. +The build command, [`paddle/scripts/paddle_build.sh build_android`](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/scripts/paddle_build.sh) generates the [Android cross-compiling standalone toolchain](https://developer.android.com/ndk/guides/standalone_toolchain.html) based on the argument: `ANDROID_ABI` or `ANDROID_API`. For information about other configuration arguments, please continue reading. The above command generates and outputs the inference library in `$PWD/install_android` and puts third-party libraries in `$PWD/install_android/third_party`. diff --git a/doc/mobile/index_cn.rst b/doc/mobile/index_cn.rst index 8297316e8f..56d1515005 100644 --- a/doc/mobile/index_cn.rst +++ b/doc/mobile/index_cn.rst @@ -1,9 +1,9 @@ 移动端 -===== +====== .. 
toctree::
   :maxdepth: 1
 
   cross_compiling_for_android_cn.md
   cross_compiling_for_ios_cn.md
-  cross_compiling_for_raspberry_cn.md
\ No newline at end of file
+  cross_compiling_for_raspberry_cn.md
diff --git a/doc/survey/dynamic_graph.md b/doc/survey/dynamic_graph.md
new file mode 100644
index 0000000000..6b80b014b1
--- /dev/null
+++ b/doc/survey/dynamic_graph.md
@@ -0,0 +1,378 @@
+# Automatic Differentiation with the Tape
+
+## Automatic Differentiation
+
+A key challenge in the field of deep learning is to automatically derive the backward pass from the forward pass described algorithmically by researchers. Such a derivation, or a transformation of the forward pass program, has been long studied before the recent prosperity of deep learning in the field known as [automatic differentiation](https://arxiv.org/pdf/1502.05767.pdf).
+
+## The Tape
+
+Given the forward pass program (usually in Python in practice), there are two strategies to derive the backward pass:
+
+1. from the forward pass program itself, or
+1. from the execution trace of the forward pass program, which is often known as the *tape*.
+
+This article surveys systems that follow the latter strategy.
+
+## Dynamic Network
+
+When we train a deep learning model, the tape changes every iteration as the input data change, so we have to re-derive the backward pass every iteration. This is known as *dynamic network*.
+
+Deep learning systems that utilize the idea of dynamic network gained popularity in recent years. This article surveys two representative systems: [PyTorch](https://pytorch.org/) and [DyNet](https://dynet.readthedocs.io/en/latest/).
+
+## An Overview
+
+Both frameworks record a ‘tape’ of the computation and interpret (or run-time compile) a transformation of the tape played back in reverse. This tape is a different kind of entity than the original program.[[link]](http://www.bcl.hamilton.ie/~barak/papers/toplas-reverse.pdf)
+
+Consider the following feedforward model.
+
+```python
+x = Variable(randn(20, 1))
+label = Variable(randint(1))
+W_1, W_2 = Variable(randn(20, 20)), Variable(randn(10, 20))
+h = matmul(W_1, x)
+pred = matmul(W_2, h)
+loss = softmax(pred, label)
+loss.backward()
+```
+
+### 1) Dynet uses List to encode the Tape
+
+During the forward execution, a list of operators, in this case `matmul`, `matmul` and `softmax`, is recorded in the tape, along with the necessary information needed to do the backward such as pointers to the inputs and outputs. Then the tape is played in reverse order at `loss.backward()`.
+
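+To make this concrete, below is a minimal Python sketch of a list-based tape. It is illustrative only: `record` and the `backward_fn` closures are made-up names for this survey, not DyNet's actual API. Every executed operator appends one record during the forward pass, and the backward pass simply walks the list in reverse, accumulating gradients.
+
+```python
+records = []  # the tape: one record per executed operator
+
+def record(op_type, inputs, output, backward_fn):
+    # Called by every forward op; stores what the backward pass needs later.
+    records.append((op_type, inputs, output, backward_fn))
+
+def backward(loss):
+    grads = {id(loss): 1.0}  # seed d(loss)/d(loss) = 1
+    for op_type, inputs, output, backward_fn in reversed(records):
+        out_grad = grads.get(id(output), 0.0)
+        # backward_fn maps the output gradient to one gradient per input.
+        for inp, g in zip(inputs, backward_fn(out_grad)):
+            # Accumulate, since a variable may feed several operators.
+            grads[id(inp)] = grads.get(id(inp), 0.0) + g
+    return grads
+```
+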
+
+digraph g {
+    graph [
+        rankdir = "LR"
+    ];
+    node [
+        fontsize = "16"
+        shape = "ellipse"
+    ];
+    edge [];
+    "node0" [
+        label = "<f0> type: matmul | <f1> input: W_1, x | <f2> output: h"
+        shape = "record"
+    ];
+    "node1" [
+        label = "<f0> type: matmul | <f1> input: W_2, h | <f2> output: pred"
+        shape = "record"
+    ];
+    "node2" [
+        label = "<f0> type: softmax | <f1> input: pred, label | <f2> output: loss"
+        shape = "record"
+    ];
+    "node0":f0 -> "node1":f0 [];
+    "node1":f0 -> "node2":f0 [];
+}
+
+
+![Alt text](https://g.gravizo.com/svg?digraph%20g%20{%20graph%20[%20rankdir%20=%20%22LR%22%20];%20node%20[%20fontsize%20=%20%2216%22%20shape%20=%20%22ellipse%22%20];%20edge%20[];%20%22node0%22%20[%20label%20=%20%22%3Cf0%3E%20type:%20matmul%20|%20%3Cf1%3E%20input:%20W_1,%20x%20|%20%3Cf2%3E%20output:%20h%22%20shape%20=%20%22record%22%20];%20%22node1%22%20[%20label%20=%20%22%3Cf0%3E%20type:%20matmul%20|%20%3Cf1%3E%20input:%20W_2,%20h%20|%20%3Cf2%3E%20output:%20pred%22%20shape%20=%20%22record%22%20];%20%22node2%22%20[%20label%20=%20%22%3Cf0%3E%20type:%20softmax%20|%20%3Cf1%3E%20input:%20pred,%20label%20|%20%3Cf2%3E%20output:%20loss%22%20shape%20=%20%22record%22%20];%20%22node0%22:f0%20-%3E%20%22node1%22:f0%20[%20id%20=%200%20];%20%22node1%22:f0%20-%3E%20%22node2%22:f0%20[%20id%20=%201%20];%20})
+
+### 2) Pytorch uses Node Graph to encode the Tape
+
+The graph is composed of `Variable`s and `Function`s. During the forward execution, a `Variable` records its creator function, e.g. `h.creator = matmul`. And a `Function` records its inputs' previous/dependent functions `prev_func` through `creator`, e.g. `matmul1.prev_func = matmul0`. At `loss.backward()`, a topological sort is performed on all `prev_func`s. Then the grad ops are performed in the sorted order.
+
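+The following minimal sketch shows this bookkeeping with simplified, made-up classes (not PyTorch's real `Variable` and `Function`): each `Variable` remembers its `creator`, each `Function` remembers its `prev_funcs`, and `backward` topologically sorts the functions before running their grad ops.
+
+```python
+class Function(object):
+    def __init__(self, prev_funcs):
+        self.prev_funcs = prev_funcs  # the functions this one depends on
+
+class Variable(object):
+    def __init__(self, creator=None):
+        self.creator = creator  # the Function that produced this Variable
+
+def backward(loss):
+    # Depth-first post-order over prev_funcs yields dependencies first.
+    order, seen = [], set()
+    def visit(fn):
+        if fn is None or id(fn) in seen:
+            return
+        seen.add(id(fn))
+        for prev in fn.prev_funcs:
+            visit(prev)
+        order.append(fn)
+    visit(loss.creator)
+    # Run grad ops from the loss backwards, i.e. in reverse post-order.
+    for fn in reversed(order):
+        pass  # the framework would execute fn's grad op here
+```
+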
+
+digraph g {
+    graph [
+        rankdir = "LR"
+    ];
+
+    subgraph function {
+        node [
+            fontsize = "16"
+            style = filled
+            shape = "record"
+        ];
+        "matmul0" [ label = "<f0> type: matmul | prev_func: None" ];
+        "matmul1" [ label = "<f0> type: matmul | prev_func: matmul" ];
+        "softmax" [ label = "<f0> type: softmax | prev_func: matmul" ];
+    }
+
+    subgraph variable {
+        node [
+            fontsize = "16"
+            shape = "Mrecord"
+            style = filled
+            fillcolor = white
+        ];
+        "x" [ label = "<f0> x | <f1> creator: None" ];
+        "label" [ label = "<f0> label | <f1> creator: None" ];
+        "W_1" [ label = "<f0> W_1 | <f1> creator: None" ];
+        "W_2" [ label = "<f0> W_2 | <f1> creator: None" ];
+        "h" [ label = "<f0> h | <f1> creator: None" ];
+        "pred" [ label = "<f0> pred | <f1> creator: matmul" ];
+        "loss" [ label = "<f0> loss | <f1> creator: softmax" ];
+    }
+
+    subgraph data_flow {
+        "x":f0 -> "matmul0":f0;
+        "W_1":f0 -> "matmul0":f0;
+        "matmul0":f0 -> "h":f0;
+
+        "h":f0 -> "matmul1":f0;
+        "W_2":f0 -> "matmul1":f0;
+        "matmul1":f0 -> "pred":f0;
+
+        "pred":f0 -> "softmax":f0;
+        "label":f0 -> "softmax":f0;
+        "softmax":f0 -> "loss":f0;
+    }
+
+    subgraph prev_func {
+        edge [color="red", arrowsize="0.6", penwidth="1", constraint=false];
+        "matmul1":f1 -> "matmul0":f0;
+        "softmax":f1 -> "matmul1":f0;
+        label = "prev_func";
+    }
+}
+
+
+![Alt text](https://g.gravizo.com/svg?digraph%20g%20{%20graph%20[%20rankdir%20=%20%22LR%22%20];%20subgraph%20function%20{%20node%20[%20fontsize%20=%20%2216%22%20style%20=%20filled%20shape%20=%20%22record%22%20];%20%22matmul0%22%20[%20label%20=%20%22%3Cf0%3E%20type:%20matmul%20|%20prev_func:%20None%22%20];%20%22matmul1%22%20[%20label%20=%20%22%3Cf0%3E%20type:%20matmul%20|%20prev_func:%20matmul%22%20];%20%22softmax%22%20[%20label%20=%20%22%3Cf0%3E%20type:%20softmax%20|%20prev_func:%20matmul%22%20];%20}%20subgraph%20variable%20{%20node%20[%20fontsize%20=%20%2216%22%20shape%20=%20%22Mrecord%22%20style%20=%20filled%20fillcolor%20=%20white%20];%20%22x%22%20[%20label%20=%20%22%3Cf0%3E%20x%20|%20%3Cf1%3E%20creator:%20None%22%20];%20%22label%22%20[%20label%20=%20%22%3Cf0%3E%20label%20|%20%3Cf1%3E%20creator:%20None%22%20];%20%22W_1%22%20[%20label%20=%20%22%3Cf0%3E%20W_1%20|%20%3Cf1%3E%20creator:%20None%22%20];%20%22W_2%22%20[%20label%20=%20%22%3Cf0%3E%20W_2%20|%20%3Cf1%3E%20creator:%20None%22%20];%20%22h%22%20[%20label%20=%20%22%3Cf0%3E%20h%20|%20%3Cf1%3E%20creator:%20None%22%20];%20%22pred%22%20[%20label%20=%20%22%3Cf0%3E%20pred%20|%20%3Cf1%3E%20creator:%20matmul%22%20];%20%22loss%22%20[%20label%20=%20%22%3Cf0%3E%20loss%20|%20%3Cf1%3E%20creator:%20softmax%22%20];%20}%20subgraph%20data_flow%20{%20%22x%22:f0%20-%3E%20%22matmul0%22:f0;%20%22W_1%22:f0%20-%3E%20%22matmul0%22:f0;%20%22matmul0%22:f0%20-%3E%20%22h%22:f0;%20%22h%22:f0%20-%3E%20%22matmul1%22:f0;%20%22W_2%22:f0%20-%3E%20%22matmul1%22:f0;%20%22matmul1%22:f0%20-%3E%20%22pred%22:f0;%20%22pred%22:f0%20-%3E%20%22softmax%22:f0;%20%22label%22:f0%20-%3E%20%22softmax%22:f0;%20%22softmax%22:f0%20-%3E%20%22loss%22:f0;%20}%20subgraph%20prev_func%20{%20edge%20[color=%22red%22,%20arrowsize=%220.6%22,%20penwidth=%221%22,%20constraint=false];%20%22matmul1%22:f1%20-%3E%20%22matmul0%22:f0;%20%22softmax%22:f1%20-%3E%20%22matmul1%22:f0;%20label%20=%20%22prev_func%22;%20}%20})
+
+Chainer and Autograd use similar techniques to record the forward pass. For details please refer to the appendix.
+
+## Design choices
+
+### 1) Dynet's List vs Pytorch's Node Graph
+
+What's good about List:
+1. It avoids a topological sort. One only needs to traverse the list of operators in reverse and call the corresponding backward operator.
+1. It promises efficient data parallelism implementations. One could count the number of uses of a certain variable during list construction. Then during playback, one knows when the computation of a variable has completed. This enables communication and computation overlapping.
+
+What's good about Node Graph:
+1. More flexibility. PyTorch users can mix and match independent graphs however they like, in whatever threads they like (without explicit synchronization). An added benefit of structuring graphs this way is that when a portion of the graph becomes dead, it is automatically freed. [[2]](https://openreview.net/pdf?id=BJJsrmfCZ) Consider the following example: Pytorch only does backward on SmallNet while Dynet does both BigNet and SmallNet.
+```python
+result = BigNet(data)
+loss = SmallNet(data)
+loss.backward()
+```
+
+### 2) Dynet's Lazy evaluation vs Pytorch's Immediate evaluation
+
+Dynet builds the list in a symbolic manner. 
Consider the following example:
+```python
+for epoch in range(num_epochs):
+    for in_words, out_label in training_data:
+        dy.renew_cg()
+        W = dy.parameter(W_p)
+        b = dy.parameter(b_p)
+        score_sym = dy.softmax(W*dy.concatenate([E[in_words[0]],E[in_words[1]]])+b)
+        loss_sym = dy.pickneglogsoftmax(score_sym, out_label)
+        loss_val = loss_sym.value()
+        loss_sym.backward()
+```
+The computation of `lookup`, `concat`, `matmul` and `softmax` does not happen until the call of `loss_sym.value()`. This deferred execution is useful because it makes graph-level optimizations possible, e.g. kernel fusion.
+
+Pytorch chooses immediate evaluation. It avoids ever materializing a "forward graph"/"tape" (no need to explicitly call `dy.renew_cg()` to reset the list), recording only what is necessary to differentiate the computation, i.e. `creator` and `prev_func`.
+
+
+## What can fluid learn from them?
+
+Please refer to `paddle/contrib/dynamic/`.
+
+# Appendix
+
+### Overview
+
+| Framework | Has Tape | Core in C++ | First Release Date |
+|-----------|----------|-------------|--------------------|
+| Autograd  | No       | No          | Mar 5, 2015        |
+| Chainer   | No       | No          | Jun 5, 2015        |
+| Pytorch   | No       | Yes         | Aug 31, 2016       |
+| Dynet     | Yes      | Yes         | Oct 12, 2016       |
+
+### Source Code
+#### Autograd
+[Backward code](https://github.com/HIPS/autograd/blob/442205dfefe407beffb33550846434baa90c4de7/autograd/core.py#L8-L40). In the forward pass, a graph of VJPNode is constructed.
+```python
+# User API
+def make_grad(fun, x):
+    start_node = VJPNode.new_root()
+    end_value, end_node = trace(start_node, fun, x)
+    return backward_pass(g, end_node), end_value
+
+# trace the forward pass by creating VJPNodes
+def trace(start_node, fun, x):
+    with trace_stack.new_trace() as t:
+        start_box = new_box(x, t, start_node)
+        end_box = fun(start_box)
+        return end_box._value, end_box._node
+
+def backward_pass(g, end_node):
+    outgrads = {end_node : (g, False)}
+    for node in toposort(end_node):
+        outgrad = outgrads.pop(node)
+        ingrads = node.vjp(outgrad[0])
+        for parent, ingrad in zip(node.parents, ingrads):
+            outgrads[parent] = add_outgrads(outgrads.get(parent), ingrad)
+    return outgrad[0]
+
+# Every VJPNode corresponds to a op_grad
+class VJPNode(Node):
+    __slots__ = ['parents', 'vjp']
+    def __init__(self, value, fun, args, kwargs, parent_argnums, parents):
+        self.parents = parents
+        vjpmaker = primitive_vjps[fun]
+        self.vjp = vjpmaker(parent_argnums, value, args, kwargs)
+```
+#### Chainer
+Example Code
+```python
+# (1) Function Set definition, creates FunctionNode
+model = FunctionSet(
+    l1=F.Linear(784, 100),
+    l2=F.Linear(100, 100),
+    l3=F.Linear(100, 10)).to_gpu()
+
+# (2) Optimizer Setup
+opt = optimizers.SGD()
+opt.setup(model)
+
+# (3) Forward computation
+def forward(x, t):
+    h1 = F.relu(model.l1(x))
+    h2 = F.relu(model.l2(h1))
+    y = model.l3(h2)
+    return F.softmax_cross_entropy(y, t)
+
+# (4) Training loop
+for epoch in xrange(n_epoch):
+    for i in xrange(0, N, b_size):
+        x = Variable(to_gpu(...))
+        t = Variable(to_gpu(...))
+        opt.zero_grads()
+        loss = forward(x, t)
+        loss.backward()
+        opt.update()
+```
+In `forward(x, t)`, a graph of [`VariableNode`](https://github.com/chainer/chainer/blob/master/chainer/variable.py#L110) and [`FunctionNode`](https://github.com/chainer/chainer/blob/a69103a4aa59d5b318f39b01dbcb858d465b89cf/chainer/function_node.py#L19) is constructed. Every output's `VariableNode.creator` points to the `FunctionNode`.
+```python
+class FunctionNode(object):
+    ... 
+    def apply(self, inputs):
+        outputs = self.forward(inputs)
+        ret = tuple([variable.Variable(y, requires_grad=requires_grad)
+                     for y in outputs])
+        # Topological ordering
+        self.rank = max([x.rank for x in inputs]) if inputs else 0
+        # Add backward edges
+        for y in ret:
+            y.creator_node = self
+        self.inputs = tuple([x.node for x in inputs])
+        self.outputs = tuple([y.node for y in ret])
+
+        return ret
+```
+`loss.backward()` will calculate the accumulated gradient of all variables. The backward of all `FunctionNode`s will be called in topological order.
+```python
+class VariableNode(object):
+    ...
+    def backward(self, retain_grad, loss_scale):
+        if self.creator_node is None:
+            return
+
+        cand_funcs = []
+        seen_set = set()
+        grads = {}
+
+        # Initialize error by 1, if this is a loss variable
+        if self.data.size == 1 and self._grad_var is None:
+            self.grad = numpy.ones_like(self.data)
+        grads[self._node] = self._grad_var
+
+        def add_cand(cand):
+            if cand not in seen_set:
+                # Negate since heapq is min-heap. This is a global variable
+                heapq.heappush(cand_funcs, (-cand.rank, len(seen_set), cand))
+                seen_set.add(cand)
+
+        add_cand(self.creator_node)
+
+        while cand_funcs:
+            _, _, func = heapq.heappop(cand_funcs)
+            gxs = func.backward_accumulate(func.inputs, func.outputs, func.outputs.grad)
+
+            for x, gx in zip(func.inputs, gxs):
+                if x in grads:
+                    grads[x] += gx
+                else:
+                    grads[x] = gx
+
+                if x.creator_node is not None:
+                    add_cand(x.creator_node)
```
+
+#### PyTorch
+Example Code
+```python
+x = Variable(torch.ones(5, 5))
+y = Variable(torch.ones(5, 5) * 4)
+z = x ** 2 + x * 2 + x * y + y
+z.backward(torch.ones(5, 5))
+```
+The trace is done by `Variable.creator` and `Function.previous_functions`.
+```python
+class Variable(object):
+    def __init__(self, tensor, creator=None, requires_grad=True):
+        if creator is None:
+            creator = Leaf(self, requires_grad)
+        self.data = tensor
+        self.creator = creator
+        self._grad = None
+
+    def backward(self, gradient=None):
+        if gradient is None:
+            if self.data.numel() != 1:
+                raise RuntimeError('backward should be called only on a scalar (i.e. 1-element tensor) or with gradient w.r.t. the variable')
+            gradient = self.data.new(1).fill_(1)
+        self._execution_engine.run_backward(self, gradient)
+
+class Function(object):
+    # ...
+    def _do_forward(self, *input):
+        unpacked_input = tuple(arg.data for arg in input)
+        raw_output = self.forward(*unpacked_input)
+
+        # mark output.creator = self for backward trace
+        output = tuple(Variable(tensor, self) for tensor in raw_output)
+
+        self.previous_functions = [(arg.creator, id(arg)) for arg in input]
+        self.output_ids = {id(var): i for i, var in enumerate(output)}
+        return output
+
+    def _do_backward(self, grad_output):
+        return self.backward(grad_output)
+```
+The [backward](https://github.com/pytorch/pytorch/blob/v0.1.1/torch/autograd/engine.py) is similar to Autograd. 
+
+#### DyNet
+Example code
+```python
+model = dy.model()
+W_p = model.add_parameters((20, 100))
+b_p = model.add_parameters(20)
+E = model.add_lookup_parameters((20000, 50))
+for epoch in range(num_epochs):
+    for in_words, out_label in training_data:
+        dy.renew_cg() # init tape
+        W = dy.parameter(W_p)
+        b = dy.parameter(b_p)
+        score_sym = dy.softmax(W*dy.concatenate([E[in_words[0]],E[in_words[1]]])+b)
+        loss_sym = dy.pickneglogsoftmax(score_sym, out_label)
+        loss_val = loss_sym.value()
+        loss_sym.backward()
+```
+[forward](https://github.com/clab/dynet/blob/740a9626a13a2732544de142e256ad0d0a166658/dynet/exec.cc#L84-L158), [backward](https://github.com/clab/dynet/blob/740a9626a13a2732544de142e256ad0d0a166658/dynet/exec.cc#L166-L284). The trace is done by creating a tape of expressions in every iteration. Backward is done by traversing the tape in the reverse order.
+```c++
+void SimpleExecutionEngine::backward(VariableIndex from_where, bool full) {
+  ...
+  for (int i = num_nodes - 1; i >= 0; --i) {
+    // each node corresponds to an op
+    node->backward(xs, node_fx, node_dEdfx, ai, node_dEdxai);
+  }
+  ...
+}
+```
diff --git a/doc/survey/op_fusion_design.md b/doc/survey/op_fusion_design.md
new file mode 100644
index 0000000000..d6e48f4f58
--- /dev/null
+++ b/doc/survey/op_fusion_design.md
@@ -0,0 +1,20 @@
+# Operator fusion
+Fusing multiple operators together is an important method to optimize the program execution, particularly for GPU or other specialized accelerators. An obvious benefit is to avoid the overhead of saving the intermediate result back into global memory.
+
+There are generally two ways to fuse operators: fusing directly connected operators, and fusing operators that are not directly connected. The first method is mainly used by [NNVM Compiler](https://github.com/dmlc/tvm/) and [XLA](https://www.tensorflow.org/performance/xla/). The second method is mainly used by Dynet and TensorFlow Fold to do auto-batching. The principle of operator fusion is to combine multiple operations into one according to certain rules; for example, `Y = X * W` and `Z = Y + B` can be fused to `Z = X * W + B`, and `Y1 = X1 * W` and `Y2 = X2 * W` can be fused to `[Y1;Y2] = [X1;X2] * W`. In order to get a short-term benefit, we decided to try to manually specify these rules.
+
+## Challenge
+The challenges of fusing operators are:
+  - how to make the rules.
+  - how to implement these rules efficiently.
+
+### How to make the rules?
+
+The problem of determining the best single location for a fusion operator is an NP-hard combinatorial problem. After analyzing the operators of DL models, we found that there are two groups of operators that can be fused explicitly: one is simple and adjacent operations, for example, `tmp = x + y` and `z = Relu(tmp)`; the other is operators that have the same function, for example, a series of `SGD` or `Momentum` ops. They usually appear in a model in large numbers, so we should first think about how to fuse each group separately.
+
+### How to implement these rules efficiently?
+#### How to fuse the adjacent operations efficiently?
+Here we use a template function to represent the fused operations. The pros of using a template function are that it is simple and efficient, and the cons are that it is not easy to extend and it can only be used to express some simple operations. So taking into account our current needs, the template function is more appropriate.
+
+#### How to fuse the operators that have the same function efficiently? 
+
+We take the SGD operator as an example: a training model may have hundreds of parameters and correspondingly the same number of SGD operators. The expression (`w = w - lr*w_g`) of those operators is the same, so during training, the executor will execute this expression hundreds of times on the CPU or other specialized accelerators. If we can fuse them and make the addresses of all `w` and all `w_g` contiguous respectively, we only need to execute it once. For some accelerators, the time of launching a kernel is not negligible, so launching and executing a kernel hundreds of times may cost much more than launching and executing it only once. There are usually many operators similar to `SGD` in a DL model, such as `AllReduce` and `FC`.
diff --git a/doc/templates/conf.py.cn.in b/doc/templates/conf.py.cn.in
index 76b82fd97f..890f706155 100644
--- a/doc/templates/conf.py.cn.in
+++ b/doc/templates/conf.py.cn.in
@@ -16,8 +16,8 @@ import os, subprocess
 sys.path.insert(0, os.path.abspath('@PADDLE_BINARY_DIR@/python'))
 import shlex
 from recommonmark import parser, transform
-import paddle
-import paddle.v2
+@IMPORT_PADDLE_STRING@
+@IMPORT_PADDLEV2_STRING@
 
 MarkdownParser = parser.CommonMarkParser
 AutoStructify = transform.AutoStructify
diff --git a/doc/templates/conf.py.en.in b/doc/templates/conf.py.en.in
index 5aa5c1381f..5b09464cb9 100644
--- a/doc/templates/conf.py.en.in
+++ b/doc/templates/conf.py.en.in
@@ -16,8 +16,8 @@ import os, subprocess
 sys.path.insert(0, os.path.abspath('@PADDLE_BINARY_DIR@/python'))
 import shlex
 from recommonmark import parser, transform
-import paddle
-import paddle.v2
+@IMPORT_PADDLE_STRING@
+@IMPORT_PADDLEV2_STRING@
 
 MarkdownParser = parser.CommonMarkParser
diff --git a/doc/v2/CMakeLists.txt b/doc/v2/CMakeLists.txt
index be957d37b1..d230a1b921 100644
--- a/doc/v2/CMakeLists.txt
+++ b/doc/v2/CMakeLists.txt
@@ -15,6 +15,9 @@ set(SPHINX_CACHE_DIR_EN "${CMAKE_CURRENT_BINARY_DIR}/en/_doctrees")
 # HTML output director
 set(SPHINX_HTML_DIR_EN "${CMAKE_CURRENT_BINARY_DIR}/en/html")
 
+set(IMPORT_PADDLE_STRING "")
+set(IMPORT_PADDLEV2_STRING "")
+
 configure_file(
   "${CMAKE_CURRENT_SOURCE_DIR}/../templates/conf.py.en.in"
   "${BINARY_BUILD_DIR_EN}/conf.py"
@@ -27,8 +30,6 @@ sphinx_add_target(paddle_v2_docs
   ${CMAKE_CURRENT_SOURCE_DIR}
   ${SPHINX_HTML_DIR_EN})
 
-add_dependencies(paddle_v2_docs gen_proto_py paddle_python)
-
 # configured documentation tools and intermediate build results
 set(BINARY_BUILD_DIR_CN "${CMAKE_CURRENT_BINARY_DIR}/cn/_build")
 
@@ -50,6 +51,4 @@ sphinx_add_target(paddle_v2_docs_cn
   ${CMAKE_CURRENT_SOURCE_DIR}
   ${SPHINX_HTML_DIR_CN})
 
-add_dependencies(paddle_v2_docs_cn gen_proto_py paddle_python)
-
 add_subdirectory(api)
diff --git a/doc/v2/api/CMakeLists.txt b/doc/v2/api/CMakeLists.txt
index 2670a21a22..0c74522cb0 100644
--- a/doc/v2/api/CMakeLists.txt
+++ b/doc/v2/api/CMakeLists.txt
@@ -7,6 +7,9 @@ set(SPHINX_CACHE_DIR_EN "${CMAKE_CURRENT_BINARY_DIR}/en/_doctrees")
 # HTML output director
 set(SPHINX_HTML_DIR_EN "${CMAKE_CURRENT_BINARY_DIR}/en/html")
 
+set(IMPORT_PADDLE_STRING "import paddle")
+set(IMPORT_PADDLEV2_STRING "import paddle.v2")
+
 configure_file(
   "${CMAKE_CURRENT_SOURCE_DIR}/../../templates/conf.py.en.in"
   "${BINARY_BUILD_DIR_EN}/conf.py"
diff --git a/doc/v2/api/config/evaluators.rst b/doc/v2/api/config/evaluators.rst
index 9ac972fb19..458d892e82 100644
--- a/doc/v2/api/config/evaluators.rst
+++ b/doc/v2/api/config/evaluators.rst
@@ -101,7 +101,7 @@ value_printer
   :noindex:
 
 Detection
-=====
+==========
 
 detection_map
 -------------
diff --git
a/doc/v2/api/config/layer.rst b/doc/v2/api/config/layer.rst index 1a6496968c..5a0cfadfce 100644 --- a/doc/v2/api/config/layer.rst +++ b/doc/v2/api/config/layer.rst @@ -11,7 +11,7 @@ Data layer data ---- -.. autoclass:: paddle.v2.layer.data +.. autofunction:: paddle.v2.layer.data :noindex: Fully Connected Layers @@ -21,12 +21,12 @@ Fully Connected Layers fc -- -.. autoclass:: paddle.v2.layer.fc +.. autofunction:: paddle.v2.layer.fc :noindex: selective_fc ------------ -.. autoclass:: paddle.v2.layer.selective_fc +.. autofunction:: paddle.v2.layer.selective_fc :noindex: Conv Layers @@ -34,34 +34,34 @@ Conv Layers conv_operator ------------- -.. autoclass:: paddle.v2.layer.conv_operator +.. autofunction:: paddle.v2.layer.conv_operator :noindex: conv_projection --------------- -.. autoclass:: paddle.v2.layer.conv_projection +.. autofunction:: paddle.v2.layer.conv_projection :noindex: conv_shift ---------- -.. autoclass:: paddle.v2.layer.conv_shift +.. autofunction:: paddle.v2.layer.conv_shift :noindex: img_conv -------- -.. autoclass:: paddle.v2.layer.img_conv +.. autofunction:: paddle.v2.layer.img_conv :noindex: .. _api_v2.layer_context_projection: context_projection ------------------ -.. autoclass:: paddle.v2.layer.context_projection +.. autofunction:: paddle.v2.layer.context_projection :noindex: row_conv -------- -.. autoclass:: paddle.v2.layer.row_conv +.. autofunction:: paddle.v2.layer.row_conv :noindex: Image Pooling Layer @@ -69,27 +69,27 @@ Image Pooling Layer img_pool -------- -.. autoclass:: paddle.v2.layer.img_pool +.. autofunction:: paddle.v2.layer.img_pool :noindex: spp --- -.. autoclass:: paddle.v2.layer.spp +.. autofunction:: paddle.v2.layer.spp :noindex: maxout ------ -.. autoclass:: paddle.v2.layer.maxout +.. autofunction:: paddle.v2.layer.maxout :noindex: roi_pool -------- -.. autoclass:: paddle.v2.layer.roi_pool +.. autofunction:: paddle.v2.layer.roi_pool :noindex: pad ---- -.. autoclass:: paddle.v2.layer.pad +.. autofunction:: paddle.v2.layer.pad :noindex: Norm Layer @@ -97,27 +97,27 @@ Norm Layer img_cmrnorm ----------- -.. autoclass:: paddle.v2.layer.img_cmrnorm +.. autofunction:: paddle.v2.layer.img_cmrnorm :noindex: batch_norm ---------- -.. autoclass:: paddle.v2.layer.batch_norm +.. autofunction:: paddle.v2.layer.batch_norm :noindex: sum_to_one_norm --------------- -.. autoclass:: paddle.v2.layer.sum_to_one_norm +.. autofunction:: paddle.v2.layer.sum_to_one_norm :noindex: cross_channel_norm ------------------ -.. autoclass:: paddle.v2.layer.cross_channel_norm +.. autofunction:: paddle.v2.layer.cross_channel_norm :noindex: row_l2_norm ----------- -.. autoclass:: paddle.v2.layer.row_l2_norm +.. autofunction:: paddle.v2.layer.row_l2_norm :noindex: Recurrent Layers @@ -125,22 +125,22 @@ Recurrent Layers recurrent --------- -.. autoclass:: paddle.v2.layer.recurrent +.. autofunction:: paddle.v2.layer.recurrent :noindex: lstmemory --------- -.. autoclass:: paddle.v2.layer.lstmemory +.. autofunction:: paddle.v2.layer.lstmemory :noindex: grumemory --------- -.. autoclass:: paddle.v2.layer.grumemory +.. autofunction:: paddle.v2.layer.grumemory :noindex: gated_unit ----------- -.. autoclass:: paddle.v2.layer.gated_unit +.. autofunction:: paddle.v2.layer.gated_unit :noindex: Recurrent Layer Group @@ -148,32 +148,32 @@ Recurrent Layer Group memory ------ -.. autoclass:: paddle.v2.layer.memory +.. autofunction:: paddle.v2.layer.memory :noindex: recurrent_group --------------- -.. autoclass:: paddle.v2.layer.recurrent_group +.. 
autofunction:: paddle.v2.layer.recurrent_group :noindex: lstm_step --------- -.. autoclass:: paddle.v2.layer.lstm_step +.. autofunction:: paddle.v2.layer.lstm_step :noindex: gru_step -------- -.. autoclass:: paddle.v2.layer.gru_step +.. autofunction:: paddle.v2.layer.gru_step :noindex: beam_search ------------ -.. autoclass:: paddle.v2.layer.beam_search +.. autofunction:: paddle.v2.layer.beam_search :noindex: get_output ---------- -.. autoclass:: paddle.v2.layer.get_output +.. autofunction:: paddle.v2.layer.get_output :noindex: Mixed Layer @@ -183,54 +183,54 @@ Mixed Layer mixed ----- -.. autoclass:: paddle.v2.layer.mixed +.. autofunction:: paddle.v2.layer.mixed :noindex: .. _api_v2.layer_embedding: embedding --------- -.. autoclass:: paddle.v2.layer.embedding +.. autofunction:: paddle.v2.layer.embedding :noindex: scaling_projection ------------------ -.. autoclass:: paddle.v2.layer.scaling_projection +.. autofunction:: paddle.v2.layer.scaling_projection :noindex: dotmul_projection ----------------- -.. autoclass:: paddle.v2.layer.dotmul_projection +.. autofunction:: paddle.v2.layer.dotmul_projection :noindex: dotmul_operator --------------- -.. autoclass:: paddle.v2.layer.dotmul_operator +.. autofunction:: paddle.v2.layer.dotmul_operator :noindex: full_matrix_projection ---------------------- -.. autoclass:: paddle.v2.layer.full_matrix_projection +.. autofunction:: paddle.v2.layer.full_matrix_projection :noindex: identity_projection ------------------- -.. autoclass:: paddle.v2.layer.identity_projection +.. autofunction:: paddle.v2.layer.identity_projection :noindex: slice_projection ------------------- -.. autoclass:: paddle.v2.layer.slice_projection +.. autofunction:: paddle.v2.layer.slice_projection :noindex: table_projection ---------------- -.. autoclass:: paddle.v2.layer.table_projection +.. autofunction:: paddle.v2.layer.table_projection :noindex: trans_full_matrix_projection ---------------------------- -.. autoclass:: paddle.v2.layer.trans_full_matrix_projection +.. autofunction:: paddle.v2.layer.trans_full_matrix_projection :noindex: Aggregate Layers @@ -245,51 +245,46 @@ AggregateLevel pooling ------- -.. autoclass:: paddle.v2.layer.pooling +.. autofunction:: paddle.v2.layer.pooling :noindex: .. _api_v2.layer_last_seq: last_seq -------- -.. autoclass:: paddle.v2.layer.last_seq +.. autofunction:: paddle.v2.layer.last_seq :noindex: .. _api_v2.layer_first_seq: first_seq --------- -.. autoclass:: paddle.v2.layer.first_seq +.. autofunction:: paddle.v2.layer.first_seq :noindex: sub_seq --------- -.. autoclass:: paddle.v2.layer.sub_seq +.. autofunction:: paddle.v2.layer.sub_seq :noindex: concat ------ -.. autoclass:: paddle.v2.layer.concat +.. autofunction:: paddle.v2.layer.concat :noindex: seq_concat ---------- -.. autoclass:: paddle.v2.layer.seq_concat +.. autofunction:: paddle.v2.layer.seq_concat :noindex: seq_slice --------- -.. autoclass:: paddle.v2.layer.seq_slice - :noindex: - -kmax_sequence_score -------------------- -.. autoclass:: paddle.v2.layer.kmax_sequence_score +.. autofunction:: paddle.v2.layer.seq_slice :noindex: sub_nested_seq -------------- -.. autoclass:: paddle.v2.layer.sub_nested_seq +.. autofunction:: paddle.v2.layer.sub_nested_seq :noindex: Reshaping Layers @@ -297,7 +292,7 @@ Reshaping Layers block_expand ------------ -.. autoclass:: paddle.v2.layer.block_expand +.. autofunction:: paddle.v2.layer.block_expand :noindex: .. _api_v2.layer_expand: @@ -309,22 +304,22 @@ ExpandLevel expand ------ -.. autoclass:: paddle.v2.layer.expand +.. 
autofunction:: paddle.v2.layer.expand :noindex: repeat ------ -.. autoclass:: paddle.v2.layer.repeat +.. autofunction:: paddle.v2.layer.repeat :noindex: rotate ------ -.. autoclass:: paddle.v2.layer.rotate +.. autofunction:: paddle.v2.layer.rotate :noindex: seq_reshape ----------- -.. autoclass:: paddle.v2.layer.seq_reshape +.. autofunction:: paddle.v2.layer.seq_reshape :noindex: Math Layers @@ -332,94 +327,94 @@ Math Layers addto ----- -.. autoclass:: paddle.v2.layer.addto +.. autofunction:: paddle.v2.layer.addto :noindex: linear_comb ----------- -.. autoclass:: paddle.v2.layer.linear_comb +.. autofunction:: paddle.v2.layer.linear_comb :noindex: interpolation ------------- -.. autoclass:: paddle.v2.layer.interpolation +.. autofunction:: paddle.v2.layer.interpolation :noindex: bilinear_interp --------------- -.. autoclass:: paddle.v2.layer.bilinear_interp +.. autofunction:: paddle.v2.layer.bilinear_interp :noindex: dropout -------- -.. autoclass:: paddle.v2.layer.dropout +.. autofunction:: paddle.v2.layer.dropout :noindex: dot_prod --------- -.. autoclass:: paddle.v2.layer.dot_prod +.. autofunction:: paddle.v2.layer.dot_prod :noindex: out_prod -------- -.. autoclass:: paddle.v2.layer.out_prod +.. autofunction:: paddle.v2.layer.out_prod :noindex: power ----- -.. autoclass:: paddle.v2.layer.power +.. autofunction:: paddle.v2.layer.power :noindex: scaling ------- -.. autoclass:: paddle.v2.layer.scaling +.. autofunction:: paddle.v2.layer.scaling :noindex: clip ---- -.. autoclass:: paddle.v2.layer.clip +.. autofunction:: paddle.v2.layer.clip :noindex: resize ------ -.. autoclass:: paddle.v2.layer.resize +.. autofunction:: paddle.v2.layer.resize :noindex: slope_intercept --------------- -.. autoclass:: paddle.v2.layer.slope_intercept +.. autofunction:: paddle.v2.layer.slope_intercept :noindex: tensor ------ -.. autoclass:: paddle.v2.layer.tensor +.. autofunction:: paddle.v2.layer.tensor :noindex: .. _api_v2.layer_cos_sim: cos_sim ------- -.. autoclass:: paddle.v2.layer.cos_sim +.. autofunction:: paddle.v2.layer.cos_sim :noindex: l2_distance ----------- -.. autoclass:: paddle.v2.layer.l2_distance +.. autofunction:: paddle.v2.layer.l2_distance :noindex: trans ----- -.. autoclass:: paddle.v2.layer.trans +.. autofunction:: paddle.v2.layer.trans :noindex: scale_shift ----------- -.. autoclass:: paddle.v2.layer.scale_shift +.. autofunction:: paddle.v2.layer.scale_shift :noindex: factorization_machine --------------------- -.. autoclass:: paddle.v2.layer.factorization_machine +.. autofunction:: paddle.v2.layer.factorization_machine :noindex: Sampling Layers @@ -427,17 +422,17 @@ Sampling Layers maxid ----- -.. autoclass:: paddle.v2.layer.max_id +.. autofunction:: paddle.v2.layer.max_id :noindex: sampling_id ----------- -.. autoclass:: paddle.v2.layer.sampling_id +.. autofunction:: paddle.v2.layer.sampling_id :noindex: multiplex --------- -.. autoclass:: paddle.v2.layer.multiplex +.. autofunction:: paddle.v2.layer.multiplex :noindex: .. _api_v2.layer_costs: @@ -447,97 +442,97 @@ Cost Layers cross_entropy_cost ------------------ -.. autoclass:: paddle.v2.layer.cross_entropy_cost +.. autofunction:: paddle.v2.layer.cross_entropy_cost :noindex: cross_entropy_with_selfnorm_cost -------------------------------- -.. autoclass:: paddle.v2.layer.cross_entropy_with_selfnorm_cost +.. autofunction:: paddle.v2.layer.cross_entropy_with_selfnorm_cost :noindex: multi_binary_label_cross_entropy_cost ------------------------------------- -.. autoclass:: paddle.v2.layer.multi_binary_label_cross_entropy_cost +.. 
autofunction:: paddle.v2.layer.multi_binary_label_cross_entropy_cost :noindex: classification_cost ------------------- -.. autoclass:: paddle.v2.layer.classification_cost +.. autofunction:: paddle.v2.layer.classification_cost :noindex: huber_regression_cost ------------------------- -.. autoclass:: paddle.v2.layer.huber_regression_cost +.. autofunction:: paddle.v2.layer.huber_regression_cost :noindex: huber_classification_cost ------------------------- -.. autoclass:: paddle.v2.layer.huber_classification_cost +.. autofunction:: paddle.v2.layer.huber_classification_cost :noindex: lambda_cost ----------- -.. autoclass:: paddle.v2.layer.lambda_cost +.. autofunction:: paddle.v2.layer.lambda_cost :noindex: square_error_cost ----------------- -.. autoclass:: paddle.v2.layer.square_error_cost +.. autofunction:: paddle.v2.layer.square_error_cost :noindex: rank_cost --------- -.. autoclass:: paddle.v2.layer.rank_cost +.. autofunction:: paddle.v2.layer.rank_cost :noindex: sum_cost --------- -.. autoclass:: paddle.v2.layer.sum_cost +.. autofunction:: paddle.v2.layer.sum_cost :noindex: crf --- -.. autoclass:: paddle.v2.layer.crf +.. autofunction:: paddle.v2.layer.crf :noindex: crf_decoding ------------ -.. autoclass:: paddle.v2.layer.crf_decoding +.. autofunction:: paddle.v2.layer.crf_decoding :noindex: ctc --- -.. autoclass:: paddle.v2.layer.ctc +.. autofunction:: paddle.v2.layer.ctc :noindex: warp_ctc -------- -.. autoclass:: paddle.v2.layer.warp_ctc +.. autofunction:: paddle.v2.layer.warp_ctc :noindex: nce --- -.. autoclass:: paddle.v2.layer.nce +.. autofunction:: paddle.v2.layer.nce :noindex: hsigmoid --------- -.. autoclass:: paddle.v2.layer.hsigmoid +.. autofunction:: paddle.v2.layer.hsigmoid :noindex: smooth_l1_cost -------------- -.. autoclass:: paddle.v2.layer.smooth_l1_cost +.. autofunction:: paddle.v2.layer.smooth_l1_cost :noindex: multibox_loss -------------- -.. autoclass:: paddle.v2.layer.multibox_loss +.. autofunction:: paddle.v2.layer.multibox_loss :noindex: detection_output ---------------- -.. autoclass:: paddle.v2.layer.detection_output +.. autofunction:: paddle.v2.layer.detection_output :noindex: Check Layer @@ -545,7 +540,7 @@ Check Layer eos --- -.. autoclass:: paddle.v2.layer.eos +.. autofunction:: paddle.v2.layer.eos :noindex: Activation @@ -553,5 +548,5 @@ Activation prelu -------- -.. autoclass:: paddle.v2.layer.prelu +.. autofunction:: paddle.v2.layer.prelu :noindex: diff --git a/doc/v2/api/index_en.rst b/doc/v2/api/index_en.rst index b11cd449af..5813509dce 100644 --- a/doc/v2/api/index_en.rst +++ b/doc/v2/api/index_en.rst @@ -4,8 +4,6 @@ API .. toctree:: :maxdepth: 1 - overview.rst model_configs.rst data.rst run_logic.rst - fluid/index.rst diff --git a/doc/v2/build_and_install/build_from_source_cn.rst b/doc/v2/build_and_install/build_from_source_cn.rst index 115b92a338..d0dacb104f 100644 --- a/doc/v2/build_and_install/build_from_source_cn.rst +++ b/doc/v2/build_and_install/build_from_source_cn.rst @@ -19,10 +19,11 @@ ---------------- PaddlePaddle需要使用Docker环境完成编译,这样可以免去单独安装编译依赖的步骤,可选的不同编译环境Docker镜像 -可以在 `这里 `_ 找到。或者 -参考下述可选步骤,从源码中构建用于编译PaddlePaddle的Docker镜像。 +可以在 `这里 `__ 找到,您也可以 +在 `这里 `__ 找到 paddle_manylinux_devel +镜像的编译以及使用方法。或者参考下述可选步骤,从源码中构建用于编译PaddlePaddle的Docker镜像。 -如果您选择不使用Docker镜像,则需要在本机安装下面章节列出的 `编译依赖`_ 之后才能开始编译的步骤。 +如果您选择不使用Docker镜像,则需要在本机安装下面章节列出的 :ref:`编译依赖 <_compile_deps>` 之后才能开始编译的步骤。 编译PaddlePaddle,需要执行: @@ -34,13 +35,16 @@ PaddlePaddle需要使用Docker环境完成编译,这样可以免去单独安 # 2. 可选步骤:源码中构建用于编译PaddlePaddle的Docker镜像 docker build -t paddle:dev . # 3. 
执行下面的命令编译CPU-Only的二进制 - docker run -it -v $PWD:/paddle -e "WITH_GPU=OFF" -e "WITH_TESTING=OFF" paddlepaddle/paddle_manylinux_devel:cuda8.0_cudnn5 bash -x /paddle/paddle/scripts/docker/build.sh + docker run -it -v $PWD:/paddle -w /paddle -e "PYTHON_ABI=cp27-cp27mu" -e "WITH_GPU=OFF" -e "WITH_TESTING=OFF" paddlepaddle/paddle_manylinux_devel:cuda8.0_cudnn5 ./paddle/scripts/paddle_build.sh build # 4. 或者也可以使用为上述可选步骤构建的镜像(必须先执行第2步) - docker run -it -v $PWD:/paddle -e "WITH_GPU=OFF" -e "WITH_TESTING=OFF" paddle:dev + docker run -it -v $PWD:/paddle -w /paddle -e "WITH_GPU=OFF" -e "WITH_TESTING=OFF" paddle:dev ./paddle/scripts/paddle_build.sh build -注:上述命令把当前目录(源码树根目录)映射为 container 里的 :code:`/paddle` 目录。如果使用自行 -构建的镜像(上述第4步)会执行 :code:`Dockerfile` 描述的默认入口程序 :code:`build.sh` 可以省略步骤3中 -最后的执行脚本的命令。 +注: + +- 上述命令把当前目录(源码树根目录)映射为 container 里的 :code:`/paddle` 目录。 + +- 如果您使用的是 manylinux 的镜像进行编译, 那么您需要通过环境变量 :code:`PYTHON_ABI` 来指定一个 `Python ABI `__. +PaddlePaddle目前支持的 Python ABI 有 :code:`cp27-cp27m` 和 :code:`cp27-cp27mu`. 编译完成后会在build/python/dist目录下生成输出的whl包,可以选在在当前机器安装也可以拷贝到目标机器安装: @@ -71,15 +75,15 @@ PaddlePaddle需要使用Docker环境完成编译,这样可以免去单独安 .. code-block:: bash - docker run -it -v $PWD:/paddle -e "WITH_GPU=OFF" -e "WITH_TESTING=ON" -e "RUN_TEST=ON" paddlepaddle/paddle_manylinux_devel:cuda8.0_cudnn5 bash -x /paddle/paddle/scripts/docker/build.sh + docker run -it -v $PWD:/paddle -w /paddle -e "WITH_GPU=OFF" -e "WITH_TESTING=ON" -e "RUN_TEST=ON" paddlepaddle/paddle_manylinux_devel:cuda8.0_cudnn5 ./paddle/scripts/paddle_build.sh test 如果期望执行其中一个单元测试,(比如 :code:`test_sum_op` ): .. code-block:: bash - docker run -it -v $PWD:/paddle -e "WITH_GPU=OFF" -e "WITH_TESTING=ON" -e "RUN_TEST=OFF" paddlepaddle/paddle_manylinux_devel:cuda8.0_cudnn5 /bin/bash - bash /paddle/paddle/scripts/docker/build.sh - cd /paddle/build + docker run -it -v $PWD:/paddle -w /paddle -e "WITH_GPU=OFF" -e "WITH_TESTING=ON" -e "RUN_TEST=OFF" paddlepaddle/paddle_manylinux_devel:cuda8.0_cudnn5 /bin/bash + ./paddle/scripts/paddle_build.sh build + cd build ctest -R test_sum_op -V .. _faq_docker: @@ -107,7 +111,7 @@ PaddlePaddle需要使用Docker环境完成编译,这样可以免去单独安 - 学习 Docker 有多难? - 理解 Docker 并不难,大概花十分钟看一下[这篇文章](https://zhuanlan.zhihu.com/p/19902938)。这可以帮您省掉花一小时安装和配置各种开发工具,以及切换机器时需要新安装的辛苦。别忘了 PaddlePaddle 更新可能导致需要新的开发工具。更别提简化问题复现带来的好处了。 + 理解 Docker 并不难,大概花十分钟看一下 `如何使用Docker `_ 。这可以帮您省掉花一小时安装和配置各种开发工具,以及切换机器时需要新安装的辛苦。别忘了 PaddlePaddle 更新可能导致需要新的开发工具。更别提简化问题复现带来的好处了。 - 我可以用 IDE 吗? @@ -115,17 +119,16 @@ PaddlePaddle需要使用Docker环境完成编译,这样可以免去单独安 很多 PaddlePaddle 开发者使用 Emacs。他们在自己的 `~/.emacs` 配置文件里加两行 - ```emacs - (global-set-key "\C-cc" 'compile) - (setq compile-command - "docker run --rm -it -v $(git rev-parse --show-toplevel):/paddle paddle:dev") - ``` + .. code-block:: emacs + + (global-set-key "\C-cc" 'compile) + (setq compile-command "docker run --rm -it -v $(git rev-parse --show-toplevel):/paddle paddle:dev") 就可以按 `Ctrl-C` 和 `c` 键来启动编译了。 - 可以并行编译吗? 
- 是的。我们的 Docker image 运行一个 [Bash 脚本](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/scripts/docker/build.sh)。这个脚本调用 `make -j$(nproc)` 来启动和 CPU 核一样多的进程来并行编译。 + 是的。我们的 Docker image 运行一个 `Paddle编译Bash脚本 `_ 。这个脚本调用 `make -j$(nproc)` 来启动和 CPU 核一样多的进程来并行编译。 - Docker 需要 sudo @@ -133,11 +136,11 @@ PaddlePaddle需要使用Docker环境完成编译,这样可以免去单独安 - 在 Windows/MacOS 上编译很慢 - Docker 在 Windows 和 MacOS 都可以运行。不过实际上是运行在一个 Linux 虚拟机上。可能需要注意给这个虚拟机多分配一些 CPU 和内存,以保证编译高效。具体做法请参考[这个issue](https://github.com/PaddlePaddle/Paddle/issues/627)。 + Docker 在 Windows 和 MacOS 都可以运行。不过实际上是运行在一个 Linux 虚拟机上。可能需要注意给这个虚拟机多分配一些 CPU 和内存,以保证编译高效。具体做法请参考 `如何为Windows/Mac计算机上的Docker增加内存和虚拟机 `_ 。 - 磁盘不够 - 本文中的例子里,`docker run` 命令里都用了 `--rm` 参数,这样保证运行结束之后的 containers 不会保留在磁盘上。可以用 `docker ps -a` 命令看到停止后但是没有删除的 containers。`docker build` 命令有时候会产生一些中间结果,是没有名字的 images,也会占用磁盘。可以参考[这篇文章](https://zaiste.net/posts/removing_docker_containers/)来清理这些内容。 + 本文中的例子里,`docker run` 命令里都用了 `--rm` 参数,这样保证运行结束之后的 containers 不会保留在磁盘上。可以用 `docker ps -a` 命令看到停止后但是没有删除的 containers。`docker build` 命令有时候会产生一些中间结果,是没有名字的 images,也会占用磁盘。可以参考 `如何删除Docker Container `_ 来清理这些内容。 .. _compile_deps: @@ -197,7 +200,7 @@ BLAS PaddlePaddle支持 `MKL `_ 和 `OpenBlAS `_ 两种BLAS库。默认使用MKL。如果使用MKL并且机器含有AVX2指令集, -还会下载MKL-DNN数学库,详细参考 `这里 `_ 。 +还会下载MKL-DNN数学库,详细参考 `mkldnn设计文档 `_ 。 如果关闭MKL,则会使用OpenBLAS作为BLAS库。 @@ -213,7 +216,7 @@ PaddlePaddle可以使用cuDNN v5.1之后的任何一个版本来编译运行, 编译选项的设置 ++++++++++++++ -PaddePaddle通过编译时指定路径来实现引用各种BLAS/CUDA/cuDNN库。cmake编译时,首先在系统路径( :code:`/usr/lib:/usr/local/lib` )中搜索这几个库,同时也会读取相关路径变量来进行搜索。 通过使用 ``-D`` 命令可以设置,例如 +PaddePaddle通过编译时指定路径来实现引用各种BLAS/CUDA/cuDNN库。cmake编译时,首先在系统路径( :code:`/usr/lib:/usr/local/lib` )中搜索这几个库,同时也会读取相关路径变量来进行搜索。 通过使用 ``-D`` 命令可以设置,例如 .. code-block:: bash diff --git a/doc/v2/build_and_install/build_from_source_en.rst b/doc/v2/build_and_install/build_from_source_en.rst index 8fef9e7347..664b68da8b 100644 --- a/doc/v2/build_and_install/build_from_source_en.rst +++ b/doc/v2/build_and_install/build_from_source_en.rst @@ -11,7 +11,7 @@ To build PaddlePaddle, you need 1. A computer -- Linux, Windows, MacOS. 2. Docker. -Nothing else. Not even Python and GCC, because you can install all build tools into a Docker image. +Nothing else. Not even Python and GCC, because you can install all build tools into a Docker image. We run all the tools by running this image. .. _build_step: @@ -22,8 +22,12 @@ How To Build You need to use Docker to build PaddlePaddle to avoid installing dependencies by yourself. We have several pre-built Docker images `here `_ , +you can also find how to build and use paddle_manylinux_devel Docker image from +`here `__ Or you can build your own image from source as the optional step below: +If you don't wish to use docker,you need to install several compile dependencies manually as :ref:`Compile Dependencies <_compile_deps>` shows to start compilation. + .. code-block:: bash # 1. clone the source code @@ -32,14 +36,17 @@ Or you can build your own image from source as the optional step below: # 2. Optional: build development docker image from source docker build -t paddle:dev . # 3. Run the following command to build a CPU-Only binaries - docker run -it -v $PWD:/paddle -e "WITH_GPU=OFF" -e "WITH_TESTING=OFF" paddlepaddle/paddle_manylinux_devel:cuda8.0_cudnn5 bash -x /paddle/paddle/scripts/docker/build.sh + docker run -it -v $PWD:/paddle -w /paddle -e "PYTHON_ABI=cp27-cp27mu" -e "WITH_GPU=OFF" -e "WITH_TESTING=OFF" paddlepaddle/paddle_manylinux_devel:cuda8.0_cudnn5 ./paddle/scripts/paddle_build.sh build # 4. 
Or, use your built Docker image to build PaddlePaddle (must run step 2) - docker run -it -v $PWD:/paddle -e "WITH_GPU=OFF" -e "WITH_TESTING=OFF" paddle:dev + docker run -it -v $PWD:/paddle -w /paddle -e "WITH_GPU=OFF" -e "WITH_TESTING=OFF" paddle:dev ./paddle/scripts/paddle_build.sh build + +NOTE: -NOTE: The above command try to mount the current working directory (root directory of source code) -into :code:`/paddle` directory inside docker container. If you are using your own image -(Step 4) it will run default entry-point :code:`build.sh` , so you could omit the last -command in step 3. +- The above command try to mount the current working directory (root directory of source code) +into :code:`/paddle` directory inside docker container. + +- You need to pass in the required environment variable :code:`PYTHON_ABI` to specify a `Python ABI `__. +Currently PaddlePaddle supported Python ABIs include :code:`cp27-cp27m` and :code:`cp27-cp27mu` . When the compile finishes, you can get the output whl package under build/python/dist, then you can choose to install the whl on local @@ -72,21 +79,21 @@ Set :code:`WITH_GPU=ON` Can also run tests on GPU. .. code-block:: bash - docker run -it -v $PWD:/paddle -e "WITH_GPU=OFF" -e "WITH_TESTING=ON" -e "RUN_TEST=ON" paddlepaddle/paddle_manylinux_devel:cuda8.0_cudnn5 bash -x paddle/paddle/scripts/docker/build.sh + docker run -it -v $PWD:/paddle -w /paddle -e "WITH_GPU=OFF" -e "WITH_TESTING=ON" -e "RUN_TEST=ON" paddlepaddle/paddle_manylinux_devel:cuda8.0_cudnn5 ./paddle/scripts/paddle_build.sh test If you wish to run only one unit test, like :code:`test_sum_op`: .. code-block:: bash - docker run -it -v $PWD:/paddle -e "WITH_GPU=OFF" -e "WITH_TESTING=ON" -e "RUN_TEST=OFF" paddlepaddle/paddle_manylinux_devel:cuda8.0_cudnn5 /bin/bash - bash /paddle/paddle/scripts/docker/build.sh - cd /paddle/build + docker run -it -v $PWD:/paddle -w /paddle -e "WITH_GPU=OFF" -e "WITH_TESTING=ON" -e "RUN_TEST=OFF" paddlepaddle/paddle_manylinux_devel:cuda8.0_cudnn5 /bin/bash + ./paddle/scripts/paddle_build.sh build + cd build ctest -R test_sum_op -V .. _faq_docker: Frequently Asked Questions ----------------- +--------------------------- - What is Docker? @@ -108,7 +115,7 @@ Frequently Asked Questions - How difficult is it to learn Docker? - It takes you ten minutes to read [an introductory article](https://docs.docker.com/get-started) and saves you more than one hour to install all required build tools, configure them, especially when new versions of PaddlePaddle require some new tools. Not even to mention the time saved when other people trying to reproduce the issue you have. + It takes you ten minutes to read `an introductory article `_ and saves you more than one hour to install all required build tools, configure them, especially when new versions of PaddlePaddle require some new tools. Not even to mention the time saved when other people trying to reproduce the issue you have. - Can I use my favorite IDE? @@ -116,17 +123,16 @@ Frequently Asked Questions Many PaddlePaddle developers are using Emacs. They add the following few lines into their `~/.emacs` configure file: - ```emacs - (global-set-key "\C-cc" 'compile) - (setq compile-command - "docker run --rm -it -v $(git rev-parse --show-toplevel):/paddle paddle:dev") - ``` + .. code-block:: emacs + + (global-set-key "\C-cc" 'compile) + (setq compile-command "docker run --rm -it -v $(git rev-parse --show-toplevel):/paddle paddle:dev") so they could type `Ctrl-C` and `c` to build PaddlePaddle from source. 
- Does Docker do parallel building? - Our building Docker image runs a [Bash script](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/scripts/docker/build.sh), which calls `make -j$(nproc)` to starts as many processes as the number of your CPU cores. + Our building Docker image runs a `Bash script `_ , which calls `make -j$(nproc)` to starts as many processes as the number of your CPU cores. - Docker requires sudo @@ -134,16 +140,16 @@ Frequently Asked Questions - Docker on Windows/MacOS builds slowly - On Windows and MacOS, Docker containers run in a Linux VM. You might want to give this VM some more memory and CPUs so to make the building efficient. Please refer to [this issue](https://github.com/PaddlePaddle/Paddle/issues/627) for details. + On Windows and MacOS, Docker containers run in a Linux VM. You might want to give this VM some more memory and CPUs so to make the building efficient. Please refer to `this issue `_ for details. - Not enough disk space - Examples in this article use option `--rm` with the `docker run` command. This option ensures that stopped containers do not exist on hard disks. We can use `docker ps -a` to list all containers, including stopped. Sometimes `docker build` generates some intermediate dangling images, which also take disk space. To clean them, please refer to [this article](https://zaiste.net/posts/removing_docker_containers/). + Examples in this article use option `--rm` with the `docker run` command. This option ensures that stopped containers do not exist on hard disks. We can use `docker ps -a` to list all containers, including stopped. Sometimes `docker build` generates some intermediate dangling images, which also take disk space. To clean them, please refer to `this article `_ . .. _compile_deps: Appendix: Compile Dependencies ----------------- +------------------------------- PaddlePaddle need the following dependencies when compiling, other dependencies will be downloaded automatically. @@ -164,11 +170,11 @@ will be downloaded automatically. .. _build_options: Appendix: Build Options ----------------- +------------------------- Build options include whether build binaries for CPU or GPU, which BLAS library to use etc. You may pass these settings when running cmake. -For detailed cmake tutorial please refer to `here `_ 。 +For detailed cmake tutorial please refer to `here `__ 。 You can add :code:`-D` argument to pass such options, like: @@ -217,7 +223,7 @@ keep on with latest cuDNN versions. Be sure to run with the same version of cuDN you built. Pass Compile Options -++++++++++++++ +++++++++++++++++++++++ You can pass compile options to use intended BLAS/CUDA/Cudnn libraries. When running cmake command, it will search system paths like diff --git a/doc/v2/build_and_install/docker_install_cn.rst b/doc/v2/build_and_install/docker_install_cn.rst index 79d214635a..106c86bace 100644 --- a/doc/v2/build_and_install/docker_install_cn.rst +++ b/doc/v2/build_and_install/docker_install_cn.rst @@ -73,6 +73,7 @@ 当然,您也可以进入到Docker容器中,以交互式的方式执行或调试您的代码: .. code-block:: bash + docker run -it -v $PWD:/work paddlepaddle/paddle /bin/bash cd /work python train.py @@ -97,7 +98,7 @@ PaddlePaddle Book是为用户和开发者制作的一个交互式的Jupyter Note 国内用户可以使用下面的镜像源来加速访问: - .. code-block: bash + .. 
code-block:: bash docker run -p 8888:8888 docker.paddlepaddlehub.com/book diff --git a/doc/v2/build_and_install/docker_install_en.rst b/doc/v2/build_and_install/docker_install_en.rst index e0e0559fb8..25aecb8d0d 100644 --- a/doc/v2/build_and_install/docker_install_en.rst +++ b/doc/v2/build_and_install/docker_install_en.rst @@ -80,6 +80,7 @@ Also, you can go into the container shell, run or debug your code interactively: .. code-block:: bash + docker run -it -v $PWD:/work paddlepaddle/paddle /bin/bash cd /work python train.py @@ -104,7 +105,7 @@ We provide a packaged book image, simply issue the command: For users in China, we provide a faster mirror: - .. code-block: bash + .. code-block:: bash docker run -p 8888:8888 docker.paddlepaddlehub.com/book diff --git a/doc/v2/build_and_install/index_cn.rst b/doc/v2/build_and_install/index_cn.rst index e079bb661f..1a9305ac4b 100644 --- a/doc/v2/build_and_install/index_cn.rst +++ b/doc/v2/build_and_install/index_cn.rst @@ -6,7 +6,7 @@ PaddlePaddle针对不同的用户群体提供了多种安装方式。 专注深度学习模型开发 ------------------ +-------------------- PaddlePaddle提供了多种python wheel包,可通过pip一键安装: @@ -18,7 +18,7 @@ PaddlePaddle提供了多种python wheel包,可通过pip一键安装: 这是最便捷的安装方式,请根据机器配置和系统选择对应的安装包。 关注底层框架 ----------- +------------- PaddlePaddle提供了基于Docker的安装方式,请参照以下教程: @@ -45,7 +45,7 @@ PaddlePaddle提供了基于Docker的安装方式,请参照以下教程: 常见问题汇总 ------------ +-------------- 如果在安装过程中遇到了问题,请先尝试在下面的页面寻找答案: diff --git a/doc/v2/build_and_install/index_en.rst b/doc/v2/build_and_install/index_en.rst index 5b3de0f8c3..7990bacbd6 100644 --- a/doc/v2/build_and_install/index_en.rst +++ b/doc/v2/build_and_install/index_en.rst @@ -1,12 +1,12 @@ install and Compile -========== +====================== .. _install_steps: PaddlePaddle provides various methods of installation for many different users Focus on Deep Learning Model Development ------------------ +---------------------------------------- PaddlePaddle provides lots of packages of python wheel , that pip can install: @@ -18,7 +18,7 @@ PaddlePaddle provides lots of packages of python wheel , that pip can install: This is the most convenient way of installation. Please choose the right installation package with machine configure and system. Follow the Bottom Frame ----------- +------------------------ PaddlePaddle also supports installation using Docker. Please refer to the tutorial below: diff --git a/doc/v2/build_and_install/pip_install_cn.rst b/doc/v2/build_and_install/pip_install_cn.rst index b3d8827437..095da19cd4 100644 --- a/doc/v2/build_and_install/pip_install_cn.rst +++ b/doc/v2/build_and_install/pip_install_cn.rst @@ -10,20 +10,38 @@ PaddlePaddle可以使用常用的Python包管理工具 使用pip安装 ------------------------------ - -执行下面的命令即可在当前机器上安装PaddlePaddle的运行时环境,并自动下载安装依赖软件,版本为cpu_avx_openblas。 +执行下面的命令即可在当前机器上安装PaddlePaddle的运行时环境,并自动下载安装依赖软件。 .. code-block:: bash pip install paddlepaddle +当前的默认版本为0.12.0,cpu_avx_openblas,您可以通过指定版本号来安装其它版本,例如: + + .. code-block:: bash + + pip install paddlepaddle==0.11.0 + -如果需要安装支持GPU的版本(cuda7.5_cudnn5_avx_openblas),需要执行: +如果需要安装支持GPU的版本(cuda8.0_cudnn5_avx_openblas),需要执行: .. 
code-block:: bash pip install paddlepaddle-gpu +当前的默认版本也是0.12.0,PaddlePaddle针对不同需求提供了更多版本的安装包,部分列表如下: + +================================= ======================================== +版本号 版本说明 +================================= ======================================== +paddlepaddle-gpu==0.12.0 使用CUDA 8.0和cuDNN 5编译的0.12.0版本 +paddlepaddle-gpu==0.11.0.post87 使用CUDA 8.0和cuDNN 7编译的0.11.0版本 +paddlepaddle-gpu==0.11.0.post8 使用CUDA 8.0和cuDNN 5编译的0.11.0版本 +paddlepaddle-gpu==0.11.0 使用CUDA 7.5和cuDNN 5编译的0.11.0版本 +================================= ======================================== + +您可以在 `Release History `_ 中找到paddlepaddle-gpu的各个发行版本。 + 如果需要获取并安装最新的(开发分支)PaddlePaddle,可以从我们的CI系统中下载最新的whl安装包和c-api开发包并安装, 您可以从下面的表格中找到需要的版本: @@ -37,12 +55,12 @@ PaddlePaddle可以使用常用的Python包管理工具 :header: "版本说明", "cp27-cp27mu", "cp27-cp27m" :widths: 1, 3, 3 - "cpu_avx_mkl", "`paddlepaddle-0.11.0-cp27-cp27mu-linux_x86_64.whl `_", "`paddlepaddle-0.11.0-cp27-cp27m-linux_x86_64.whl `_" - "cpu_avx_openblas", "`paddlepaddle-0.11.0-cp27-cp27mu-linux_x86_64.whl `_", "`paddlepaddle-0.11.0-cp27-cp27m-linux_x86_64.whl `_" - "cpu_noavx_openblas", "`paddlepaddle-0.11.0-cp27-cp27mu-linux_x86_64.whl `_", "`paddlepaddle-0.11.0-cp27-cp27m-linux_x86_64.whl `_" - "cuda7.5_cudnn5_avx_mkl", "`paddlepaddle_gpu-0.11.0-cp27-cp27mu-linux_x86_64.whl `_", "`paddlepaddle_gpu-0.11.0-cp27-cp27m-linux_x86_64.whl `_" - "cuda8.0_cudnn5_avx_mkl", "`paddlepaddle_gpu-0.11.0-cp27-cp27mu-linux_x86_64.whl `_", "`paddlepaddle_gpu-0.11.0-cp27-cp27m-linux_x86_64.whl `_" - "cuda8.0_cudnn7_avx_mkl", "`paddlepaddle_gpu-0.11.0-cp27-cp27mu-linux_x86_64.whl `_", "`paddlepaddle_gpu-0.11.0-cp27-cp27m-linux_x86_64.whl `_" + "cpu_avx_mkl", "`paddlepaddle-latest-cp27-cp27mu-linux_x86_64.whl `__", "`paddlepaddle-latest-cp27-cp27m-linux_x86_64.whl `__" + "cpu_avx_openblas", "`paddlepaddle-latest-cp27-cp27mu-linux_x86_64.whl `__", "`paddlepaddle-latest-cp27-cp27m-linux_x86_64.whl `__" + "cpu_noavx_openblas", "`paddlepaddle-latest-cp27-cp27mu-linux_x86_64.whl `__", "`paddlepaddle-latest-cp27-cp27m-linux_x86_64.whl `_" + "cuda8.0_cudnn5_avx_mkl", "`paddlepaddle_gpu-latest-cp27-cp27mu-linux_x86_64.whl `__", "`paddlepaddle_gpu-latest-cp27-cp27m-linux_x86_64.whl `__" + "cuda8.0_cudnn7_avx_mkl", "`paddlepaddle_gpu-latest-cp27-cp27mu-linux_x86_64.whl `__", "`paddlepaddle_gpu-latest-cp27-cp27m-linux_x86_64.whl `__" + "cuda9.0_cudnn7_avx_mkl", "`paddlepaddle_gpu-latest-cp27-cp27mu-linux_x86_64.whl `__", "`paddlepaddle_gpu-latest-cp27-cp27m-linux_x86_64.whl `__" .. _pip_dependency: @@ -69,7 +87,7 @@ PaddlePaddle发布的安装包会尽量对齐 `manylinux1 9.0.0) 才可以安装。可以使用下面的命令更新您的pip: .. code-block:: bash diff --git a/doc/v2/build_and_install/pip_install_en.rst b/doc/v2/build_and_install/pip_install_en.rst index 1e409d86b9..8406e4aa1f 100644 --- a/doc/v2/build_and_install/pip_install_en.rst +++ b/doc/v2/build_and_install/pip_install_en.rst @@ -12,20 +12,38 @@ Install using pip ------------------------------ Run the following command to install PaddlePaddle on the current -machine, it will also download requirements, the version is cpu_avx_openblas. +machine, it will also download requirements. .. code-block:: bash pip install paddlepaddle +the default version is 0.12.0, cpu_avx_openblas, you can specify the versions to satisfy your demands, like: -If you wish to install GPU version (cuda7.5_cudnn5_avx_openblas), just run: + .. code-block:: bash + + pip install paddlepaddle==0.11.0 + +If you need to install a GPU-enabled version (cuda8.0_cudnn5_avx_openblas), you need to run: .. 
code-block:: bash pip install paddlepaddle-gpu -If you wish to install the latest develop branch PaddlePaddle, +The default version is also 0.12.0, PaddlePaddle provides several versions of packages for different needs, as shown in the table: + +================================= ======================================== +版本号 版本说明 +================================= ======================================== +paddlepaddle-gpu==0.12.0 0.12.0 built with CUDA 8.0 and cuDNN 5 +paddlepaddle-gpu==0.11.0.post87 0.11.0 built with CUDA 8.0 and cuDNN 7 +paddlepaddle-gpu==0.11.0.post8 0.11.0 built with CUDA 8.0 and cuDNN 5 +paddlepaddle-gpu==0.11.0 0.11.0 built with CUDA 7.5 and cuDNN 5 +================================= ======================================== + +You can find all versions released of paddlepaddle-gpu in `Release History `_ . + +If you wish to install the latest develop branch PaddlePaddle, you can download the latest whl package from our CI system. Access the below links, log in as guest, then click at the "Artifact" tab, you'll find the download link of whl packages. @@ -40,12 +58,12 @@ If the links below shows up the login form, just click "Log in as guest" to star :header: "version", "cp27-cp27mu", "cp27-cp27m" :widths: 1, 3, 3 - "cpu_avx_mkl", "`paddlepaddle-0.11.0-cp27-cp27mu-linux_x86_64.whl `_", "`paddlepaddle-0.11.0-cp27-cp27m-linux_x86_64.whl `_" - "cpu_avx_openblas", "`paddlepaddle-0.11.0-cp27-cp27mu-linux_x86_64.whl `_", "`paddlepaddle-0.11.0-cp27-cp27m-linux_x86_64.whl `_" - "cpu_noavx_openblas", "`paddlepaddle-0.11.0-cp27-cp27mu-linux_x86_64.whl `_", "`paddlepaddle-0.11.0-cp27-cp27m-linux_x86_64.whl `_" - "cuda7.5_cudnn5_avx_mkl", "`paddlepaddle_gpu-0.11.0-cp27-cp27mu-linux_x86_64.whl `_", "`paddlepaddle_gpu-0.11.0-cp27-cp27m-linux_x86_64.whl `_" - "cuda8.0_cudnn5_avx_mkl", "`paddlepaddle_gpu-0.11.0-cp27-cp27mu-linux_x86_64.whl `_", "`paddlepaddle_gpu-0.11.0-cp27-cp27m-linux_x86_64.whl `_" - "cuda8.0_cudnn7_avx_mkl", "`paddlepaddle_gpu-0.11.0-cp27-cp27mu-linux_x86_64.whl `_", "`paddlepaddle_gpu-0.11.0-cp27-cp27m-linux_x86_64.whl `_" + "cpu_avx_mkl", "`paddlepaddle-latest-cp27-cp27mu-linux_x86_64.whl `__", "`paddlepaddle-latest-cp27-cp27m-linux_x86_64.whl `__" + "cpu_avx_openblas", "`paddlepaddle-latest-cp27-cp27mu-linux_x86_64.whl `__", "`paddlepaddle-latest-cp27-cp27m-linux_x86_64.whl `__" + "cpu_noavx_openblas", "`paddlepaddle-latest-cp27-cp27mu-linux_x86_64.whl `__", "`paddlepaddle-latest-cp27-cp27m-linux_x86_64.whl `__" + "cuda8.0_cudnn5_avx_mkl", "`paddlepaddle_gpu-latest-cp27-cp27mu-linux_x86_64.whl `__", "`paddlepaddle_gpu-latest-cp27-cp27m-linux_x86_64.whl `__" + "cuda8.0_cudnn7_avx_mkl", "`paddlepaddle_gpu-latest-cp27-cp27mu-linux_x86_64.whl `__", "`paddlepaddle_gpu-latest-cp27-cp27m-linux_x86_64.whl `__" + "cuda9.0_cudnn7_avx_mkl", "`paddlepaddle_gpu-latest-cp27-cp27mu-linux_x86_64.whl `__", "`paddlepaddle_gpu-latest-cp27-cp27m-linux_x86_64.whl `__" .. _pip_dependency: @@ -79,7 +97,7 @@ FAQ ------------------------------ - paddlepaddle*.whl is not a supported wheel on this platform. - + The main cause of this issue is that your current platform is not supported. Please check that you are using Python 2.7 series. 
Besides, pypi only supports manylinux1 standard, you'll need to diff --git a/doc/v2/design/cluster_train/large_model_dist_train.md b/doc/v2/design/cluster_train/large_model_dist_train.md index 0c4b5bc24c..edb0245ea0 100644 --- a/doc/v2/design/cluster_train/large_model_dist_train.md +++ b/doc/v2/design/cluster_train/large_model_dist_train.md @@ -52,7 +52,7 @@ In `trainer_internal.cpp:L93 trainOneBatch`: When doing actual network forward and backward, at the beginning of each batch, the trainer will try to download one row of data from pserver. -In `trainer/RemoteParameterUpdater.cpp`: `parameterUpdater_->getParametersRemote();`: +In `legacy/trainer/RemoteParameterUpdater.cpp`: `parameterUpdater_->getParametersRemote();`: ```c++ if (fullSize) { diff --git a/doc/v2/design/interface/00.why_plain_c.md b/doc/v2/design/interface/00.why_plain_c.md index a144309334..826ff3141b 100644 --- a/doc/v2/design/interface/00.why_plain_c.md +++ b/doc/v2/design/interface/00.why_plain_c.md @@ -65,7 +65,7 @@ paddle_error paddle_matrix_get_shape(paddle_matrix matrix, 而在CPP里面实现这个C的接口,文件 `paddle_matrix.cpp` ```cpp -#include "paddle/math/matrix.h" +#include "paddle/legacy/math/matrix.h" extern "C" paddle_error paddle_matrix_shape(paddle_matrix matrix, uint64_t *width, diff --git a/doc/v2/design/mkl/mkldnn.md b/doc/v2/design/mkl/mkldnn.md index 1bd2e7bc34..4876de0045 100644 --- a/doc/v2/design/mkl/mkldnn.md +++ b/doc/v2/design/mkl/mkldnn.md @@ -5,7 +5,7 @@ 充分展现英特尔平台的优势,有效提升PaddlePaddle在英特尔架构上的性能。
Figure 1. PaddlePaddle on IA
@@ -18,20 +18,20 @@ Figure 1. PaddlePaddle on IA 具体的完成状态可以参见[这里](https://github.com/PaddlePaddle/Paddle/projects/21)。 ## Contents - -- [Overview](#overview) -- [Actions](#actions) - - [CMake](#cmake) - - [Matrix](#matrix) - - [Layers](#layers) - - [Activations](#activations) - - [Parameters](#parameters) - - [Gradients](#gradients) - - [Unit Tests](#unit-tests) - - [Python API](#python-api) - - [Benchmarking](#benchmarking) - - [Others](#others) -- [Design Concerns](#design-concerns) + +- [Overview](#overview) +- [Actions](#actions) + - [CMake](#cmake) + - [Matrix](#matrix) + - [Layers](#layers) + - [Activations](#activations) + - [Parameters](#parameters) + - [Gradients](#gradients) + - [Unit Tests](#unit-tests) + - [Python API](#python-api) + - [Benchmarking](#benchmarking) + - [Others](#others) +- [Design Concerns](#design-concerns) ## Overview @@ -42,16 +42,43 @@ Figure 1. PaddlePaddle on IA MKL,MKLML以及MKL-DNN三者关系如下表: -| Name | Open Source | License | Descriptions | -| :---------- | :--------------- | :---------- | :------------ | -| MKL | No | Proprietary | Accelerate math processing routines | -| MKLML | No | Proprietary | Small package of MKL, especially for Machine Learning | -| MKL-DNN | Yes | Apache 2.0 | Accelerate primitives processing routines especially for Deep Neural Networks | + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+Name | Open Source | License | Descriptions
+MKL | No | Proprietary | Accelerate math processing routines
+MKLML | No | Proprietary | Small package of MKL, especially for Machine Learning
+MKL-DNN | Yes | Apache 2.0 | Accelerate primitives processing routines especially for Deep Neural Networks
MKLML可以与MKL-DNN共同使用,以此达到最好的性能。
Figure 2. PaddlePaddle with MKL Engines
@@ -103,7 +130,7 @@ MKL-DNN的库目前只有动态库`libmkldnn.so`。 所以我们定义了一个`MKLDNNMatrix`用于管理MKL-DNN数据的不同格式以及相互之间的转换。
Figure 3. MKLDNNMatrix
@@ -113,7 +140,7 @@ Figure 3. MKLDNNMatrix 子类只需要使用定义好的接口,实现具体的函数功能即可。
Figure 4. MKLDNNLayer
@@ -150,7 +177,7 @@ Figure 4. MKLDNNLayer 所以整体上,在实现每个子类的时候就不需要关心分支的事情了。
Figure 5. Merge Gradients
@@ -191,20 +218,20 @@ if use_mkldnn 我们总结出一些特别需要注意的点: 1. 使用**deviceId_**。为了尽可能少的在父类Layer中添加变量或者函数, -我们决定使用已有的`deviceId_`变量来区分layer的属性,定义`-2`为`MKLDNNLayer`特有的设备ID。 -2. 重写父类Layer的**init**函数,修改`deviceId_`为`-2`,代表这个layer是用于跑在MKL-DNN的环境下。 +我们决定使用已有的`deviceId_`变量来区分layer的属性,定义`-2`为`MKLDNNLayer`特有的设备ID。 +2. 重写父类Layer的**init**函数,修改`deviceId_`为`-2`,代表这个layer是用于跑在MKL-DNN的环境下。 3. 创建`MKLDNNBase`,定义一些除了layer和memory相关的类和函数。 -包括MKL-DNN会用到`MKLDNNStream`和`CPUEngine`,和未来可能还会用到`FPGAEngine`等。 +包括MKL-DNN会用到`MKLDNNStream`和`CPUEngine`,和未来可能还会用到`FPGAEngine`等。 4. 如果MKL-DNN layer的后面接有cpu device,那么就会使`output_.value`与`extOutVal_`共享内存, 同时数据格式就是`NCHW`,这样下一个cpu device就能拿到正确的数据。 在有普通的CPU layer时, `extOutVal_`和`extOutGrad_`的格式始终是`NCHW`或者`NC`。 ## References 1. [MKL small library](https://github.com/01org/mkl-dnn#linking-your-application)是[Intel MKL](https://software.intel.com/en-us/mkl)的一个子集。 -主要包括了深度学习相关的数学原语与操作,一般由MKL-DNN在发布[新版本](https://github.com/01org/mkl-dnn/releases)时一起更新。 +主要包括了深度学习相关的数学原语与操作,一般由MKL-DNN在发布[新版本](https://github.com/01org/mkl-dnn/releases)时一起更新。 2. [MKL-DNN System Requirements](https://github.com/01org/mkl-dnn#system-requirements)。 目前在PaddlePaddle中,仅会在支持AVX2指令集及以上的机器才使用MKL-DNN。 3. [原来的方案](https://github.com/PaddlePaddle/Paddle/pull/3096)会引入**nextLayer**的信息。 -但是在PaddlePaddle中,无论是重构前的layer还是重构后的op,都不会想要知道next layer/op的信息。 +但是在PaddlePaddle中,无论是重构前的layer还是重构后的op,都不会想要知道next layer/op的信息。 4. MKL-DNN的高性能格式与PaddlePaddle原有的`NCHW`不同(PaddlePaddle中的cuDNN部分使用的也是`NCHW`,所以不存在这个问题)。 -所以需要引入一个转换方法,并且只需要在必要的时候转换这种格式,才能更好的发挥MKL-DNN的性能。 +所以需要引入一个转换方法,并且只需要在必要的时候转换这种格式,才能更好的发挥MKL-DNN的性能。 diff --git a/doc/v2/dev/contribute_to_paddle_cn.md b/doc/v2/dev/contribute_to_paddle_cn.md index d8bf093e09..3244eedf91 100644 --- a/doc/v2/dev/contribute_to_paddle_cn.md +++ b/doc/v2/dev/contribute_to_paddle_cn.md @@ -51,6 +51,8 @@ Paddle 开发人员使用 [pre-commit](http://pre-commit.com/) 工具来管理 G Paddle 使用 `clang-format` 来调整 C/C++ 源代码格式,请确保 `clang-format` 版本在 3.8 以上。 +注:通过`pip install pre-commit`和`conda install -c conda-forge pre-commit`安装的`yapf`稍有不同的,Paddle 开发人员使用的是`pip install pre-commit`。 + ## 开始开发 在本例中,我删除了 README.md 中的一行,并创建了一个新文件。 @@ -102,7 +104,7 @@ no changes added to commit (use "git add" and/or "git commit -a") ➜ docker run -it -v $(pwd):/paddle paddle:latest-dev bash -c "cd /paddle/build && ctest" ``` -关于构建和测试的更多信息,请参见[这篇文档](https://github.com/PaddlePaddle/Paddle/blob/develop/doc/getstarted/build_and_install/docker_install_cn.rst)。 +关于构建和测试的更多信息,请参见[使用Docker安装运行](https://github.com/PaddlePaddle/Paddle/blob/develop/doc/v2/build_and_install/docker_install_cn.rst)。 ## 提交(commit) diff --git a/doc/v2/dev/new_layer_cn.rst b/doc/v2/dev/new_layer_cn.rst index 3115654b2b..e5a1434612 100644 --- a/doc/v2/dev/new_layer_cn.rst +++ b/doc/v2/dev/new_layer_cn.rst @@ -58,7 +58,7 @@ PaddlePaddle的base layer类可以自动计算上面的导数。 实现C++类 =================== -一个网络层的C++类需要实现初始化,前向和后向。全连接层的实现位于:code:`paddle/gserver/layers/FullyConnectedLayer.h`及:code:`paddle/gserver/layers/FullyConnectedLayer.cpp`。这里我们展示一份简化过的代码。 +一个网络层的C++类需要实现初始化,前向和后向。全连接层的实现位于:code:`paddle/legacy/gserver/layers/FullyConnectedLayer.h`及:code:`paddle/legacy/gserver/layers/FullyConnectedLayer.cpp`。这里我们展示一份简化过的代码。 这个类需要继承 :code:`paddle::Layer` 这个基类,并且需要重写基类中的以下几个虚函数: @@ -153,7 +153,7 @@ PaddlePaddle的base layer类可以自动计算上面的导数。 - 每个层在其 :code:`forward` 函数的开头必须调用 :code:`Layer::forward(passType);` 。 - 之后使用 :code:`reserveOutput(batchSize, size);` 为输出分配内存。由于我们支持训练数据有不同的批次大小,所以这一步是必要的。 :code:`reserveOutput` 会相应地改变输出的尺寸。为了保证效率,如果需要扩大矩阵,我们会重新分配内存;如果需要缩减矩阵,我们会继续使用现有的内存块。 -- 之后使用矩阵运算函数来计算 :math:`\sum_i W_i x + 
b`。:code:`getInput(i).value` 返回第i个输入矩阵。每个输入都是一个 :math:`batchSize \times dim` 的矩阵,每行表示一个批次中的单个输入。对于我们支持的全部矩阵操作,请参考 :code:`paddle/math/Matrix.h`和:code:`paddle/math/BaseMatrix.h` 。 +- 之后使用矩阵运算函数来计算 :math:`\sum_i W_i x + b`。:code:`getInput(i).value` 返回第i个输入矩阵。每个输入都是一个 :math:`batchSize \times dim` 的矩阵,每行表示一个批次中的单个输入。对于我们支持的全部矩阵操作,请参考 :code:`paddle/legacy/math/Matrix.h`和:code:`paddle/legacy/math/BaseMatrix.h` 。 - 最终,使用 :code:`forwardActivation();` 进行激活操作。这会自动进行网络配置中声明的激活操作。 @@ -262,7 +262,7 @@ PaddlePaddle的base layer类可以自动计算上面的导数。 REGISTER_LAYER(fc, FullyConnectedLayer); } -若 :code:`cpp` 被放在 :code:`paddle/gserver/layers` 目录下,其会自动被加入编译列表。 +若 :code:`cpp` 被放在 :code:`paddle/legacy/gserver/layers` 目录下,其会自动被加入编译列表。 写梯度检查单元测试 @@ -270,7 +270,7 @@ PaddlePaddle的base layer类可以自动计算上面的导数。 写梯度检查单元测试是一个验证新实现的层是否正确的相对简单的办法。梯度检查单元测试通过有限差分法来验证一个层的梯度。首先对输入做一个小的扰动 :math:`\Delta x` ,然后观察到输出的变化为 :math:`\Delta y` ,那么,梯度就可以通过这个方程计算得到 :math:`\frac{\Delta y}{\Delta x }` 。之后,再用这个梯度去和 :code:`backward` 函数得到的梯度去对比,以保证梯度计算的正确性。需要注意的是梯度检查仅仅验证了梯度的计算,并不保证 :code:`forward` 和 :code:`backward` 函数的实现是正确的。你需要一些更复杂的单元测试来保证你实现的网络层是正确的。 -所有网络层的梯度检查单测都位于 :code:`paddle/gserver/tests/test_LayerGrad.cpp` 。我们建议你在写新网络层时把测试代码放入新的文件中。下面列出了全连接层的梯度检查单元测试。它包含以下几步: +所有网络层的梯度检查单测都位于 :code:`paddle/legacy/gserver/tests/test_LayerGrad.cpp` 。我们建议你在写新网络层时把测试代码放入新的文件中。下面列出了全连接层的梯度检查单元测试。它包含以下几步: + 生成网络层配置。网络层配置包含以下几项: - 偏置参数的大小。(例子中是4096) @@ -322,7 +322,7 @@ PaddlePaddle的base layer类可以自动计算上面的导数。 } } -如果你要为了测试而增加新的文件,例如 :code:`paddle/gserver/tests/testFCGrad.cpp` ,你需要把该文件加入 :code:`paddle/gserver/tests/CMakeLists.txt` 中。下面给出了一个例子。当你执行命令 :code:`make tests` 时,所有的单测都会被执行一次。注意,有些层可能需要高精度来保证梯度检查单测正确执行。你需要在配置cmake时将 :code:`WITH_DOUBLE` 设置为 `ON` 。 +如果你要为了测试而增加新的文件,例如 :code:`paddle/legacy/gserver/tests/testFCGrad.cpp` ,你需要把该文件加入 :code:`paddle/legacy/gserver/tests/CMakeLists.txt` 中。下面给出了一个例子。当你执行命令 :code:`make tests` 时,所有的单测都会被执行一次。注意,有些层可能需要高精度来保证梯度检查单测正确执行。你需要在配置cmake时将 :code:`WITH_DOUBLE` 设置为 `ON` 。 .. code-block:: bash diff --git a/doc/v2/dev/new_layer_en.rst b/doc/v2/dev/new_layer_en.rst index b05bb45f11..ad72373880 100644 --- a/doc/v2/dev/new_layer_en.rst +++ b/doc/v2/dev/new_layer_en.rst @@ -58,7 +58,7 @@ Finally we can use chain rule to calculate :math:`\frac{\partial z}{\partial x}` Implement C++ Class =================== -The C++ class of the layer implements the initialization, forward, and backward part of the layer. The fully connected layer is at :code:`paddle/gserver/layers/FullyConnectedLayer.h` and :code:`paddle/gserver/layers/FullyConnectedLayer.cpp`. We list simplified version of the code below. +The C++ class of the layer implements the initialization, forward, and backward part of the layer. The fully connected layer is at :code:`paddle/legacy/gserver/layers/FullyConnectedLayer.h` and :code:`paddle/legacy/gserver/layers/FullyConnectedLayer.cpp`. We list simplified version of the code below. It needs to derive the base class :code:`paddle::Layer`, and it needs to override the following functions: @@ -154,7 +154,7 @@ The implementation of the forward part has the following steps. - Every layer must call :code:`Layer::forward(passType);` at the beginning of its :code:`forward` function. - Then it allocates memory for the output using :code:`reserveOutput(batchSize, size);`. This step is necessary because we support the batches to have different batch sizes. :code:`reserveOutput` will change the size of the output accordingly. 
For the sake of efficiency, we will allocate new memory if we want to expand the matrix, but we will reuse the existing memory block if we want to shrink the matrix. -- Then it computes :math:`\sum_i W_i x + b` using Matrix operations. :code:`getInput(i).value` retrieve the matrix of the i-th input. Each input is a :math:`batchSize \times dim` matrix, where each row represents an single input in a batch. For a complete lists of supported matrix operations, please refer to :code:`paddle/math/Matrix.h` and :code:`paddle/math/BaseMatrix.h`. +- Then it computes :math:`\sum_i W_i x + b` using Matrix operations. :code:`getInput(i).value` retrieve the matrix of the i-th input. Each input is a :math:`batchSize \times dim` matrix, where each row represents an single input in a batch. For a complete lists of supported matrix operations, please refer to :code:`paddle/legacy/math/Matrix.h` and :code:`paddle/legacy/math/BaseMatrix.h`. - Finally it applies the activation function using :code:`forwardActivation();`. It will automatically applies the corresponding activation function specifies in the network configuration. @@ -263,7 +263,7 @@ Finally, you can use :code:`REGISTER_LAYER(fc, FullyConnectedLayer);` to registe REGISTER_LAYER(fc, FullyConnectedLayer); } -If the :code:`cpp` file is put into :code:`paddle/gserver/layers`, it will be automatically added to the compilation list. +If the :code:`cpp` file is put into :code:`paddle/legacy/gserver/layers`, it will be automatically added to the compilation list. Write Gradient Check Unit Test @@ -271,7 +271,7 @@ Write Gradient Check Unit Test An easy way to verify the correctness of new layer's implementation is to write a gradient check unit test. Gradient check unit test utilizes finite difference method to verify the gradient of a layer. It modifies the input with a small perturbation :math:`\Delta x` and observes the changes of output :math:`\Delta y`, the gradient can be computed as :math:`\frac{\Delta y}{\Delta x }`. This gradient can be compared with the gradient computed by the :code:`backward` function of the layer to ensure the correctness of the gradient computation. Notice that the gradient check only tests the correctness of the gradient computation, it does not necessarily guarantee the correctness of the implementation of the :code:`forward` and :code:`backward` function. You need to write more sophisticated unit tests to make sure your layer is implemented correctly. -All the gradient check unit tests are located in :code:`paddle/gserver/tests/test_LayerGrad.cpp`. You are recommended to put your test into a new test file if you are planning to write a new layer. The gradient test of the gradient check unit test of the fully connected layer is listed below. It has the following steps. +All the gradient check unit tests are located in :code:`paddle/legacy/gserver/tests/test_LayerGrad.cpp`. You are recommended to put your test into a new test file if you are planning to write a new layer. The gradient test of the gradient check unit test of the fully connected layer is listed below. It has the following steps. + Create layer configuration. A layer configuration can include the following attributes: - size of the bias parameter. (4096 in our example) @@ -323,7 +323,7 @@ All the gradient check unit tests are located in :code:`paddle/gserver/tests/tes } } -If you are creating a new file for the test, such as :code:`paddle/gserver/tests/testFCGrad.cpp`, you need to add the file to :code:`paddle/gserver/tests/CMakeLists.txt`. 
An example is given below. All the unit tests will run when you execute the command :code:`make tests`. Notice that some layers might need high accuracy for the gradient check unit tests to work well. You need to configure :code:`WITH_DOUBLE` to `ON` when configuring cmake. +If you are creating a new file for the test, such as :code:`paddle/legacy/gserver/tests/testFCGrad.cpp`, you need to add the file to :code:`paddle/legacy/gserver/tests/CMakeLists.txt`. An example is given below. All the unit tests will run when you execute the command :code:`make tests`. Notice that some layers might need high accuracy for the gradient check unit tests to work well. You need to configure :code:`WITH_DOUBLE` to `ON` when configuring cmake. .. code-block:: bash @@ -339,7 +339,7 @@ If you are creating a new file for the test, such as :code:`paddle/gserver/tests Implement Python Wrapper ======================== -Implementing Python wrapper allows us to use the added layer in configuration files. All the Python wrappers are in file :code:`python/paddle/trainer/config_parser.py`. An example of the Python wrapper for fully connected layer is listed below. It has the following steps: +Implementing Python wrapper allows us to use the added layer in configuration files. All the Python wrappers are in file :code:`python/paddle/legacy/trainer/config_parser.py`. An example of the Python wrapper for fully connected layer is listed below. It has the following steps: - Use :code:`@config_layer('fc')` at the decorator for all the Python wrapper class. :code:`fc` is the identifier of the layer. - Implements :code:`__init__` constructor function. diff --git a/doc/v2/faq/build_and_install/index_cn.rst b/doc/v2/faq/build_and_install/index_cn.rst index f292684fb5..0d64477728 100644 --- a/doc/v2/faq/build_and_install/index_cn.rst +++ b/doc/v2/faq/build_and_install/index_cn.rst @@ -213,3 +213,12 @@ virtualenv本身也是Python的一个包,可以用pip进行安装: 保存并关闭文件。 这样,每次打开终端时就会自动启动名为‘paddle’的Python环境了。 + +10. 通过pip安装的PaddlePaddle在 :code:`import paddle.fluid` 报找不到 :code:`libmkldnn.so` 或 :code:`libmklml_intel.so` +------------------------------------------------------------------------------------------ +出现这种问题的原因是在导入 :code:`paddle.fluid` 时需要加载 :code:`libmkldnn.so` 和 :code:`libmklml_intel.so`, +但是系统没有找到该文件。一般通过pip安装PaddlePaddle时会将 :code:`libmkldnn.so` 和 :code:`libmklml_intel.so` +拷贝到 :code:`/usr/local/lib` 路径下,所以解决办法是将该路径加到 :code:`LD_LIBRARY_PATH` 环境变量下, +即: :code:`export LD_LIBRARY_PATH=/usr/local/lib:$LD_LIBRARY_PATH` 。 + +**注意**:如果是在虚拟环境中安装PaddlePaddle, :code:`libmkldnn.so` 和 :code:`libmklml_intel.so` 可能不在 :code:`/usr/local/lib` 路径下。 \ No newline at end of file diff --git a/doc/v2/faq/parameter/index_cn.rst b/doc/v2/faq/parameter/index_cn.rst index 1fa4b3e131..987e8cf088 100644 --- a/doc/v2/faq/parameter/index_cn.rst +++ b/doc/v2/faq/parameter/index_cn.rst @@ -196,6 +196,6 @@ PaddlePaddle保存的模型参数文件内容由16字节头信息和网络参数 obj="process", args={"src_dict_path": src_dict_path}) -完整源码可参考 `sequence_recurrent `_ 示例。 +完整源码可参考 `sequence_recurrent `_ 示例。 diff --git a/doc/v2/faq/parameter/index_en.rst b/doc/v2/faq/parameter/index_en.rst index 61c7845af7..9edb8dd620 100644 --- a/doc/v2/faq/parameter/index_en.rst +++ b/doc/v2/faq/parameter/index_en.rst @@ -1,5 +1,198 @@ -################# -Parameter Setting -################# +################## +Parameter Settings +################## -TBD +.. contents:: + +1. 
How to Choose the Learning Rate of SGD Algorithm
+---------------------------------------------------
+
+An important issue when training with :code:`sgd/async_sgd` is choosing the right value for :code:`learning_rate`. If it is too large, the training may not converge. If it is too small, convergence may be slow, resulting in a long training time.
+
+Usually, we start with a relatively large learning rate. If the training does not converge, we keep reducing the learning rate by a factor of 10 until it does. To judge convergence, we compare the training cost against the minimum cost that a model with constant output could achieve.
+
+If the cost during training stays significantly higher than that constant-output cost, we conclude that the training is not converging. For example, for a three-class problem with multi-class cross-entropy as the cost, if the ratio of labels 0, 1, and 2 in the data is :code:`0.2, 0.5, 0.3`, the minimum achievable cost is :code:`-(0.2*log(0.2)+0.5*log(0.5)+0.3*log(0.3))=1.03`. If the cost is still greater than this number after training a pass (or even before that), the training has probably not converged and the learning rate should be reduced.
+
+2. How to Implement Learning Rate Annealing
+-------------------------------------------
+
+We use the Adam algorithm as an example. Set the :code:`learning_rate_schedule` parameters of the optimizer as follows:
+
+.. code-block:: python
+
+    optimizer = paddle.optimizer.Adam(
+        learning_rate=1e-3,
+        learning_rate_decay_a=0.5,
+        learning_rate_decay_b=0.75,
+        learning_rate_schedule="poly")
+
+PaddlePaddle currently supports 8 learning rate schedules, whose learning rates are computed as follows:
+
+* "constant"
+
+  lr = learning_rate
+
+* "poly"
+
+  lr = learning_rate * pow(1 + learning_rate_decay_a * num_samples_processed, -learning_rate_decay_b)
+
+  The variable :code:`num_samples_processed` is the number of samples trained on so far.
+
+* "caffe_poly"
+
+  lr = learning_rate * pow(1.0 - num_samples_processed / learning_rate_decay_a, learning_rate_decay_b)
+
+* "exp"
+
+  lr = learning_rate * pow(learning_rate_decay_a, num_samples_processed / learning_rate_decay_b)
+
+* "discexp"
+
+  lr = learning_rate * pow(learning_rate_decay_a, floor(num_samples_processed / learning_rate_decay_b))
+
+* "linear"
+
+  lr = max(learning_rate - learning_rate_decay_a * num_samples_processed, learning_rate_decay_b)
+
+* "manual"
+
+  This schedule anneals the learning rate piecewise over the number of samples trained on. The attenuation-factor piecewise function is given through the parameter :code:`learning_rate_args`, and the current learning rate is the product of :code:`learning_rate` and the current attenuation factor. Take the Adam algorithm as an example:
+
+  .. code-block:: python
+
+      optimizer = paddle.optimizer.Adam(
+          learning_rate=1e-3,
+          learning_rate_schedule="manual",
+          learning_rate_args="1000:1.0,2000:0.9,3000:0.8")
+
+  In this example, when the number of trained samples is less than or equal to 1000, the learning rate is :code:`1e-3*1.0`; when it is greater than 1000 and less than or equal to 2000, the learning rate is :code:`1e-3*0.9`; when it is greater than 2000, the learning rate is :code:`1e-3*0.8`.
+
+* "pass_manual"
+
+  This schedule picks the attenuation factor piecewise according to the number of trained passes. It is configured through :code:`learning_rate_args` in the same way, and the current learning rate is again the product of :code:`learning_rate` and the current attenuation factor. Take the Adam algorithm as an example:
+
+  .. code-block:: python
+
+      optimizer = paddle.optimizer.Adam(
+          learning_rate=1e-3,
+          learning_rate_schedule="pass_manual",
+          learning_rate_args="1:1.0,2:0.9,3:0.8")
+
+  In this example, when the number of trained passes is less than or equal to 1, the learning rate is :code:`1e-3*1.0`; when it is greater than 1 and less than or equal to 2, the learning rate is :code:`1e-3*0.9`; when it is greater than 2, the learning rate is :code:`1e-3*0.8`.
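+
+To make these formulas concrete, the sketch below (plain Python written for this FAQ, not PaddlePaddle internals; the helper names are ours) evaluates three of the schedules:
+
+.. code-block:: python
+
+    import math
+
+    def poly_lr(learning_rate, decay_a, decay_b, num_samples_processed):
+        # "poly": lr = learning_rate * (1 + a * n) ** (-b)
+        return learning_rate * math.pow(
+            1.0 + decay_a * num_samples_processed, -decay_b)
+
+    def exp_lr(learning_rate, decay_a, decay_b, num_samples_processed):
+        # "exp": lr = learning_rate * a ** (n / b)
+        return learning_rate * math.pow(
+            decay_a, num_samples_processed / float(decay_b))
+
+    def linear_lr(learning_rate, decay_a, decay_b, num_samples_processed):
+        # "linear": decay linearly, but never below the floor decay_b
+        return max(learning_rate - decay_a * num_samples_processed, decay_b)
+
+    # The "poly" settings from the Adam example above:
+    print(poly_lr(1e-3, 0.5, 0.75, 1024))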
+
+3. How to Initialize Parameters
+-------------------------------
+
+By default, PaddlePaddle initializes parameters with a mean of 0 and a standard deviation of :math:`\frac{1}{\sqrt{d}}`, where :math:`d` is the width of the parameter matrix. This initialization usually works well. If you want to customize initialization, PaddlePaddle provides two ways to initialize parameters:
+
+* Gaussian distribution. Set :code:`param_attr` to :code:`param_attr=ParamAttr(initial_mean=0.0, initial_std=1.0)`
+* Uniform distribution. Set :code:`param_attr` to :code:`param_attr=ParamAttr(initial_max=1.0, initial_min=-1.0)`
+
+For example, to set both the parameter initialization mode and the bias initialization mode of a fully connected layer, you can use the following code:
+
+.. code-block:: python
+
+    hidden = fc_layer(input=ipt, param_attr=ParamAttr(initial_max=1.0, initial_min=-1.0),
+                      bias_attr=ParamAttr(initial_mean=1.0, initial_std=0.0))
+
+The above code initializes the bias to 1.0 and the parameters to a uniform distribution over :code:`[-1.0, 1.0]`.
+
+4. How to Share Parameters
+--------------------------
+
+PaddlePaddle uses the parameter :code:`name` as its ID: parameters with the same name share the same values. We can set a parameter's name with :code:`ParamAttr(name="YOUR_PARAM_NAME")`. More conveniently, we can let the layers that should share parameters use the same :code:`ParamAttr` object.
+
+A simple fully connected network configured with parameter sharing looks as follows:
+
+.. literalinclude:: ../../python/paddle/trainer_config_helpers/tests/configs/shared_fc.py
+
+Here :code:`hidden_a` and :code:`hidden_b` have the same parameter and bias. The two inputs of the softmax layer also use the same parameter :code:`softmax_param`.
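+
+A minimal inline sketch of the same idea (our own example, written against the v2 API rather than the config helpers used by :code:`shared_fc.py`; the names are ours) shares one weight matrix between two branches:
+
+.. code-block:: python
+
+    import paddle.v2 as paddle
+
+    # Both fc layers name their weight "shared_fc.w", so PaddlePaddle
+    # creates a single 128x64 matrix and trains it through both branches.
+    shared_w = paddle.attr.Param(name="shared_fc.w")
+
+    x = paddle.layer.data(name="x", type=paddle.data_type.dense_vector(128))
+    hidden_a = paddle.layer.fc(input=x, size=64, param_attr=shared_w)
+    hidden_b = paddle.layer.fc(input=x, size=64, param_attr=shared_w)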
+
+5. How to Load Pre-training Parameters
+--------------------------------------
+
+* For layers that should load pre-training parameters, set :code:`is_static = True` so that the parameters of that layer stay unchanged during training. Take the embedding layer as an example; the code is as follows:
+
+.. code-block:: python
+
+    emb_para = paddle.attr.Param(name='emb', is_static=True)
+    paddle.layer.embedding(size=word_dim, input=x, param_attr=emb_para)
+
+* Load the pre-training parameters from the model file into a :code:`numpy.array`, then pass them in with :code:`parameters.set()` after creating the parameters. The first 16 bytes of a model parameter file saved by PaddlePaddle are header information, so the :code:`numpy.array` must be loaded starting from the 17th byte. Take the embedding layer as an example; the code is as follows:
+
+.. code-block:: python
+
+    def load_parameter(file_name, h, w):
+        with open(file_name, 'rb') as f:
+            f.read(16)  # skip header
+            return np.fromfile(f, dtype=np.float32).reshape(h, w)
+
+    parameters = paddle.parameters.create(my_cost)
+    parameters.set('emb', load_parameter(emb_param_file, 30000, 256))
+
+6. Format of the Stored Parameter and How to Convert the File to Plain Text
+---------------------------------------------------------------------------
+
+The model parameter file saved by PaddlePaddle consists of 16 bytes of header information followed by the network parameters. In the header, the first four bytes carry PaddlePaddle's version information and are filled with 0s; the next four bytes give the number of bytes occupied by each parameter value (four if the saved network parameters are of float type, eight if double); the remaining eight bytes give the total number of saved parameter values.
+
+To restore model parameters saved by PaddlePaddle back to plain text, load the network parameters with a :code:`numpy.array` of the corresponding data type, skipping the header of the parameter file. Unless PaddlePaddle was explicitly compiled with double precision, the parameters are computed and stored as floats, so in that case :code:`numpy.array` is generally used with :code:`dtype=float32`. An example is as follows:
+
+.. code-block:: python
+
+    def read_parameter(fname, width):
+        s = open(fname).read()
+        # skip header
+        vec = np.fromstring(s[16:], dtype=np.float32)
+        # width is the size of the corresponding layer
+        np.savetxt(fname + ".csv", vec.reshape(width, -1),
+                   fmt="%.6f", delimiter=",")
+
+To convert plain-text parameters into model parameters that PaddlePaddle can load, construct the header first and then write out the network parameters. The following code converts a randomly generated matrix into loadable model parameters:
+
+.. code-block:: python
+
+    def gen_rand_param(param_file, width, height, need_trans):
+        np.random.seed()
+        header = struct.pack("iil", 0, 4, height * width)
+        param = np.float32(np.random.rand(height, width))
+        with open(param_file, "w") as fparam:
+            fparam.write(header + param.tostring())
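+
+As a quick sanity check of this layout, the sketch below (ours, not part of the PaddlePaddle codebase) packs a header with :code:`struct` and reads the parameters back; the :code:`"iil"` field widths (4 + 4 + 8 bytes) assume a 64-bit Linux build, matching :code:`gen_rand_param` above:
+
+.. code-block:: python
+
+    import struct
+    import numpy as np
+
+    height, width = 3, 4
+    param = np.float32(np.random.rand(height, width))
+    # 16-byte header: version (4 bytes), bytes per value (4 bytes), count (8 bytes)
+    blob = struct.pack("iil", 0, 4, height * width) + param.tostring()
+
+    version, value_size, count = struct.unpack_from("iil", blob)
+    assert (version, value_size, count) == (0, 4, height * width)
+    restored = np.fromstring(blob[16:], dtype=np.float32).reshape(height, width)
+    assert np.allclose(restored, param)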
+
+7. A Protocol Message Rejected Because of its Large Size
+--------------------------------------------------------
+
+If you are training NLP related models and the following error occurs:
+
+.. code-block:: bash
+
+    [libprotobuf ERROR google/protobuf/io/coded_stream.cc:171] A protocol message was rejected because it was too big (more than 67108864 bytes). To increase the limit (or to disable these warnings), see CodedInputStream::SetTotalBytesLimit() in google/protobuf/io/coded_stream.h.
+    F1205 14:59:50.295174 14703 TrainerConfigHelper.cpp:59] Check failed: m->conf.ParseFromString(configProtoStr)
+
+The possible reason is that one of the args passed to the dataprovider is too large, which usually happens when a large dictionary is passed in directly. A wrongly defined `define_py_data_sources2` looks like:
+
+.. code-block:: python
+
+    src_dict = dict()
+    for line_count, line in enumerate(open(src_dict_path, "r")):
+        src_dict[line.strip()] = line_count
+
+    define_py_data_sources2(
+        train_list,
+        test_list,
+        module="dataprovider",
+        obj="process",
+        args={"src_dict": src_dict})
+
+The solution is to pass the dictionary file's path as args to the dataprovider, and load the dictionary inside the dataprovider from that path. Change `define_py_data_sources2` to:
+
+.. code-block:: python
+
+    define_py_data_sources2(
+        train_list,
+        test_list,
+        module="dataprovider",
+        obj="process",
+        args={"src_dict_path": src_dict_path})
+
+The full source code can be found in the `sequence_recurrent `_ example.
diff --git a/doc/v2/howto/capi/compile_paddle_lib_cn.md b/doc/v2/howto/capi/compile_paddle_lib_cn.md
index e223fd33a8..8878ee9d85 100644
--- a/doc/v2/howto/capi/compile_paddle_lib_cn.md
+++ b/doc/v2/howto/capi/compile_paddle_lib_cn.md
@@ -18,24 +18,29 @@
 cpu_avx_openblas
-暂无
+paddle.tgz
 cpu_noavx_openblas
-paddle.tgz
+paddle.tgz
 cuda7.5_cudnn5_avx_mkl
-paddle.tgz
+paddle.tgz
 cuda8.0_cudnn5_avx_mkl
-paddle.tgz
+paddle.tgz
 cuda8.0_cudnn7_avx_mkl
-paddle.tgz
-
+paddle.tgz
+
+
+cuda9.0_cudnn7_avx_mkl
+paddle.tgz
+
+
 ### 从源码编译
diff --git a/doc/v2/howto/capi/compile_paddle_lib_en.md b/doc/v2/howto/capi/compile_paddle_lib_en.md
index 6212a30811..70a6edef27 100644
--- a/doc/v2/howto/capi/compile_paddle_lib_en.md
+++ b/doc/v2/howto/capi/compile_paddle_lib_en.md
@@ -13,28 +13,33 @@
 cpu_avx_mkl
-paddle.tgz
+paddle.tgz
 cpu_avx_openblas
--
+paddle.tgz
 cpu_noavx_openblas
-paddle.tgz
+paddle.tgz
 cuda7.5_cudnn5_avx_mkl
-paddle.tgz
+paddle.tgz
 cuda8.0_cudnn5_avx_mkl
-paddle.tgz
+paddle.tgz
 cuda8.0_cudnn7_avx_mkl
-paddle.tgz
-
+paddle.tgz
+
+
+cuda9.0_cudnn7_avx_mkl
+paddle.tgz
+
+
 ### From source
diff --git a/doc/v2/howto/capi/workflow_of_capi_cn.md b/doc/v2/howto/capi/workflow_of_capi_cn.md
index 1968c1099a..db1568a2af 100644
--- a/doc/v2/howto/capi/workflow_of_capi_cn.md
+++ b/doc/v2/howto/capi/workflow_of_capi_cn.md
@@ -28,9 +28,9 @@
 ### 准备预测模型
-准备预测模型部分,我们以手写数字识别任务为例进行介绍。手写数字识别任务定义了一个含有[两个隐层的简单全连接网络](https://github.com/PaddlePaddle/book/blob/develop/02.recognize_digits/README.cn.md#softmax回归softmax-regression),网络接受一幅图片作为输入,将图片分类到 0 ~ 9 类别标签之一。完整代码可以查看[此目录](https://github.com/PaddlePaddle/Paddle/tree/develop/paddle/capi/examples/model_inference/dense) 中的相关脚本。
+准备预测模型部分,我们以手写数字识别任务为例进行介绍。手写数字识别任务定义了一个含有[两个隐层的简单全连接网络](https://github.com/PaddlePaddle/book/blob/develop/02.recognize_digits/README.cn.md#softmax回归softmax-regression),网络接受一幅图片作为输入,将图片分类到 0 ~ 9 类别标签之一。完整代码可以查看[此目录](https://github.com/PaddlePaddle/Paddle/tree/develop/paddle/legacy/capi/examples/model_inference/dense) 中的相关脚本。
-调用C-API开发预测程序需要一个训练好的模型,运行[MNIST手写数字识别目录](https://github.com/PaddlePaddle/Paddle/tree/develop/paddle/capi/examples/model_inference/dense)下的[mnist_v2.py](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/capi/examples/model_inference/dense/mnist_v2.py)脚本,在终端执行`python mnist_v2.py`,会使用 PaddlePaddle 内置的 [MNIST 
数据集](http://yann.lecun.com/exdb/mnist/)进行训练。训练好的模型默认保存在当前运行目录下的`models`目录中。 下面,我们将训练结束后存储下来的模型转换成预测模型。 @@ -48,7 +48,7 @@ dump_v2_config(predict, "trainer_config.bin", True) ``` - 对[手写数字识别](https://github.com/PaddlePaddle/Paddle/tree/develop/paddle/capi/examples/model_inference/dense)这个示例,[`mnist_v2.py`](https://github.com/PaddlePaddle/Paddle/tree/develop/paddle/capi/examples/model_inference/dense/mnist_v2.py)脚本集成了序列化神经网络结构的过程,可以直接运行 `python mnist_v2.py --task dump_config` 对神经网络结构进行序列化,结果会写入当前运行目录下的`trainer_config.bin`文件中。 + 对[手写数字识别](https://github.com/PaddlePaddle/Paddle/tree/develop/paddle/legacy/capi/examples/model_inference/dense)这个示例,[`mnist_v2.py`](https://github.com/PaddlePaddle/Paddle/tree/develop/paddle/legacy/capi/examples/model_inference/dense/mnist_v2.py)脚本集成了序列化神经网络结构的过程,可以直接运行 `python mnist_v2.py --task dump_config` 对神经网络结构进行序列化,结果会写入当前运行目录下的`trainer_config.bin`文件中。 使用这种方式,需要**在运行时将神经网络的多个可学习参数放在同一个目录中**,C-API可以通过分别指定序列化后的网络结构文件和参数目录来加载训练好的模型。 @@ -59,7 +59,7 @@ 代码示例如下: ```python - from paddle.utils.merge_model import merge_v2_modelss + from paddle.utils.merge_model import merge_v2_model from mnist_v2 import network net = network(is_infer=True) @@ -68,7 +68,7 @@ merge_v2_model(net, param_file, output_file) ``` - 对[手写数字识别](https://github.com/PaddlePaddle/Paddle/tree/develop/paddle/capi/examples/model_inference/dense)这个示例,可直接运行 `python` [merge_v2_model.py](https://github.com/PaddlePaddle/Paddle/tree/develop/paddle/capi/examples/model_inference/dense/merge_v2_model.py)。序列化结果会写入当前运行目录下的`output.paddle.model`文件中。使用这种方式,运行时C-API可以通过指定`output.paddle.model`文件的路径来加载预测模型。 + 对[手写数字识别](https://github.com/PaddlePaddle/Paddle/tree/develop/paddle/legacy/capi/examples/model_inference/dense)这个示例,可直接运行 `python` [merge_v2_model.py](https://github.com/PaddlePaddle/Paddle/tree/develop/paddle/legacy/capi/examples/model_inference/dense/merge_v2_model.py)。序列化结果会写入当前运行目录下的`output.paddle.model`文件中。使用这种方式,运行时C-API可以通过指定`output.paddle.model`文件的路径来加载预测模型。 #### 注意事项 1. 为使用C-API,在调用`dump_v2_config`序列化神经网络结构时,参数`binary`必须指定为`True`。 @@ -77,10 +77,10 @@ ### 编写预测代码 -预测代码更多详细示例代码请参考[C-API使用示例](https://github.com/PaddlePaddle/Paddle/tree/develop/paddle/capi/examples/model_inference) 目录下的代码示例。这一节对图1中预测代码编写的5个步骤进行介绍和说明。 +预测代码更多详细示例代码请参考[C-API使用示例](https://github.com/PaddlePaddle/Paddle/tree/develop/paddle/legacy/capi/examples/model_inference) 目录下的代码示例。这一节对图1中预测代码编写的5个步骤进行介绍和说明。 #### step 1. 初始化PaddlePaddle运行环境 -第一步需调用[`paddle_init`](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/capi/main.h#L27) 初始化PaddlePaddle运行环境,该接口接受两个参数:参数的个数和参数列表。 +第一步需调用[`paddle_init`](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/legacy/capi/main.h#L27) 初始化PaddlePaddle运行环境,该接口接受两个参数:参数的个数和参数列表。 #### step2. 加载模型 @@ -88,8 +88,8 @@ 概念上,在 PaddlePaddle 内部,一个GradientMachine类的对象管理着一组计算层(PaddlePaddle Layers)来完成前向和反向计算,并处理与之相关的所有细节。在调用C-API预测时,只需进行前向计算而无需调用反向计算。这篇文档之后部分会使用`gradient machine`来特指调用PaddlePaddle C-API创建的GradientMachine类的对象。每一个 `gradient machine` 都会管理维护一份训练好的模型,下面是C-API提供的,两种常用的模型加载方式: -1. 调用[`paddle_gradient_machine_load_parameter_from_disk`](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/capi/gradient_machine.h#L61)接口,从磁盘加载预测模型。这时`gradient machine`会独立拥有一份训练好的模型; -1. 调用[`paddle_gradient_machine_create_shared_param`](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/capi/gradient_machine.h#L88)接口,与其它`gradient machine`的共享已经加载的预测模型。这种情况多出现在使用多线程预测时,通过多个线程共享同一个模型来减少内存开销。可参考[此示例](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/capi/examples/model_inference/multi_thread/main.c)。 +1. 
调用[`paddle_gradient_machine_load_parameter_from_disk`](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/legacy/capi/gradient_machine.h#L61)接口,从磁盘加载预测模型。这时`gradient machine`会独立拥有一份训练好的模型; +1. 调用[`paddle_gradient_machine_create_shared_param`](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/legacy/capi/gradient_machine.h#L88)接口,与其它`gradient machine`的共享已经加载的预测模型。这种情况多出现在使用多线程预测时,通过多个线程共享同一个模型来减少内存开销。可参考[此示例](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/legacy/capi/examples/model_inference/multi_thread/main.c)。 - 注意事项 @@ -117,7 +117,7 @@ C-API支持的所有输入数据类型和他们的组织方式,请参考“输 #### step 4. 前向计算 -完成上述准备之后,通过调用 [`paddle_gradient_machine_forward`](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/capi/gradient_machine.h#L73) 接口完成神经网络的前向计算。 +完成上述准备之后,通过调用 [`paddle_gradient_machine_forward`](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/legacy/capi/gradient_machine.h#L73) 接口完成神经网络的前向计算。 #### step 5. 清理 diff --git a/doc/v2/howto/optimization/gpu_profiling_cn.rst b/doc/v2/howto/optimization/gpu_profiling_cn.rst index 25bcaccb69..f2396716bd 100644 --- a/doc/v2/howto/optimization/gpu_profiling_cn.rst +++ b/doc/v2/howto/optimization/gpu_profiling_cn.rst @@ -50,12 +50,12 @@ GPU则还需要高并行性,才能发挥其全部能力。这正是它们速 **nvprof** 是Nvidia性能分析工具, **nvvp** 则是带GUI的Nvidia可视化性能分析工具。 在这个教程中,我们主要会介绍nvprof和nvvp。 -:code:`test_GpuProfiler` from :code:`paddle/math/tests` directory will be used to evaluate +:code:`test_GpuProfiler` from :code:`paddle/legacy/math/tests` directory will be used to evaluate above profilers. -:code:`paddle/math/test` 目录中的 :code:`test_GpuProfiler` 就是用于展示上述分析工具的用法。 +:code:`paddle/legacy/math/test` 目录中的 :code:`test_GpuProfiler` 就是用于展示上述分析工具的用法。 -.. literalinclude:: ../../../../paddle/math/tests/test_GpuProfiler.cpp +.. literalinclude:: ../../../../paddle/legacy/math/tests/test_GpuProfiler.cpp :language: c++ :lines: 137-151 :linenos: @@ -83,7 +83,7 @@ program crashes when CPU version of PaddlePaddle invokes them. 1. 加入 :code:`REGISTER_TIMER_INFO` 和 :code:`printAllStatus` 函数(如高亮部分)。 - .. literalinclude:: ../../../../paddle/math/tests/test_GpuProfiler.cpp + .. literalinclude:: ../../../../paddle/legacy/math/tests/test_GpuProfiler.cpp :language: c++ :lines: 137-151 :emphasize-lines: 8-12,14 @@ -101,8 +101,8 @@ program crashes when CPU version of PaddlePaddle invokes them. .. code-block:: bash :emphasize-lines: 1,12-15 - > ./paddle/math/tests/test_GpuProfiler - I1117 11:13:42.313065 2522362816 Util.cpp:155] commandline: ./paddle/math/tests/test_GpuProfiler + > ./paddle/legacy/math/tests/test_GpuProfiler + I1117 11:13:42.313065 2522362816 Util.cpp:155] commandline: ./paddle/legacy/math/tests/test_GpuProfiler I1117 11:13:42.845065 2522362816 Util.cpp:130] Calling runInitFunctions I1117 11:13:42.845208 2522362816 Util.cpp:143] Call runInitFunctions done. [==========] Running 1 test from 1 test case. @@ -130,7 +130,7 @@ nvprof 工具 1. 将 :code:`REGISTER_GPU_PROFILER` 函数加到代码中(参考强调部分)。 - .. literalinclude:: ../../../../paddle/math/tests/test_GpuProfiler.cpp + .. literalinclude:: ../../../../paddle/legacy/math/tests/test_GpuProfiler.cpp :language: c++ :lines: 137-151 :emphasize-lines: 6-7 @@ -147,13 +147,13 @@ nvprof 工具 .. code-block:: bash - nvprof ./paddle/math/tests/test_GpuProfiler + nvprof ./paddle/legacy/math/tests/test_GpuProfiler 然后,您就能获得如下的分析结果: .. 
code-block:: bash - ==78544== Profiling application: ./paddle/math/tests/test_GpuProfiler + ==78544== Profiling application: ./paddle/legacy/math/tests/test_GpuProfiler ==78544== Profiling result: Time(%) Time Calls Avg Min Max Name 27.60% 9.6305ms 5 1.9261ms 3.4560us 6.4035ms [CUDA memcpy HtoD] diff --git a/doc/v2/howto/optimization/gpu_profiling_en.rst b/doc/v2/howto/optimization/gpu_profiling_en.rst index 50adb7da24..6e439be9bb 100644 --- a/doc/v2/howto/optimization/gpu_profiling_en.rst +++ b/doc/v2/howto/optimization/gpu_profiling_en.rst @@ -51,10 +51,10 @@ For general GPU profiling, a bunch of tools are provided from both NVIDIA and th **nvprof** is Nvidia profiler and **nvvp** is (GUI based) Nvidia visual profiler. In this tutorial, we will focus on nvprof and nvvp. -:code:`test_GpuProfiler` from :code:`paddle/math/tests` directory will be used to evaluate +:code:`test_GpuProfiler` from :code:`paddle/legacy/math/tests` directory will be used to evaluate above profilers. -.. literalinclude:: ../../../../paddle/math/tests/test_GpuProfiler.cpp +.. literalinclude:: ../../../../paddle/legacy/math/tests/test_GpuProfiler.cpp :language: c++ :lines: 137-151 :linenos: @@ -80,7 +80,7 @@ As a simple example, consider the following: 1. Add :code:`REGISTER_TIMER_INFO` and :code:`printAllStatus` functions (see the emphasize-lines). - .. literalinclude:: ../../../../paddle/math/tests/test_GpuProfiler.cpp + .. literalinclude:: ../../../../paddle/legacy/math/tests/test_GpuProfiler.cpp :language: c++ :lines: 137-151 :emphasize-lines: 8-12,14 @@ -98,8 +98,8 @@ As a simple example, consider the following: .. code-block:: bash :emphasize-lines: 1,12-15 - > ./paddle/math/tests/test_GpuProfiler - I1117 11:13:42.313065 2522362816 Util.cpp:155] commandline: ./paddle/math/tests/test_GpuProfiler + > ./paddle/legacy/math/tests/test_GpuProfiler + I1117 11:13:42.313065 2522362816 Util.cpp:155] commandline: ./paddle/legacy/math/tests/test_GpuProfiler I1117 11:13:42.845065 2522362816 Util.cpp:130] Calling runInitFunctions I1117 11:13:42.845208 2522362816 Util.cpp:143] Call runInitFunctions done. [==========] Running 1 test from 1 test case. @@ -127,7 +127,7 @@ To use this command line profiler **nvprof**, you can simply issue the following 1. Add :code:`REGISTER_GPU_PROFILER` function (see the emphasize-lines). - .. literalinclude:: ../../../../paddle/math/tests/test_GpuProfiler.cpp + .. literalinclude:: ../../../../paddle/legacy/math/tests/test_GpuProfiler.cpp :language: c++ :lines: 137-151 :emphasize-lines: 6-7 @@ -144,13 +144,13 @@ To use this command line profiler **nvprof**, you can simply issue the following .. code-block:: bash - nvprof ./paddle/math/tests/test_GpuProfiler + nvprof ./paddle/legacy/math/tests/test_GpuProfiler Then, you can get the following profiling result: .. 
code-block:: bash

- ==78544== Profiling application: ./paddle/math/tests/test_GpuProfiler
+ ==78544== Profiling application: ./paddle/legacy/math/tests/test_GpuProfiler
 ==78544== Profiling result:
 Time(%) Time Calls Avg Min Max Name
 27.60% 9.6305ms 5 1.9261ms 3.4560us 6.4035ms [CUDA memcpy HtoD]
diff --git a/doc/v2/howto/rnn/hierarchical_layer_en.rst b/doc/v2/howto/rnn/hierarchical_layer_en.rst
index 236f58a160..fb668f1bab 100644
--- a/doc/v2/howto/rnn/hierarchical_layer_en.rst
+++ b/doc/v2/howto/rnn/hierarchical_layer_en.rst
@@ -1,4 +1,89 @@
-Layers supporting hierarchical sequence as input
-================================================
-
-TBD
+#####################################################
+Layers that Support Hierarchical Sequences as Input
+#####################################################
+
+.. contents::
+
+Overview
+========
+
+A sequence is a common data type in natural language processing tasks. An independent word can be regarded as a non-sequential input, or a 0-level sequence. A sentence made up of words is a single-level sequence; several sentences make up a paragraph, which is a double-level sequence.
+
+A double-level sequence is a nested sequence in which each element is a single-level sequence. This is a very flexible way of organizing data that helps us construct complex input information.
+
+Non-sequences, single-level sequences, and double-level sequences are defined as follows.
+
++ 0-level sequence: an independent element. Its type can be any input data type supported by PaddlePaddle;
++ Single-level sequence: multiple elements arranged in a row, each of which is a 0-level sequence. The order of the elements carries important input information;
++ Double-level sequence: multiple elements arranged in a row, each of which is a single-level sequence, called a subseq of the double-level sequence; each element of a subseq is a 0-level sequence.
+
+In PaddlePaddle, the following layers accept double-level sequences as input and perform the corresponding calculations.
+
+`pooling`
+=========
+
+The use of pooling is as follows; a combined sketch of both aggregation levels follows the list below.
+
+.. code-block:: python
+
+        seq_pool = pooling(input=layer,
+                           pooling_type=pooling.Max(),
+                           agg_level=AggregateLevel.TO_SEQUENCE)
+
+- `pooling_type` currently supports two types: pooling.Max() and pooling.Avg().
+
+- When `agg_level=AggregateLevel.TO_NO_SEQUENCE` (default):
+
+  - Effect: both a double-level sequence input and a single-level sequence input are reduced to a 0-level sequence
+  - Input: a double-level sequence or a single-level sequence
+  - Output: a 0-level sequence, which is the average (or maximum) of the entire input sequence (single- or double-level)
+
+- When `agg_level=AggregateLevel.TO_SEQUENCE`:
+
+  - Effect: a double-level sequence is transformed into a single-level sequence
+  - Input: a double-level sequence
+  - Output: a single-level sequence in which each element is the average (or maximum) of the elements of the corresponding subseq of the original double-level sequence.
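+Below is a minimal sketch combining both aggregation levels, assuming the same configuration helpers used above; `layer` stands for some hypothetical layer whose output is a double-level sequence:
+
+.. code-block:: python
+
+    # double-level sequence -> single-level sequence:
+    # one average per subseq of the input
+    subseq_avg = pooling(input=layer,
+                         pooling_type=pooling.Avg(),
+                         agg_level=AggregateLevel.TO_SEQUENCE)
+
+    # single-level sequence -> 0-level sequence:
+    # one maximum over the whole remaining sequence
+    seq_max = pooling(input=subseq_avg,
+                      pooling_type=pooling.Max(),
+                      agg_level=AggregateLevel.TO_NO_SEQUENCE)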
+`last_seq` and `first_seq`
+==========================
+
+An example of using `last_seq` is as follows (the usage of `first_seq` is similar).
+
+.. code-block:: python
+
+        last = last_seq(input=layer,
+                        agg_level=AggregateLevel.TO_SEQUENCE)
+
+- When `agg_level=AggregateLevel.TO_NO_SEQUENCE` (default):
+
+  - Effect: both a double-level sequence input and a single-level sequence input are reduced to a 0-level sequence
+  - Input: a double-level sequence or a single-level sequence
+  - Output: a 0-level sequence, which is the last (or first) element of the input sequence (double- or single-level).
+
+- When `agg_level=AggregateLevel.TO_SEQUENCE`:
+
+  - Effect: a double-level sequence is transformed into a single-level sequence
+  - Input: a double-level sequence
+  - Output: a single-level sequence in which each element is the last (or first) element of the corresponding subseq of the double-level sequence.
+
+`expand`
+========
+
+The use of expand is as follows; a worked example of `ExpandLevel.FROM_SEQUENCE` follows the list below.
+
+.. code-block:: python
+
+        ex = expand(input=layer1,
+                    expand_as=layer2,
+                    expand_level=ExpandLevel.FROM_NO_SEQUENCE)
+
+- When `expand_level=ExpandLevel.FROM_NO_SEQUENCE` (default):
+
+  - Effect: a 0-level sequence is expanded to a single-level sequence or a double-level sequence
+  - Input: layer1 must be a 0-level sequence, the data to be expanded; layer2 can be a single-level or a double-level sequence, and it provides the expanded length information
+  - Output: a single-level or a double-level sequence; the type of the output sequence and the number of elements it contains are the same as those of layer2. If the output is a single-level sequence, each of its elements is a copy of the layer1 element; if the output is a double-level sequence, each element within it is likewise a copy of the layer1 element
+
+- When `expand_level=ExpandLevel.FROM_SEQUENCE`:
+
+  - Effect: a single-level sequence is expanded to a double-level sequence
+  - Input: layer1 must be a single-level sequence, the data to be expanded; layer2 must be a double-level sequence providing the expanded length information
+  - Output: a double-level sequence with the same number of elements as layer2. The number of elements in the single-level sequence layer1 must equal the number of subseqs in the double-level sequence layer2; the i-th element of layer1 (a 0-level sequence) is expanded into the single-level sequence that constitutes the i-th subseq of the output double-level sequence.
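+As a concrete illustration of `ExpandLevel.FROM_SEQUENCE`, consider a sketch built on the same helpers as above, where `sent_vec` and `paragraph` are hypothetical layers: if `sent_vec` outputs a single-level sequence of 3 sentence vectors and `paragraph` outputs a double-level sequence with 3 subseqs of lengths 2, 4, and 3, then the output below is a double-level sequence whose i-th subseq repeats the i-th sentence vector 2, 4, and 3 times respectively:
+
+.. code-block:: python
+
+    # copy each sentence vector once per element of the matching subseq
+    sent_per_word = expand(input=sent_vec,
+                           expand_as=paragraph,
+                           expand_level=ExpandLevel.FROM_SEQUENCE)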
diff --git a/doc/v2/howto/rnn/hrnn_rnn_api_compare_cn.rst b/doc/v2/howto/rnn/hrnn_rnn_api_compare_cn.rst index 67c7b774e9..9d6d417075 100644 --- a/doc/v2/howto/rnn/hrnn_rnn_api_compare_cn.rst +++ b/doc/v2/howto/rnn/hrnn_rnn_api_compare_cn.rst @@ -4,7 +4,7 @@ 单双层RNN API对比介绍 ##################### -本文以PaddlePaddle的双层RNN单元测试为示例,用多对效果完全相同的、分别使用单双层RNN作为网络配置的模型,来讲解如何使用双层RNN。本文中所有的例子,都只是介绍双层RNN的API接口,并不是使用双层RNN解决实际的问题。如果想要了解双层RNN在具体问题中的使用,请参考\ :ref:`algo_hrnn_demo`\ 。本文中示例所使用的单元测试文件是\ `test_RecurrentGradientMachine.cpp `_\ 。 +本文以PaddlePaddle的双层RNN单元测试为示例,用多对效果完全相同的、分别使用单双层RNN作为网络配置的模型,来讲解如何使用双层RNN。本文中所有的例子,都只是介绍双层RNN的API接口,并不是使用双层RNN解决实际的问题。如果想要了解双层RNN在具体问题中的使用,请参考\ :ref:`algo_hrnn_demo`\ 。本文中示例所使用的单元测试文件是\ `test_RecurrentGradientMachine.cpp `_\ 。 示例1:双层RNN,子序列间无Memory ================================ @@ -13,8 +13,8 @@ 在本示例中,单层RNN和双层RNN的网络配置,都是将每一句分好词后的句子,使用LSTM作为encoder,压缩成一个向量。区别是RNN使用两层序列模型,将多句话看成一个整体同时使用encoder压缩。二者语意上完全一致。这组语义相同的示例配置如下: -* 单层RNN\: `sequence_layer_group.conf `_ -* 双层RNN\: `sequence_nest_layer_group.conf `_ +* 单层RNN\: `sequence_layer_group.conf `_ +* 双层RNN\: `sequence_nest_layer_group.conf `_ 读取双层序列数据 @@ -24,18 +24,18 @@ - 本例中的原始数据一共有10个样本。每个样本由两部分组成,一个label(此处都为2)和一个已经分词后的句子。这个数据也被单层RNN网络直接使用。 -.. literalinclude:: ../../../../paddle/gserver/tests/Sequence/tour_train_wdseg +.. literalinclude:: ../../../../paddle/legacy/gserver/tests/Sequence/tour_train_wdseg :language: text - 双层序列数据一共有4个样本。 每个样本间用空行分开,整体数据和原始数据完全一样。但于双层序列的LSTM来说,第一个样本同时encode两条数据成两个向量。这四条数据同时处理的句子数量为\ :code:`[2, 3, 2, 3]`\ 。 -.. literalinclude:: ../../../../paddle/gserver/tests/Sequence/tour_train_wdseg.nest +.. literalinclude:: ../../../../paddle/legacy/gserver/tests/Sequence/tour_train_wdseg.nest :language: text -其次,对于两种不同的输入数据类型,不同DataProvider对比如下(`sequenceGen.py `_)\: +其次,对于两种不同的输入数据类型,不同DataProvider对比如下(`sequenceGen.py `_)\: -.. literalinclude:: ../../../../paddle/gserver/tests/sequenceGen.py +.. literalinclude:: ../../../../paddle/legacy/gserver/tests/sequenceGen.py :language: python :lines: 21-39 :linenos: @@ -47,7 +47,7 @@ - words是原始数据中的每一句话,所对应的词表index数组。它是integer_value_sequence类型的,即整数数组。words即为这个数据中的单层时间序列。 - label是原始数据中对于每一句话的分类标签,它是integer_value类型的。 -.. literalinclude:: ../../../../paddle/gserver/tests/sequenceGen.py +.. literalinclude:: ../../../../paddle/legacy/gserver/tests/sequenceGen.py :language: python :lines: 42-71 :linenos: @@ -64,7 +64,7 @@ 首先,我们看一下单层RNN的配置。代码中9-15行(高亮部分)即为单层RNN序列的使用代码。这里使用了PaddlePaddle预定义好的RNN处理函数。在这个函数中,RNN对于每一个时间步通过了一个LSTM网络。 -.. literalinclude:: ../../../../paddle/gserver/tests/sequence_layer_group.conf +.. literalinclude:: ../../../../paddle/legacy/gserver/tests/sequence_layer_group.conf :language: python :lines: 38-63 :linenos: @@ -85,7 +85,7 @@ * 至此,\ :code:`lstm_last`\ 便和单层RNN配置中的\ :code:`lstm_last`\ 具有相同的结果了。 -.. literalinclude:: ../../../../paddle/gserver/tests/sequence_nest_layer_group.conf +.. literalinclude:: ../../../../paddle/legacy/gserver/tests/sequence_nest_layer_group.conf :language: python :lines: 38-64 :linenos: @@ -107,7 +107,7 @@ - 单层RNN:过了一个很简单的recurrent_group。每一个时间步,当前的输入y和上一个时间步的输出rnn_state做了一个全链接。 -.. literalinclude:: ../../../../paddle/gserver/tests/sequence_rnn.conf +.. 
literalinclude:: ../../../../paddle/legacy/gserver/tests/sequence_rnn.conf :language: python :lines: 36-48 @@ -116,7 +116,7 @@ - 内层inner_step的recurrent_group和单层序列的几乎一样。除了boot_layer=outer_mem,表示将外层的outer_mem作为内层memory的初始状态。外层outer_step中,outer_mem是一个子句的最后一个向量,即整个双层group是将前一个子句的最后一个向量,作为下一个子句memory的初始状态。 - 从输入数据上看,单双层序列的句子是一样的,只是双层序列将其又做了子序列划分。因此双层序列的配置中,必须将前一个子句的最后一个元素,作为boot_layer传给下一个子句的memory,才能保证和单层序列的配置中“每个时间步都用了上一个时间步的输出结果”一致。 -.. literalinclude:: ../../../../paddle/gserver/tests/sequence_nest_rnn.conf +.. literalinclude:: ../../../../paddle/legacy/gserver/tests/sequence_nest_rnn.conf :language: python :lines: 39-66 @@ -134,7 +134,7 @@ **输入不等长** 是指recurrent_group的多个输入序列,在每个时间步的子序列长度可以不相等。但序列输出时,需要指定与某一个输入的序列信息是一致的。使用\ :red:`targetInlink`\ 可以指定哪一个输入和输出序列信息一致,默认指定第一个输入。 -示例3的配置分别为\ `单层不等长RNN `_\ 和\ `双层不等长RNN `_\ 。 +示例3的配置分别为\ `单层不等长RNN `_\ 和\ `双层不等长RNN `_\ 。 示例3对于单层RNN和双层RNN数据完全相同。 @@ -152,14 +152,14 @@ * 单层RNN\: -.. literalinclude:: ../../../../paddle/gserver/tests/sequence_rnn_multi_unequalength_inputs.py +.. literalinclude:: ../../../../paddle/legacy/gserver/tests/sequence_rnn_multi_unequalength_inputs.py :language: python :lines: 42-59 :linenos: * 双层RNN\ \: -.. literalinclude:: ../../../../paddle/gserver/tests/sequence_nest_rnn_multi_unequalength_inputs.py +.. literalinclude:: ../../../../paddle/legacy/gserver/tests/sequence_nest_rnn_multi_unequalength_inputs.py :language: python :lines: 41-80 :linenos: diff --git a/doc/v2/howto/rnn/hrnn_rnn_api_compare_en.rst b/doc/v2/howto/rnn/hrnn_rnn_api_compare_en.rst index ae997f0805..a4485f7b5e 100644 --- a/doc/v2/howto/rnn/hrnn_rnn_api_compare_en.rst +++ b/doc/v2/howto/rnn/hrnn_rnn_api_compare_en.rst @@ -4,7 +4,7 @@ API comparision between RNN and hierarchical RNN ##################### -This article takes PaddlePaddle's hierarchical RNN unit test as an example. We will use several examples to illestrate the usage of single-layer and hierarchical RNNs. Each example has two model configurations, one for single-layer, and the other for hierarchical RNN. Although the implementations are different, both the two model configurations' effects are the same. All of the examples in this article only describe the API interface of the hierarchical RNN, while we do not use this hierarchical RNN to solve practical problems. If you want to understand the use of hierarchical RNN in specific issues, please refer to \ :ref:`algo_hrnn_demo`\ 。The unit test file used in this article's example is \ `test_RecurrentGradientMachine.cpp `_\ 。 +This article takes PaddlePaddle's hierarchical RNN unit test as an example. We will use several examples to illestrate the usage of single-layer and hierarchical RNNs. Each example has two model configurations, one for single-layer, and the other for hierarchical RNN. Although the implementations are different, both the two model configurations' effects are the same. All of the examples in this article only describe the API interface of the hierarchical RNN, while we do not use this hierarchical RNN to solve practical problems. 
If you want to understand the use of hierarchical RNN in specific issues, please refer to \ :ref:`algo_hrnn_demo`\ . The unit test file used in this article's example is \ `test_RecurrentGradientMachine.cpp `_\ .

Example 1:Hierarchical RNN without Memory between subsequences
================================
@@ -13,8 +13,8 @@ The classical case in the hierarchical RNN is to perform sequence operations on
In this example, the network configuration of single-layer RNNs and hierarchical RNNs are all to use LSTM as en encoder to compress a word-segmented sentence into a vector. The difference is that, RNN uses a hierarchical RNN model, treating multiple sentences as a whole to use encoder to compress simultaneously. They are completely consistent in their semantic meanings. This pair of semantically identical example configurations is as follows:

-* RNN\: `sequence_layer_group.conf `_
-* Hierarchical RNN\: `sequence_nest_layer_group.conf `_
+* RNN\: `sequence_layer_group.conf `_
+* Hierarchical RNN\: `sequence_nest_layer_group.conf `_

Reading hierarchical sequence data
@@ -24,18 +24,18 @@ Firstly, the original data in this example is as follows \:

- The original data in this example has 10 samples. Each of the sample includes two components: a lable(all 2 here), and a word-segmented sentence. This data is used by single RNN as well.

-.. literalinclude:: ../../../../paddle/gserver/tests/Sequence/tour_train_wdseg
+.. literalinclude:: ../../../../paddle/legacy/gserver/tests/Sequence/tour_train_wdseg
   :language: text

- The data for hierarchical RNN has 4 samples. Every sample is seperated by a blank line, while the content of the data is the same as the original data. But as for hierarchical LSTM, the first sample will encode two sentences into two vectors simultaneously. The sentence count dealed simultaneously by this 4 samples are \ :code:`[2, 3, 2, 3]`\ .

-.. literalinclude:: ../../../../paddle/gserver/tests/Sequence/tour_train_wdseg.nest
+.. literalinclude:: ../../../../paddle/legacy/gserver/tests/Sequence/tour_train_wdseg.nest
   :language: text

-Secondly, as for these two types of different input data formats, the contrast of different DataProviders are as follows (`sequenceGen.py `_)\:
+Secondly, as for these two types of different input data formats, the contrast of different DataProviders is as follows (`sequenceGen.py `_)\:

-.. literalinclude:: ../../../../paddle/gserver/tests/sequenceGen.py
+.. literalinclude:: ../../../../paddle/legacy/gserver/tests/sequenceGen.py
   :language: python
   :lines: 21-39
   :linenos:
@@ -47,7 +47,7 @@ Secondly, as for these two types of different input data formats, the contrast o

- "words" is a list of word table indices corresponding to each word in the sentence in the original data. Its data type is integer_value_sequence, that is integer list. So, "words" is a singler-layer time series in the data.
- "label" is the categorical label of each sentence, whose data type is integer_value.

-.. literalinclude:: ../../../../paddle/gserver/tests/sequenceGen.py
+.. literalinclude:: ../../../../paddle/legacy/gserver/tests/sequenceGen.py
   :language: python
   :lines: 42-71
   :linenos:
@@ -64,7 +64,7 @@ Model configuration

Firstly, let's look at the configuration of single-layer RNN. The hightlighted part of line 9 to line 15 is the usage of single-layer RNN. Here we use the pre-defined RNN process function in PaddlePaddle. In this function, for each time step, RNN passes through an LSTM network.

-.. literalinclude:: ../../../../paddle/gserver/tests/sequence_layer_group.conf
+..
literalinclude:: ../../../../paddle/legacy/gserver/tests/sequence_layer_group.conf :language: python :lines: 38-63 :linenos: @@ -85,7 +85,7 @@ Secondly, let's look at the model configuration of hierarchical RNN which has th * Till now, \ :code:`lstm_last`\ has the same result as \ :code:`lstm_last`\ in single-layer RNN configuration. -.. literalinclude:: ../../../../paddle/gserver/tests/sequence_nest_layer_group.conf +.. literalinclude:: ../../../../paddle/legacy/gserver/tests/sequence_nest_layer_group.conf :language: python :lines: 38-64 :linenos: @@ -107,7 +107,7 @@ We select the different parts between single-layer RNN and hierarchical RNN conf - single-layer RNN:passes through a simple recurrent_group. For each time step, the current input y and the last time step's output rnn_state pass through a fully-connected layer. -.. literalinclude:: ../../../../paddle/gserver/tests/sequence_rnn.conf +.. literalinclude:: ../../../../paddle/legacy/gserver/tests/sequence_rnn.conf :language: python :lines: 36-48 @@ -116,7 +116,7 @@ We select the different parts between single-layer RNN and hierarchical RNN conf - The recurrent_group of inner layer's inner_step is nearly the same as single-layer sequence, except for the case of boot_layer=outer_mem, which means using the outer layer's outer_mem as the initial state for the inner layer's memory. In the outer layer's out_step, outer_mem is the last vector of a subsequence, that is, the whole hierarchical group uses the last vector of the previous subsequence as the initial state for the next subsequence's memory. - From the aspect of the input data, sentences from single-layer and hierarchical RNN are the same. The only difference is that, hierarchical RNN disassembes the sequence into subsequences. So in the hierarchical RNN configuration, we must use the last element of the previous subsequence as a boot_layer for the memory of the next subsequence, so that it makes no difference with "every time step uses the output of last time step" in the sigle-layer RNN configuration. -.. literalinclude:: ../../../../paddle/gserver/tests/sequence_nest_rnn.conf +.. literalinclude:: ../../../../paddle/legacy/gserver/tests/sequence_nest_rnn.conf :language: python :lines: 39-66 @@ -134,7 +134,7 @@ Example 3:hierarchical RNN with unequal length inputs **unequal length inputs** means in the multiple input sequences of recurrent_group, the lengths of subsequences can be unequal. But the output of the sequence, needs to be consistent with one of the input sequences. Using \ :red:`targetInlink`\ can help you specify which of the input sequences and the output sequence can be consistent, by default is the first input. -The configurations of Example 3 are \ `sequence_rnn_multi_unequalength_inputs `_ \ and \ `sequence_nest_rnn_multi_unequalength_inputs `_\ . +The configurations of Example 3 are \ `sequence_rnn_multi_unequalength_inputs `_ \ and \ `sequence_nest_rnn_multi_unequalength_inputs `_\ . The data for the configurations of Example 3's single-layer RNN and hierarchical RNN are exactly the same. @@ -152,14 +152,14 @@ Similar to Example 2's configuration, Example 3's configuration uses single-laye * single-layer RNN\: -.. literalinclude:: ../../../../paddle/gserver/tests/sequence_rnn_multi_unequalength_inputs.py +.. literalinclude:: ../../../../paddle/legacy/gserver/tests/sequence_rnn_multi_unequalength_inputs.py :language: python :lines: 42-59 :linenos: * hierarchical RNN\ \: -.. 
literalinclude:: ../../../../paddle/gserver/tests/sequence_nest_rnn_multi_unequalength_inputs.py +.. literalinclude:: ../../../../paddle/legacy/gserver/tests/sequence_nest_rnn_multi_unequalength_inputs.py :language: python :lines: 41-80 :linenos: diff --git a/doc/v2/images/FullyConnected.jpg b/doc/v2/images/FullyConnected.jpg new file mode 100644 index 0000000000..b2241f4014 Binary files /dev/null and b/doc/v2/images/FullyConnected.jpg differ diff --git a/doc/v2/images/add_security_group.png b/doc/v2/images/add_security_group.png new file mode 100644 index 0000000000..bd34f46c9b Binary files /dev/null and b/doc/v2/images/add_security_group.png differ diff --git a/doc/v2/images/bi_lstm.jpg b/doc/v2/images/bi_lstm.jpg new file mode 100644 index 0000000000..adec1606d6 Binary files /dev/null and b/doc/v2/images/bi_lstm.jpg differ diff --git a/doc/v2/images/checkpointing.png b/doc/v2/images/checkpointing.png new file mode 100644 index 0000000000..c221e8474f Binary files /dev/null and b/doc/v2/images/checkpointing.png differ diff --git a/doc/v2/images/create_efs.png b/doc/v2/images/create_efs.png new file mode 100644 index 0000000000..e5f1526033 Binary files /dev/null and b/doc/v2/images/create_efs.png differ diff --git a/doc/v2/images/csr.png b/doc/v2/images/csr.png new file mode 100644 index 0000000000..3dc10b8de4 Binary files /dev/null and b/doc/v2/images/csr.png differ diff --git a/doc/v2/images/data_dispatch.png b/doc/v2/images/data_dispatch.png new file mode 100644 index 0000000000..5bdcc24d6a Binary files /dev/null and b/doc/v2/images/data_dispatch.png differ diff --git a/doc/v2/images/dataset.graffle b/doc/v2/images/dataset.graffle new file mode 100644 index 0000000000..c10a423ed1 Binary files /dev/null and b/doc/v2/images/dataset.graffle differ diff --git a/doc/v2/images/dataset.png b/doc/v2/images/dataset.png new file mode 100644 index 0000000000..2fb7f1cce3 Binary files /dev/null and b/doc/v2/images/dataset.png differ diff --git a/doc/v2/images/doc_en.png b/doc/v2/images/doc_en.png new file mode 100644 index 0000000000..ed6b9178fb Binary files /dev/null and b/doc/v2/images/doc_en.png differ diff --git a/doc/v2/images/efs_mount.png b/doc/v2/images/efs_mount.png new file mode 100644 index 0000000000..0f9e3cab98 Binary files /dev/null and b/doc/v2/images/efs_mount.png differ diff --git a/doc/v2/images/encoder-decoder-attention-model.png b/doc/v2/images/encoder-decoder-attention-model.png new file mode 100644 index 0000000000..79f911d4ba Binary files /dev/null and b/doc/v2/images/encoder-decoder-attention-model.png differ diff --git a/doc/v2/images/engine.png b/doc/v2/images/engine.png new file mode 100644 index 0000000000..1f5f65c2cc Binary files /dev/null and b/doc/v2/images/engine.png differ diff --git a/doc/v2/images/file_storage.graffle b/doc/v2/images/file_storage.graffle new file mode 100644 index 0000000000..50a17e70fa Binary files /dev/null and b/doc/v2/images/file_storage.graffle differ diff --git a/doc/v2/images/file_storage.png b/doc/v2/images/file_storage.png new file mode 100644 index 0000000000..fccb4e3e7e Binary files /dev/null and b/doc/v2/images/file_storage.png differ diff --git a/doc/v2/images/glossary_rnn.dot b/doc/v2/images/glossary_rnn.dot new file mode 100644 index 0000000000..2cd0fb1820 --- /dev/null +++ b/doc/v2/images/glossary_rnn.dot @@ -0,0 +1,42 @@ +digraph G{ + subgraph cluster_timestep0 { + label="recurrent timestep i-1" + bgcolor=lightgray + node [style=filled,color=white] + fc0_0 [label="fc 0"] + fc0_1 [label="fc 1"] + fc0_2 [label="fc 2"] + + fc0_0 -> 
fc0_1 + fc0_1 -> fc0_2 + } + + subgraph cluster_timestep1 { + label="recurrent timestep i" + node [style=filled]; + fc1_0 [label="fc 0"] + fc1_1 [label="fc 1"] + fc1_2 [label="fc 2"] + color=blue + + fc1_0 -> fc1_1 + fc1_1 -> fc1_2 + } + + subgraph cluster_timestep2 { + label="recurrent timestep i+1" + bgcolor=lightgray + node [style=filled,color=white] + fc2_0 [label="fc 0"] + fc2_1 [label="fc 1"] + fc2_2 [label="fc 2"] + + fc2_0 -> fc2_1 + fc2_1 -> fc2_2 + } + + + fc0_1 -> fc1_1 [style="dotted" constraint=false] + fc1_1 -> fc2_1 [style="dotted" constraint=false] + +} \ No newline at end of file diff --git a/doc/v2/images/glossary_rnn_with_memory.dot b/doc/v2/images/glossary_rnn_with_memory.dot new file mode 100644 index 0000000000..0f101ec2d8 --- /dev/null +++ b/doc/v2/images/glossary_rnn_with_memory.dot @@ -0,0 +1,48 @@ +digraph G{ + subgraph cluster_timestep0 { + label="recurrent timestep i-1" + bgcolor=lightgray + node [style=filled,color=white] + fc0_0 [label="fc 0"] + fc0_1 [label="fc 1"] + fc0_2 [label="fc 2"] + m0 [label="memory"] + fc0_0 -> fc0_1 + fc0_1 -> fc0_2 + fc0_1 -> m0 + m0 -> fc0_1 + } + + subgraph cluster_timestep1 { + label="recurrent timestep i" + node [style=filled]; + fc1_0 [label="fc 0"] + fc1_1 [label="fc 1"] + fc1_2 [label="fc 2"] + m1 [label="memory"] + color=blue + fc1_0 -> fc1_1 + fc1_1 -> fc1_2 + fc1_1 -> m1 + m1 -> fc1_1 + } + + subgraph cluster_timestep2 { + label="recurrent timestep i+1" + bgcolor=lightgray + node [style=filled,color=white] + fc2_0 [label="fc 0"] + fc2_1 [label="fc 1"] + fc2_2 [label="fc 2"] + m2 [label="memory"] + fc2_0 -> fc2_1 + fc2_1 -> fc2_2 + fc2_1 -> m2 + m2 -> fc2_1 + } + + + m0 -> m1 [style="dotted" constraint=false] + m1 -> m2 [style="dotted" constraint=false] + +} \ No newline at end of file diff --git a/doc/v2/images/gradients.png b/doc/v2/images/gradients.png new file mode 100644 index 0000000000..f031bcf8e4 Binary files /dev/null and b/doc/v2/images/gradients.png differ diff --git a/doc/v2/images/init_lock.graffle b/doc/v2/images/init_lock.graffle new file mode 100644 index 0000000000..fa9149f21b Binary files /dev/null and b/doc/v2/images/init_lock.graffle differ diff --git a/doc/v2/images/init_lock.png b/doc/v2/images/init_lock.png new file mode 100644 index 0000000000..92404ee6d6 Binary files /dev/null and b/doc/v2/images/init_lock.png differ diff --git a/doc/v2/images/k8s-paddle-arch.png b/doc/v2/images/k8s-paddle-arch.png new file mode 100644 index 0000000000..b3800c4fe8 Binary files /dev/null and b/doc/v2/images/k8s-paddle-arch.png differ diff --git a/doc/v2/images/layers.png b/doc/v2/images/layers.png new file mode 100644 index 0000000000..306f79b7a8 Binary files /dev/null and b/doc/v2/images/layers.png differ diff --git a/doc/v2/images/managed_policy.png b/doc/v2/images/managed_policy.png new file mode 100644 index 0000000000..c7ecda555b Binary files /dev/null and b/doc/v2/images/managed_policy.png differ diff --git a/doc/v2/images/matrix.png b/doc/v2/images/matrix.png new file mode 100644 index 0000000000..c33ce9cf03 Binary files /dev/null and b/doc/v2/images/matrix.png differ diff --git a/doc/v2/images/nvvp1.png b/doc/v2/images/nvvp1.png new file mode 100644 index 0000000000..1af23ac3c5 Binary files /dev/null and b/doc/v2/images/nvvp1.png differ diff --git a/doc/v2/images/nvvp2.png b/doc/v2/images/nvvp2.png new file mode 100644 index 0000000000..177c9db708 Binary files /dev/null and b/doc/v2/images/nvvp2.png differ diff --git a/doc/v2/images/nvvp3.png b/doc/v2/images/nvvp3.png new file mode 100644 index 
0000000000..d8f393667d Binary files /dev/null and b/doc/v2/images/nvvp3.png differ diff --git a/doc/v2/images/nvvp4.png b/doc/v2/images/nvvp4.png new file mode 100644 index 0000000000..51f2f3e183 Binary files /dev/null and b/doc/v2/images/nvvp4.png differ diff --git a/doc/v2/images/overview.png b/doc/v2/images/overview.png new file mode 100644 index 0000000000..8fb7bbb9dd Binary files /dev/null and b/doc/v2/images/overview.png differ diff --git a/doc/v2/images/paddle-cloud-in-data-center.png b/doc/v2/images/paddle-cloud-in-data-center.png new file mode 100644 index 0000000000..da5d1a7756 Binary files /dev/null and b/doc/v2/images/paddle-cloud-in-data-center.png differ diff --git a/doc/v2/images/paddle-etcd.graffle b/doc/v2/images/paddle-etcd.graffle new file mode 100644 index 0000000000..f973dc9b9d Binary files /dev/null and b/doc/v2/images/paddle-etcd.graffle differ diff --git a/doc/v2/images/paddle-etcd.png b/doc/v2/images/paddle-etcd.png new file mode 100644 index 0000000000..57981ceb4b Binary files /dev/null and b/doc/v2/images/paddle-etcd.png differ diff --git a/doc/v2/images/paddle-model-sharding.graffle b/doc/v2/images/paddle-model-sharding.graffle new file mode 100644 index 0000000000..fba30f0ca2 Binary files /dev/null and b/doc/v2/images/paddle-model-sharding.graffle differ diff --git a/doc/v2/images/paddle-model-sharding.png b/doc/v2/images/paddle-model-sharding.png new file mode 100644 index 0000000000..8c3f6724ef Binary files /dev/null and b/doc/v2/images/paddle-model-sharding.png differ diff --git a/doc/v2/images/paddle-ps-0.png b/doc/v2/images/paddle-ps-0.png new file mode 100644 index 0000000000..47ef32806f Binary files /dev/null and b/doc/v2/images/paddle-ps-0.png differ diff --git a/doc/v2/images/paddle-ps-1.png b/doc/v2/images/paddle-ps-1.png new file mode 100644 index 0000000000..f3125db730 Binary files /dev/null and b/doc/v2/images/paddle-ps-1.png differ diff --git a/doc/v2/images/paddle-ps.graffle b/doc/v2/images/paddle-ps.graffle new file mode 100644 index 0000000000..0e536ffdd9 Binary files /dev/null and b/doc/v2/images/paddle-ps.graffle differ diff --git a/doc/v2/images/paddle-task-queues.graffle b/doc/v2/images/paddle-task-queues.graffle new file mode 100644 index 0000000000..4263ed8bfd Binary files /dev/null and b/doc/v2/images/paddle-task-queues.graffle differ diff --git a/doc/v2/images/paddle-task-queues.png b/doc/v2/images/paddle-task-queues.png new file mode 100644 index 0000000000..5f98026679 Binary files /dev/null and b/doc/v2/images/paddle-task-queues.png differ diff --git a/doc/v2/images/paddle-task-states.graffle b/doc/v2/images/paddle-task-states.graffle new file mode 100644 index 0000000000..cf1a0b9246 Binary files /dev/null and b/doc/v2/images/paddle-task-states.graffle differ diff --git a/doc/v2/images/paddle-task-states.png b/doc/v2/images/paddle-task-states.png new file mode 100644 index 0000000000..4ae43cb66c Binary files /dev/null and b/doc/v2/images/paddle-task-states.png differ diff --git a/doc/v2/images/ps_cn.png b/doc/v2/images/ps_cn.png new file mode 100644 index 0000000000..f9525739cc Binary files /dev/null and b/doc/v2/images/ps_cn.png differ diff --git a/doc/v2/images/ps_en.png b/doc/v2/images/ps_en.png new file mode 100644 index 0000000000..6537d3d565 Binary files /dev/null and b/doc/v2/images/ps_en.png differ diff --git a/doc/v2/images/pserver_and_trainer.png b/doc/v2/images/pserver_and_trainer.png new file mode 100644 index 0000000000..f41fe48920 Binary files /dev/null and b/doc/v2/images/pserver_and_trainer.png differ diff --git 
a/doc/v2/images/pserver_init.graffle b/doc/v2/images/pserver_init.graffle new file mode 100644 index 0000000000..5f3f1f52be Binary files /dev/null and b/doc/v2/images/pserver_init.graffle differ diff --git a/doc/v2/images/pserver_init.png b/doc/v2/images/pserver_init.png new file mode 100644 index 0000000000..dfe491ff98 Binary files /dev/null and b/doc/v2/images/pserver_init.png differ diff --git a/doc/v2/images/route53_create_recordset.png b/doc/v2/images/route53_create_recordset.png new file mode 100644 index 0000000000..34e476c7be Binary files /dev/null and b/doc/v2/images/route53_create_recordset.png differ diff --git a/doc/v2/images/route53_create_zone.png b/doc/v2/images/route53_create_zone.png new file mode 100644 index 0000000000..25b7ddb831 Binary files /dev/null and b/doc/v2/images/route53_create_zone.png differ diff --git a/doc/v2/images/sequence_data.png b/doc/v2/images/sequence_data.png new file mode 100644 index 0000000000..6e47a46b89 Binary files /dev/null and b/doc/v2/images/sequence_data.png differ diff --git a/doc/v2/images/simple_full_hierarchical_recurrent.dot b/doc/v2/images/simple_full_hierarchical_recurrent.dot new file mode 100644 index 0000000000..ff278a0323 --- /dev/null +++ b/doc/v2/images/simple_full_hierarchical_recurrent.dot @@ -0,0 +1,30 @@ +digraph G { + rankdir=LR; + + subgraph cluster_t0 { + a [label="4"] + b [label="5"] + c [label="2"] + } + + subgraph cluster_t1 { + d [label="0"] + e [label="9"] + } + + subgraph cluster_t2 { + f [label="8"] + g [label="1"] + h [label="4"] + } + + a -> b; + b -> c; + c -> d [constraint=false]; + + d -> e; + e -> f [constraint=false]; + + f -> g; + g -> h; +} \ No newline at end of file diff --git a/doc/v2/images/simple_full_recurrent.dot b/doc/v2/images/simple_full_recurrent.dot new file mode 100644 index 0000000000..cee281fbac --- /dev/null +++ b/doc/v2/images/simple_full_recurrent.dot @@ -0,0 +1,19 @@ +digraph G { + rankdir=LR; + a [label="4"] + b [label="5"] + c [label="2"] + d [label="0"] + e [label="9"] + f [label="8"] + g [label="1"] + h [label="4"] + + a -> b; + b -> c; + c -> d; + d -> e; + e -> f; + f -> g; + g -> h; +} \ No newline at end of file diff --git a/doc/v2/images/submit-job.graffle b/doc/v2/images/submit-job.graffle new file mode 100644 index 0000000000..677cdfb6d9 Binary files /dev/null and b/doc/v2/images/submit-job.graffle differ diff --git a/doc/v2/images/submit-job.png b/doc/v2/images/submit-job.png new file mode 100644 index 0000000000..3046a460a7 Binary files /dev/null and b/doc/v2/images/submit-job.png differ diff --git a/doc/v2/images/trainer.graffle b/doc/v2/images/trainer.graffle new file mode 100644 index 0000000000..43415ed8cf Binary files /dev/null and b/doc/v2/images/trainer.graffle differ diff --git a/doc/v2/images/trainer.png b/doc/v2/images/trainer.png new file mode 100644 index 0000000000..6537d3d565 Binary files /dev/null and b/doc/v2/images/trainer.png differ diff --git a/doc/v2/images/trainer_cn.png b/doc/v2/images/trainer_cn.png new file mode 100644 index 0000000000..f9525739cc Binary files /dev/null and b/doc/v2/images/trainer_cn.png differ diff --git a/doc/v2/images/worker_security_group.png b/doc/v2/images/worker_security_group.png new file mode 100644 index 0000000000..57eb0265a3 Binary files /dev/null and b/doc/v2/images/worker_security_group.png differ diff --git a/doc/v2/images/workflow_of_CAPI.png b/doc/v2/images/workflow_of_CAPI.png new file mode 100644 index 0000000000..a4399ade04 Binary files /dev/null and b/doc/v2/images/workflow_of_CAPI.png differ diff --git 
a/go/pserver/client/c/test/CMakeLists.txt b/go/pserver/client/c/test/CMakeLists.txt index 411dc50332..4500b1f288 100644 --- a/go/pserver/client/c/test/CMakeLists.txt +++ b/go/pserver/client/c/test/CMakeLists.txt @@ -13,4 +13,3 @@ # limitations under the License. # cc_test(test_cclient SRCS test_cclient.c DEPS paddle_pserver_cclient paddle_go_optimizer) -add_style_check_target(test_cclient test_cclient.c) diff --git a/go/pserver/optimizer.go b/go/pserver/optimizer.go index f17577997b..eba0c47e19 100644 --- a/go/pserver/optimizer.go +++ b/go/pserver/optimizer.go @@ -16,7 +16,7 @@ package pserver // #cgo CFLAGS: -I ../../ // #cgo LDFLAGS: ${SRCDIR}/client/c/libpaddle_go_optimizer.a -lstdc++ -lm -// #include "paddle/optimizer/optimizer.h" +// #include "paddle/legacy/optimizer/optimizer.h" // #include // #include import "C" diff --git a/paddle/.gitignore b/paddle/.gitignore index 1c1c0c2c82..01904aa6ef 100644 --- a/paddle/.gitignore +++ b/paddle/.gitignore @@ -11,7 +11,6 @@ GTAGS *.pb.cc *.pb.h *_pb2.py -paddle_* output/ google/ Makefile diff --git a/paddle/CMakeLists.txt b/paddle/CMakeLists.txt index 8b1ca5e165..6653244507 100644 --- a/paddle/CMakeLists.txt +++ b/paddle/CMakeLists.txt @@ -1,29 +1,29 @@ if(NOT WITH_FLUID_ONLY) - add_subdirectory(cuda) - add_subdirectory(function) - add_subdirectory(utils) - add_subdirectory(math) - add_subdirectory(gserver) - add_subdirectory(parameter) + add_subdirectory(legacy/cuda) + add_subdirectory(legacy/function) + add_subdirectory(legacy/utils) + add_subdirectory(legacy/math) + add_subdirectory(legacy/gserver) + add_subdirectory(legacy/parameter) if(MOBILE_INFERENCE) - add_subdirectory(capi) + add_subdirectory(legacy/capi) else() - add_subdirectory(pserver) - add_subdirectory(trainer) + add_subdirectory(legacy/pserver) + add_subdirectory(legacy/trainer) add_subdirectory(scripts) if(WITH_C_API) - add_subdirectory(capi) + add_subdirectory(legacy/capi) endif() if(WITH_SWIG_PY) - add_subdirectory(api) + add_subdirectory(legacy/api) endif() endif() endif() add_subdirectory(testing) -if(NOT MOBILE_INFERENCE AND NOT RPI) +if(NOT MOBILE_INFERENCE AND NOT RPI AND NOT WITH_C_API) add_subdirectory(fluid) endif() diff --git a/paddle/capi/examples/model_inference/multi_thread/convert_protobin.sh b/paddle/capi/examples/model_inference/multi_thread/convert_protobin.sh deleted file mode 120000 index 3c1b353352..0000000000 --- a/paddle/capi/examples/model_inference/multi_thread/convert_protobin.sh +++ /dev/null @@ -1 +0,0 @@ -../dense/convert_protobin.sh \ No newline at end of file diff --git a/paddle/capi/examples/model_inference/sequence/convert_protobin.sh b/paddle/capi/examples/model_inference/sequence/convert_protobin.sh deleted file mode 120000 index 3c1b353352..0000000000 --- a/paddle/capi/examples/model_inference/sequence/convert_protobin.sh +++ /dev/null @@ -1 +0,0 @@ -../dense/convert_protobin.sh \ No newline at end of file diff --git a/paddle/capi/examples/model_inference/sparse_binary/convert_protobin.sh b/paddle/capi/examples/model_inference/sparse_binary/convert_protobin.sh deleted file mode 120000 index 3c1b353352..0000000000 --- a/paddle/capi/examples/model_inference/sparse_binary/convert_protobin.sh +++ /dev/null @@ -1 +0,0 @@ -../dense/convert_protobin.sh \ No newline at end of file diff --git a/contrib/float16/.gitignore b/paddle/contrib/float16/.gitignore similarity index 100% rename from contrib/float16/.gitignore rename to paddle/contrib/float16/.gitignore diff --git a/contrib/float16/README.md b/paddle/contrib/float16/README.md similarity index 
99% rename from contrib/float16/README.md rename to paddle/contrib/float16/README.md index ded959c47c..58b4a50666 100644 --- a/contrib/float16/README.md +++ b/paddle/contrib/float16/README.md @@ -89,7 +89,7 @@ cd Paddle # to `FROM nvidia/cuda:9.0-cudnn7-devel-ubuntu16.04` and similarly for other configurations nvidia-docker build -t paddle:float16 . # After running this, different results will be written to different log files in Paddle/contrib/float16/ -nvidia-docker run -it -v $PWD:/paddle paddle:float16 /paddle/contrib/float16/run_float16_demo.sh +nvidia-docker run -it -v $PWD:/paddle paddle:float16 /paddle/paddle/contrib/float16/run_float16_demo.sh ``` #### Accuracy diff --git a/contrib/float16/float16_benchmark.md b/paddle/contrib/float16/float16_benchmark.md similarity index 100% rename from contrib/float16/float16_benchmark.md rename to paddle/contrib/float16/float16_benchmark.md diff --git a/contrib/float16/float16_inference_demo.py b/paddle/contrib/float16/float16_inference_demo.py similarity index 100% rename from contrib/float16/float16_inference_demo.py rename to paddle/contrib/float16/float16_inference_demo.py diff --git a/contrib/float16/float16_transpiler.py b/paddle/contrib/float16/float16_transpiler.py similarity index 98% rename from contrib/float16/float16_transpiler.py rename to paddle/contrib/float16/float16_transpiler.py index 91ba101edb..66e0345c29 100644 --- a/contrib/float16/float16_transpiler.py +++ b/paddle/contrib/float16/float16_transpiler.py @@ -118,7 +118,7 @@ class Float16Transpiler: for var in self.block.vars.keys(): if var not in args: - self.block.remove_var(var) + self.block._remove_var(var) def _modify_feed_fetch(self): ''' @@ -165,7 +165,7 @@ class Float16Transpiler: dtype=core.VarDesc.VarType.FP16, shape=var.shape, persistable=var.persistable) - self.block.insert_op( + self.block._insert_op( i + 1, type="cast", inputs={"X": var}, @@ -188,7 +188,7 @@ class Float16Transpiler: persistable=var.persistable) find_op(var) var.op.rename_output(var_name, tmp_var_name) - self.block.insert_op( + self.block._insert_op( i, type="cast", inputs={"X": tmp_var}, @@ -253,4 +253,4 @@ class Float16Transpiler: # old var will be replaced by the fp16 var in program desc self.input_map[var.name] = fp16_var_name - self.block.remove_var(var.name) + self.block._remove_var(var.name) diff --git a/contrib/float16/run_float16_demo.sh b/paddle/contrib/float16/run_float16_demo.sh similarity index 95% rename from contrib/float16/run_float16_demo.sh rename to paddle/contrib/float16/run_float16_demo.sh index d8a34ee67b..031225a85d 100755 --- a/contrib/float16/run_float16_demo.sh +++ b/paddle/contrib/float16/run_float16_demo.sh @@ -3,7 +3,7 @@ BUILD_PATH=/paddle/fp16_build WHEEL_PATH=$BUILD_PATH/python/dist INFER_PATH=$BUILD_PATH/paddle/fluid/inference/tests/book -DEMO_PATH=/paddle/contrib/float16 +DEMO_PATH=/paddle/paddle/contrib/float16 # Use the single most powerful CUDA GPU on your machine export CUDA_VISIBLE_DEVICES=0 @@ -50,7 +50,6 @@ do --repeat=1 \ $INFER_PATH/test_inference_image_classification_vgg \ - --data_set=imagenet \ --dirname=$DEMO_PATH/image_classification_imagenet_vgg.inference.model \ --fp16_dirname=$DEMO_PATH/float16_image_classification_imagenet_vgg.inference.model \ --repeat=$REPEAT \ @@ -68,7 +67,6 @@ do --repeat=1 \ $INFER_PATH/test_inference_image_classification_resnet \ - --data_set=imagenet \ --dirname=$DEMO_PATH/image_classification_imagenet_resnet.inference.model \ --fp16_dirname=$DEMO_PATH/float16_image_classification_imagenet_resnet.inference.model \ 
--repeat=$REPEAT \ @@ -86,7 +84,6 @@ do --repeat=1 \ $INFER_PATH/test_inference_image_classification_vgg \ - --data_set=cifar10 \ --dirname=$DEMO_PATH/image_classification_cifar10_vgg.inference.model \ --fp16_dirname=$DEMO_PATH/float16_image_classification_cifar10_vgg.inference.model \ --repeat=$REPEAT \ @@ -104,7 +101,6 @@ do --repeat=1 \ $INFER_PATH/test_inference_image_classification_vgg \ - --data_set=cifar10 \ --dirname=$DEMO_PATH/image_classification_cifar10_resnet.inference.model \ --fp16_dirname=$DEMO_PATH/float16_image_classification_cifar10_resnet.inference.model \ --repeat=$REPEAT \ diff --git a/paddle/fluid/API.spec b/paddle/fluid/API.spec new file mode 100644 index 0000000000..46e56981ea --- /dev/null +++ b/paddle/fluid/API.spec @@ -0,0 +1,410 @@ +paddle.fluid.Program.__init__ ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None) +paddle.fluid.Program.block ArgSpec(args=['self', 'index'], varargs=None, keywords=None, defaults=None) +paddle.fluid.Program.clone ArgSpec(args=['self', 'for_test'], varargs=None, keywords=None, defaults=(False,)) +paddle.fluid.Program.copy_data_info_from ArgSpec(args=['self', 'other'], varargs=None, keywords=None, defaults=None) +paddle.fluid.Program.create_block ArgSpec(args=['self', 'parent_idx'], varargs=None, keywords=None, defaults=(None,)) +paddle.fluid.Program.current_block ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None) +paddle.fluid.Program.get_desc ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None) +paddle.fluid.Program.global_block ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None) +paddle.fluid.Program.inference_optimize ArgSpec(args=['self', 'export_for_deployment'], varargs=None, keywords=None, defaults=(True,)) +paddle.fluid.Program.list_vars ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None) +paddle.fluid.Program.optimized_guard ArgSpec(args=[], varargs='args', keywords='kwds', defaults=None) +paddle.fluid.Program.parse_from_string ArgSpec(args=['binary_str'], varargs=None, keywords=None, defaults=None) +paddle.fluid.Program.prune ArgSpec(args=['self', 'targets'], varargs=None, keywords=None, defaults=None) +paddle.fluid.Program.rollback ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None) +paddle.fluid.Program.to_string ArgSpec(args=['self', 'throw_on_error', 'with_details'], varargs=None, keywords=None, defaults=(False,)) +paddle.fluid.Operator.__init__ ArgSpec(args=['self', 'block', 'desc', 'type', 'inputs', 'outputs', 'attrs'], varargs=None, keywords=None, defaults=(None, None, None, None)) +paddle.fluid.Operator.all_attrs ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None) +paddle.fluid.Operator.attr ArgSpec(args=['self', 'name'], varargs=None, keywords=None, defaults=None) +paddle.fluid.Operator.attr_type ArgSpec(args=['self', 'name'], varargs=None, keywords=None, defaults=None) +paddle.fluid.Operator.block_attr ArgSpec(args=['self', 'name'], varargs=None, keywords=None, defaults=None) +paddle.fluid.Operator.block_attr_id ArgSpec(args=['self', 'name'], varargs=None, keywords=None, defaults=None) +paddle.fluid.Operator.blocks_attr ArgSpec(args=['self', 'name'], varargs=None, keywords=None, defaults=None) +paddle.fluid.Operator.blocks_attr_ids ArgSpec(args=['self', 'name'], varargs=None, keywords=None, defaults=None) +paddle.fluid.Operator.has_attr ArgSpec(args=['self', 'name'], varargs=None, keywords=None, defaults=None) +paddle.fluid.Operator.has_kernel ArgSpec(args=['self', 'op_type'], varargs=None, keywords=None, 
defaults=None) +paddle.fluid.Operator.input ArgSpec(args=['self', 'name'], varargs=None, keywords=None, defaults=None) +paddle.fluid.Operator.output ArgSpec(args=['self', 'name'], varargs=None, keywords=None, defaults=None) +paddle.fluid.Operator.rename_input ArgSpec(args=['self', 'old_name', 'new_name'], varargs=None, keywords=None, defaults=None) +paddle.fluid.Operator.rename_output ArgSpec(args=['self', 'old_name', 'new_name'], varargs=None, keywords=None, defaults=None) +paddle.fluid.Operator.set_attr ArgSpec(args=['self', 'name', 'val'], varargs=None, keywords=None, defaults=None) +paddle.fluid.Operator.to_string ArgSpec(args=['self', 'throw_on_error'], varargs=None, keywords=None, defaults=None) +paddle.fluid.Parameter.__init__ ArgSpec(args=['self', 'block', 'shape', 'dtype'], varargs=None, keywords='kwargs', defaults=None) +paddle.fluid.Parameter.astype ArgSpec(args=['self', 'dtype'], varargs=None, keywords=None, defaults=None) +paddle.fluid.Parameter.to_string ArgSpec(args=['self', 'throw_on_error', 'with_details'], varargs=None, keywords=None, defaults=(False,)) +paddle.fluid.default_startup_program ArgSpec(args=[], varargs=None, keywords=None, defaults=None) +paddle.fluid.default_main_program ArgSpec(args=[], varargs=None, keywords=None, defaults=None) +paddle.fluid.program_guard ArgSpec(args=[], varargs='args', keywords='kwds', defaults=None) +paddle.fluid.get_var ArgSpec(args=['name', 'program'], varargs=None, keywords=None, defaults=(None,)) +paddle.fluid.Executor.__init__ ArgSpec(args=['self', 'place'], varargs=None, keywords=None, defaults=None) +paddle.fluid.Executor.close ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None) +paddle.fluid.Executor.run ArgSpec(args=['self', 'program', 'feed', 'fetch_list', 'feed_var_name', 'fetch_var_name', 'scope', 'return_numpy', 'use_program_cache'], varargs=None, keywords=None, defaults=(None, None, None, 'feed', 'fetch', None, True, False)) +paddle.fluid.global_scope ArgSpec(args=[], varargs=None, keywords=None, defaults=None) +paddle.fluid.scope_guard ArgSpec(args=[], varargs='args', keywords='kwds', defaults=None) +paddle.fluid.Trainer.__init__ ArgSpec(args=['self', 'train_func', 'optimizer_func', 'param_path', 'place', 'parallel', 'checkpoint_config'], varargs=None, keywords=None, defaults=(None, None, False, None)) +paddle.fluid.Trainer.save_params ArgSpec(args=['self', 'param_path'], varargs=None, keywords=None, defaults=None) +paddle.fluid.Trainer.stop ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None) +paddle.fluid.Trainer.test ArgSpec(args=['self', 'reader', 'feed_order'], varargs=None, keywords=None, defaults=None) +paddle.fluid.Trainer.train ArgSpec(args=['self', 'num_epochs', 'event_handler', 'reader', 'feed_order'], varargs=None, keywords=None, defaults=(None, None)) +paddle.fluid.BeginEpochEvent.__init__ ArgSpec(args=['self', 'epoch_id'], varargs=None, keywords=None, defaults=None) +paddle.fluid.EndEpochEvent.__init__ ArgSpec(args=['self', 'epoch_id'], varargs=None, keywords=None, defaults=None) +paddle.fluid.BeginStepEvent.__init__ ArgSpec(args=['self', 'epoch_id', 'step_id'], varargs=None, keywords=None, defaults=None) +paddle.fluid.EndStepEvent.__init__ ArgSpec(args=['self', 'epoch_id', 'step_id', 'metrics'], varargs=None, keywords=None, defaults=None) +paddle.fluid.CheckpointConfig.__init__ ArgSpec(args=['self', 'checkpoint_dir', 'max_num_checkpoints', 'epoch_interval', 'step_interval'], varargs=None, keywords=None, defaults=(None, 3, 1, 10)) +paddle.fluid.Inferencer.__init__ 
ArgSpec(args=['self', 'infer_func', 'param_path', 'place', 'parallel'], varargs=None, keywords=None, defaults=(None, False)) +paddle.fluid.Inferencer.infer ArgSpec(args=['self', 'inputs', 'return_numpy'], varargs=None, keywords=None, defaults=(True,)) +paddle.fluid.DistributeTranspiler.__init__ ArgSpec(args=['self', 'config'], varargs=None, keywords=None, defaults=(None,)) +paddle.fluid.DistributeTranspiler.get_pserver_program ArgSpec(args=['self', 'endpoint'], varargs=None, keywords=None, defaults=None) +paddle.fluid.DistributeTranspiler.get_startup_program ArgSpec(args=['self', 'endpoint', 'pserver_program'], varargs=None, keywords=None, defaults=None) +paddle.fluid.DistributeTranspiler.get_trainer_program ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None) +paddle.fluid.DistributeTranspiler.transpile ArgSpec(args=['self', 'trainer_id', 'program', 'pservers', 'trainers', 'sync_mode'], varargs=None, keywords=None, defaults=(None, '127.0.0.1:6174', 1, True)) +paddle.fluid.InferenceTranspiler.__init__ +paddle.fluid.InferenceTranspiler.transpile ArgSpec(args=['self', 'program', 'place', 'scope'], varargs=None, keywords=None, defaults=(None,)) +paddle.fluid.memory_optimize ArgSpec(args=['input_program', 'skip_opt_set', 'print_log', 'level'], varargs=None, keywords=None, defaults=(None, False, 0)) +paddle.fluid.release_memory ArgSpec(args=['input_program', 'skip_opt_set'], varargs=None, keywords=None, defaults=(None,)) +paddle.fluid.DistributeTranspilerConfig.__init__ +paddle.fluid.ParallelExecutor.__init__ ArgSpec(args=['self', 'use_cuda', 'loss_name', 'main_program', 'share_vars_from', 'exec_strategy', 'build_strategy', 'num_trainers', 'trainer_id'], varargs=None, keywords='kwargs', defaults=(None, None, None, None, None, 1, 0)) +paddle.fluid.ParallelExecutor.run ArgSpec(args=['self', 'fetch_list', 'feed', 'feed_dict', 'return_numpy'], varargs=None, keywords=None, defaults=(None, None, True)) +paddle.fluid.ExecutionStrategy.__init__ __init__(self: paddle.fluid.core.ExecutionStrategy) -> None +paddle.fluid.BuildStrategy.GradientScaleStrategy.__init__ __init__(self: paddle.fluid.core.GradientScaleStrategy, arg0: int) -> None +paddle.fluid.BuildStrategy.ReduceStrategy.__init__ __init__(self: paddle.fluid.core.ReduceStrategy, arg0: int) -> None +paddle.fluid.BuildStrategy.__init__ __init__(self: paddle.fluid.core.BuildStrategy) -> None +paddle.fluid.create_lod_tensor ArgSpec(args=['data', 'recursive_seq_lens', 'place'], varargs=None, keywords=None, defaults=None) +paddle.fluid.create_random_int_lodtensor ArgSpec(args=['recursive_seq_lens', 'base_shape', 'place', 'low', 'high'], varargs=None, keywords=None, defaults=None) +paddle.fluid.io.save_vars ArgSpec(args=['executor', 'dirname', 'main_program', 'vars', 'predicate', 'filename'], varargs=None, keywords=None, defaults=(None, None, None, None)) +paddle.fluid.io.save_params ArgSpec(args=['executor', 'dirname', 'main_program', 'filename'], varargs=None, keywords=None, defaults=(None, None)) +paddle.fluid.io.save_persistables ArgSpec(args=['executor', 'dirname', 'main_program', 'filename'], varargs=None, keywords=None, defaults=(None, None)) +paddle.fluid.io.load_vars ArgSpec(args=['executor', 'dirname', 'main_program', 'vars', 'predicate', 'filename'], varargs=None, keywords=None, defaults=(None, None, None, None)) +paddle.fluid.io.load_params ArgSpec(args=['executor', 'dirname', 'main_program', 'filename'], varargs=None, keywords=None, defaults=(None, None)) +paddle.fluid.io.load_persistables ArgSpec(args=['executor', 'dirname', 
'main_program', 'filename'], varargs=None, keywords=None, defaults=(None, None)) +paddle.fluid.io.save_inference_model ArgSpec(args=['dirname', 'feeded_var_names', 'target_vars', 'executor', 'main_program', 'model_filename', 'params_filename', 'export_for_deployment'], varargs=None, keywords=None, defaults=(None, None, None, True)) +paddle.fluid.io.load_inference_model ArgSpec(args=['dirname', 'executor', 'model_filename', 'params_filename'], varargs=None, keywords=None, defaults=(None, None)) +paddle.fluid.io.get_inference_program ArgSpec(args=['target_vars', 'main_program'], varargs=None, keywords=None, defaults=(None,)) +paddle.fluid.initializer.ConstantInitializer.__init__ ArgSpec(args=['self', 'value', 'force_cpu'], varargs=None, keywords=None, defaults=(0.0, False)) +paddle.fluid.initializer.UniformInitializer.__init__ ArgSpec(args=['self', 'low', 'high', 'seed'], varargs=None, keywords=None, defaults=(-1.0, 1.0, 0)) +paddle.fluid.initializer.NormalInitializer.__init__ ArgSpec(args=['self', 'loc', 'scale', 'seed'], varargs=None, keywords=None, defaults=(0.0, 1.0, 0)) +paddle.fluid.initializer.XavierInitializer.__init__ ArgSpec(args=['self', 'uniform', 'fan_in', 'fan_out', 'seed'], varargs=None, keywords=None, defaults=(True, None, None, 0)) +paddle.fluid.initializer.BilinearInitializer.__init__ ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None) +paddle.fluid.initializer.MSRAInitializer.__init__ ArgSpec(args=['self', 'uniform', 'fan_in', 'seed'], varargs=None, keywords=None, defaults=(True, None, 0)) +paddle.fluid.initializer.force_init_on_cpu ArgSpec(args=[], varargs=None, keywords=None, defaults=None) +paddle.fluid.initializer.init_on_cpu ArgSpec(args=[], varargs='args', keywords='kwds', defaults=None) +paddle.fluid.layers.fc ArgSpec(args=['input', 'size', 'num_flatten_dims', 'param_attr', 'bias_attr', 'use_mkldnn', 'act', 'is_test', 'name'], varargs=None, keywords=None, defaults=(1, None, None, False, None, False, None)) +paddle.fluid.layers.embedding ArgSpec(args=['input', 'size', 'is_sparse', 'is_distributed', 'padding_idx', 'param_attr', 'dtype'], varargs=None, keywords=None, defaults=(False, False, None, None, 'float32')) +paddle.fluid.layers.dynamic_lstm ArgSpec(args=['input', 'size', 'h_0', 'c_0', 'param_attr', 'bias_attr', 'use_peepholes', 'is_reverse', 'gate_activation', 'cell_activation', 'candidate_activation', 'dtype', 'name'], varargs=None, keywords=None, defaults=(None, None, None, None, True, False, 'sigmoid', 'tanh', 'tanh', 'float32', None)) +paddle.fluid.layers.dynamic_lstmp ArgSpec(args=['input', 'size', 'proj_size', 'param_attr', 'bias_attr', 'use_peepholes', 'is_reverse', 'gate_activation', 'cell_activation', 'candidate_activation', 'proj_activation', 'dtype', 'name'], varargs=None, keywords=None, defaults=(None, None, True, False, 'sigmoid', 'tanh', 'tanh', 'tanh', 'float32', None)) +paddle.fluid.layers.dynamic_gru ArgSpec(args=['input', 'size', 'param_attr', 'bias_attr', 'is_reverse', 'gate_activation', 'candidate_activation', 'h_0'], varargs=None, keywords=None, defaults=(None, None, False, 'sigmoid', 'tanh', None)) +paddle.fluid.layers.gru_unit ArgSpec(args=['input', 'hidden', 'size', 'param_attr', 'bias_attr', 'activation', 'gate_activation'], varargs=None, keywords=None, defaults=(None, None, 'tanh', 'sigmoid')) +paddle.fluid.layers.linear_chain_crf ArgSpec(args=['input', 'label', 'param_attr'], varargs=None, keywords=None, defaults=(None,)) +paddle.fluid.layers.crf_decoding ArgSpec(args=['input', 'param_attr', 'label'], varargs=None, 
keywords=None, defaults=(None,)) +paddle.fluid.layers.cos_sim ArgSpec(args=['X', 'Y'], varargs=None, keywords=None, defaults=None) +paddle.fluid.layers.cross_entropy ArgSpec(args=['input', 'label', 'soft_label'], varargs=None, keywords=None, defaults=(False,)) +paddle.fluid.layers.square_error_cost ArgSpec(args=['input', 'label'], varargs=None, keywords=None, defaults=None) +paddle.fluid.layers.chunk_eval ArgSpec(args=['input', 'label', 'chunk_scheme', 'num_chunk_types', 'excluded_chunk_types'], varargs=None, keywords=None, defaults=(None,)) +paddle.fluid.layers.sequence_conv ArgSpec(args=['input', 'num_filters', 'filter_size', 'filter_stride', 'padding', 'bias_attr', 'param_attr', 'act'], varargs=None, keywords=None, defaults=(3, 1, None, None, None, None)) +paddle.fluid.layers.conv2d ArgSpec(args=['input', 'num_filters', 'filter_size', 'stride', 'padding', 'dilation', 'groups', 'param_attr', 'bias_attr', 'use_cudnn', 'use_mkldnn', 'act', 'name'], varargs=None, keywords=None, defaults=(1, 0, 1, None, None, None, True, False, None, None)) +paddle.fluid.layers.conv3d ArgSpec(args=['input', 'num_filters', 'filter_size', 'stride', 'padding', 'dilation', 'groups', 'param_attr', 'bias_attr', 'use_cudnn', 'use_mkldnn', 'act', 'name'], varargs=None, keywords=None, defaults=(1, 0, 1, None, None, None, True, False, None, None)) +paddle.fluid.layers.sequence_pool ArgSpec(args=['input', 'pool_type'], varargs=None, keywords=None, defaults=None) +paddle.fluid.layers.sequence_softmax ArgSpec(args=['input', 'param_attr', 'bias_attr', 'use_cudnn'], varargs=None, keywords=None, defaults=(None, None, True)) +paddle.fluid.layers.softmax ArgSpec(args=['input', 'param_attr', 'bias_attr', 'use_cudnn', 'name'], varargs=None, keywords=None, defaults=(None, None, True, None)) +paddle.fluid.layers.pool2d ArgSpec(args=['input', 'pool_size', 'pool_type', 'pool_stride', 'pool_padding', 'global_pooling', 'use_cudnn', 'ceil_mode', 'use_mkldnn', 'name'], varargs=None, keywords=None, defaults=(-1, 'max', 1, 0, False, True, False, False, None)) +paddle.fluid.layers.pool3d ArgSpec(args=['input', 'pool_size', 'pool_type', 'pool_stride', 'pool_padding', 'global_pooling', 'use_cudnn', 'ceil_mode', 'use_mkldnn', 'name'], varargs=None, keywords=None, defaults=(-1, 'max', 1, 0, False, True, False, False, None)) +paddle.fluid.layers.batch_norm ArgSpec(args=['input', 'act', 'is_test', 'momentum', 'epsilon', 'param_attr', 'bias_attr', 'data_layout', 'in_place', 'use_mkldnn', 'name', 'moving_mean_name', 'moving_variance_name', 'do_model_average_for_mean_and_var', 'fuse_with_relu'], varargs=None, keywords=None, defaults=(None, False, 0.9, 1e-05, None, None, 'NCHW', False, False, None, None, None, False, False)) +paddle.fluid.layers.beam_search_decode ArgSpec(args=['ids', 'scores', 'beam_size', 'end_id', 'name'], varargs=None, keywords=None, defaults=(None,)) +paddle.fluid.layers.conv2d_transpose ArgSpec(args=['input', 'num_filters', 'output_size', 'filter_size', 'padding', 'stride', 'dilation', 'groups', 'param_attr', 'bias_attr', 'use_cudnn', 'act', 'name'], varargs=None, keywords=None, defaults=(None, None, 0, 1, 1, None, None, None, True, None, None)) +paddle.fluid.layers.conv3d_transpose ArgSpec(args=['input', 'num_filters', 'output_size', 'filter_size', 'padding', 'stride', 'dilation', 'groups', 'param_attr', 'bias_attr', 'use_cudnn', 'act', 'name'], varargs=None, keywords=None, defaults=(None, None, 0, 1, 1, None, None, None, True, None, None)) +paddle.fluid.layers.sequence_expand ArgSpec(args=['x', 'y', 'ref_level', 'name'], 
varargs=None, keywords=None, defaults=(-1, None)) +paddle.fluid.layers.lstm_unit ArgSpec(args=['x_t', 'hidden_t_prev', 'cell_t_prev', 'forget_bias', 'param_attr', 'bias_attr', 'name'], varargs=None, keywords=None, defaults=(0.0, None, None, None)) +paddle.fluid.layers.reduce_sum ArgSpec(args=['input', 'dim', 'keep_dim', 'name'], varargs=None, keywords=None, defaults=(None, False, None)) +paddle.fluid.layers.reduce_mean ArgSpec(args=['input', 'dim', 'keep_dim', 'name'], varargs=None, keywords=None, defaults=(None, False, None)) +paddle.fluid.layers.reduce_max ArgSpec(args=['input', 'dim', 'keep_dim', 'name'], varargs=None, keywords=None, defaults=(None, False, None)) +paddle.fluid.layers.reduce_min ArgSpec(args=['input', 'dim', 'keep_dim', 'name'], varargs=None, keywords=None, defaults=(None, False, None)) +paddle.fluid.layers.reduce_prod ArgSpec(args=['input', 'dim', 'keep_dim', 'name'], varargs=None, keywords=None, defaults=(None, False, None)) +paddle.fluid.layers.sequence_first_step ArgSpec(args=['input'], varargs=None, keywords=None, defaults=None) +paddle.fluid.layers.sequence_last_step ArgSpec(args=['input'], varargs=None, keywords=None, defaults=None) +paddle.fluid.layers.dropout ArgSpec(args=['x', 'dropout_prob', 'is_test', 'seed', 'name'], varargs=None, keywords=None, defaults=(False, None, None)) +paddle.fluid.layers.split ArgSpec(args=['input', 'num_or_sections', 'dim', 'name'], varargs=None, keywords=None, defaults=(-1, None)) +paddle.fluid.layers.ctc_greedy_decoder ArgSpec(args=['input', 'blank', 'name'], varargs=None, keywords=None, defaults=(None,)) +paddle.fluid.layers.edit_distance ArgSpec(args=['input', 'label', 'normalized', 'ignored_tokens'], varargs=None, keywords=None, defaults=(True, None)) +paddle.fluid.layers.l2_normalize ArgSpec(args=['x', 'axis', 'epsilon', 'name'], varargs=None, keywords=None, defaults=(1e-12, None)) +paddle.fluid.layers.matmul ArgSpec(args=['x', 'y', 'transpose_x', 'transpose_y', 'name'], varargs=None, keywords=None, defaults=(False, False, None)) +paddle.fluid.layers.topk ArgSpec(args=['input', 'k', 'name'], varargs=None, keywords=None, defaults=(None,)) +paddle.fluid.layers.warpctc ArgSpec(args=['input', 'label', 'blank', 'norm_by_times'], varargs=None, keywords=None, defaults=(0, False)) +paddle.fluid.layers.sequence_reshape ArgSpec(args=['input', 'new_dim'], varargs=None, keywords=None, defaults=None) +paddle.fluid.layers.transpose ArgSpec(args=['x', 'perm', 'name'], varargs=None, keywords=None, defaults=(None,)) +paddle.fluid.layers.im2sequence ArgSpec(args=['input', 'filter_size', 'stride', 'padding', 'input_image_size', 'out_stride', 'name'], varargs=None, keywords=None, defaults=(1, 1, 0, None, 1, None)) +paddle.fluid.layers.nce ArgSpec(args=['input', 'label', 'num_total_classes', 'sample_weight', 'param_attr', 'bias_attr', 'num_neg_samples'], varargs=None, keywords=None, defaults=(None, None, None, None)) +paddle.fluid.layers.hsigmoid ArgSpec(args=['input', 'label', 'num_classes', 'param_attr', 'bias_attr'], varargs=None, keywords=None, defaults=(None, None)) +paddle.fluid.layers.beam_search ArgSpec(args=['pre_ids', 'pre_scores', 'ids', 'scores', 'beam_size', 'end_id', 'level', 'name'], varargs=None, keywords=None, defaults=(0, None)) +paddle.fluid.layers.row_conv ArgSpec(args=['input', 'future_context_size', 'param_attr', 'act'], varargs=None, keywords=None, defaults=(None, None)) +paddle.fluid.layers.multiplex ArgSpec(args=['inputs', 'index'], varargs=None, keywords=None, defaults=None) +paddle.fluid.layers.layer_norm 
ArgSpec(args=['input', 'scale', 'shift', 'begin_norm_axis', 'epsilon', 'param_attr', 'bias_attr', 'act', 'name'], varargs=None, keywords=None, defaults=(True, True, 1, 1e-05, None, None, None, None)) +paddle.fluid.layers.softmax_with_cross_entropy ArgSpec(args=['logits', 'label', 'soft_label'], varargs=None, keywords=None, defaults=(False,)) +paddle.fluid.layers.smooth_l1 ArgSpec(args=['x', 'y', 'inside_weight', 'outside_weight', 'sigma'], varargs=None, keywords=None, defaults=(None, None, None)) +paddle.fluid.layers.one_hot ArgSpec(args=['input', 'depth'], varargs=None, keywords=None, defaults=None) +paddle.fluid.layers.autoincreased_step_counter ArgSpec(args=['counter_name', 'begin', 'step'], varargs=None, keywords=None, defaults=(None, 1, 1)) +paddle.fluid.layers.reshape ArgSpec(args=['x', 'shape', 'actual_shape', 'act', 'inplace', 'name'], varargs=None, keywords=None, defaults=(None, None, True, None)) +paddle.fluid.layers.lod_reset ArgSpec(args=['x', 'y', 'target_lod'], varargs=None, keywords=None, defaults=(None, None)) +paddle.fluid.layers.lrn ArgSpec(args=['input', 'n', 'k', 'alpha', 'beta', 'name'], varargs=None, keywords=None, defaults=(5, 1.0, 0.0001, 0.75, None)) +paddle.fluid.layers.pad ArgSpec(args=['x', 'paddings', 'pad_value', 'name'], varargs=None, keywords=None, defaults=(0.0, None)) +paddle.fluid.layers.label_smooth ArgSpec(args=['label', 'prior_dist', 'epsilon', 'dtype', 'name'], varargs=None, keywords=None, defaults=(None, 0.1, 'float32', None)) +paddle.fluid.layers.roi_pool ArgSpec(args=['input', 'rois', 'pooled_height', 'pooled_width', 'spatial_scale'], varargs=None, keywords=None, defaults=(1, 1, 1.0)) +paddle.fluid.layers.dice_loss ArgSpec(args=['input', 'label', 'epsilon'], varargs=None, keywords=None, defaults=(1e-05,)) +paddle.fluid.layers.image_resize ArgSpec(args=['input', 'out_shape', 'scale', 'name', 'resample'], varargs=None, keywords=None, defaults=(None, None, None, 'BILINEAR')) +paddle.fluid.layers.image_resize_short ArgSpec(args=['input', 'out_short_len', 'resample'], varargs=None, keywords=None, defaults=('BILINEAR',)) +paddle.fluid.layers.resize_bilinear ArgSpec(args=['input', 'out_shape', 'scale', 'name'], varargs=None, keywords=None, defaults=(None, None, None)) +paddle.fluid.layers.gather ArgSpec(args=['input', 'index'], varargs=None, keywords=None, defaults=None) +paddle.fluid.layers.random_crop ArgSpec(args=['x', 'shape', 'seed'], varargs=None, keywords=None, defaults=(None,)) +paddle.fluid.layers.mean_iou ArgSpec(args=['input', 'label', 'num_classes'], varargs=None, keywords=None, defaults=None) +paddle.fluid.layers.relu ArgSpec(args=['x'], varargs=None, keywords=None, defaults=None) +paddle.fluid.layers.log ArgSpec(args=['x'], varargs=None, keywords=None, defaults=None) +paddle.fluid.layers.crop ArgSpec(args=['x', 'shape', 'offsets', 'name'], varargs=None, keywords=None, defaults=(None, None, None)) +paddle.fluid.layers.rank_loss ArgSpec(args=['label', 'left', 'right', 'name'], varargs=None, keywords=None, defaults=(None,)) +paddle.fluid.layers.data ArgSpec(args=['name', 'shape', 'append_batch_size', 'dtype', 'lod_level', 'type', 'stop_gradient'], varargs=None, keywords=None, defaults=(True, 'float32', 0, VarType.LOD_TENSOR, True)) +paddle.fluid.layers.open_recordio_file ArgSpec(args=['filename', 'shapes', 'lod_levels', 'dtypes', 'pass_num', 'for_parallel'], varargs=None, keywords=None, defaults=(1, True)) +paddle.fluid.layers.open_files ArgSpec(args=['filenames', 'shapes', 'lod_levels', 'dtypes', 'thread_num', 'buffer_size', 'pass_num', 
'is_test'], varargs=None, keywords=None, defaults=(None, None, 1, None)) +paddle.fluid.layers.read_file ArgSpec(args=['reader'], varargs=None, keywords=None, defaults=None) +paddle.fluid.layers.shuffle ArgSpec(args=['reader', 'buffer_size'], varargs=None, keywords=None, defaults=None) +paddle.fluid.layers.batch ArgSpec(args=['reader', 'batch_size'], varargs=None, keywords=None, defaults=None) +paddle.fluid.layers.double_buffer ArgSpec(args=['reader', 'place', 'name'], varargs=None, keywords=None, defaults=(None, None)) +paddle.fluid.layers.random_data_generator ArgSpec(args=['low', 'high', 'shapes', 'lod_levels', 'for_parallel'], varargs=None, keywords=None, defaults=(True,)) +paddle.fluid.layers.py_reader ArgSpec(args=['capacity', 'shapes', 'dtypes', 'lod_levels', 'name', 'use_double_buffer'], varargs=None, keywords=None, defaults=(None, None, True)) +paddle.fluid.layers.Preprocessor.__init__ ArgSpec(args=['self', 'reader', 'name'], varargs=None, keywords=None, defaults=(None,)) +paddle.fluid.layers.Preprocessor.block ArgSpec(args=[], varargs='args', keywords='kwds', defaults=None) +paddle.fluid.layers.Preprocessor.inputs ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None) +paddle.fluid.layers.Preprocessor.outputs ArgSpec(args=['self'], varargs='outs', keywords=None, defaults=None) +paddle.fluid.layers.load ArgSpec(args=['out', 'file_path', 'load_as_fp16'], varargs=None, keywords=None, defaults=(None,)) +paddle.fluid.layers.create_tensor ArgSpec(args=['dtype', 'name', 'persistable'], varargs=None, keywords=None, defaults=(None, False)) +paddle.fluid.layers.create_parameter ArgSpec(args=['shape', 'dtype', 'name', 'attr', 'is_bias', 'default_initializer'], varargs=None, keywords=None, defaults=(None, None, False, None)) +paddle.fluid.layers.create_global_var ArgSpec(args=['shape', 'value', 'dtype', 'persistable', 'force_cpu', 'name'], varargs=None, keywords=None, defaults=(False, False, None)) +paddle.fluid.layers.cast ArgSpec(args=['x', 'dtype'], varargs=None, keywords=None, defaults=None) +paddle.fluid.layers.concat ArgSpec(args=['input', 'axis', 'name'], varargs=None, keywords=None, defaults=(0, None)) +paddle.fluid.layers.sums ArgSpec(args=['input', 'out'], varargs=None, keywords=None, defaults=(None,)) +paddle.fluid.layers.assign ArgSpec(args=['input', 'output'], varargs=None, keywords=None, defaults=(None,)) +paddle.fluid.layers.fill_constant_batch_size_like ArgSpec(args=['input', 'shape', 'dtype', 'value', 'input_dim_idx', 'output_dim_idx'], varargs=None, keywords=None, defaults=(0, 0)) +paddle.fluid.layers.fill_constant ArgSpec(args=['shape', 'dtype', 'value', 'force_cpu', 'out'], varargs=None, keywords=None, defaults=(False, None)) +paddle.fluid.layers.argmin ArgSpec(args=['x', 'axis'], varargs=None, keywords=None, defaults=(0,)) +paddle.fluid.layers.argmax ArgSpec(args=['x', 'axis'], varargs=None, keywords=None, defaults=(0,)) +paddle.fluid.layers.argsort ArgSpec(args=['input', 'axis', 'name'], varargs=None, keywords=None, defaults=(-1, None)) +paddle.fluid.layers.ones ArgSpec(args=['shape', 'dtype', 'force_cpu'], varargs=None, keywords=None, defaults=(False,)) +paddle.fluid.layers.zeros ArgSpec(args=['shape', 'dtype', 'force_cpu'], varargs=None, keywords=None, defaults=(False,)) +paddle.fluid.layers.reverse ArgSpec(args=['x', 'axis'], varargs=None, keywords=None, defaults=None) +paddle.fluid.layers.While.__init__ ArgSpec(args=['self', 'cond', 'name'], varargs=None, keywords=None, defaults=(None,)) +paddle.fluid.layers.While.block ArgSpec(args=['self'], 
varargs=None, keywords=None, defaults=None) +paddle.fluid.layers.Switch.__init__ ArgSpec(args=['self', 'name'], varargs=None, keywords=None, defaults=(None,)) +paddle.fluid.layers.Switch.case ArgSpec(args=['self', 'condition'], varargs=None, keywords=None, defaults=None) +paddle.fluid.layers.Switch.default ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None) +paddle.fluid.layers.increment ArgSpec(args=['x', 'value', 'in_place'], varargs=None, keywords=None, defaults=(1.0, True)) +paddle.fluid.layers.array_write ArgSpec(args=['x', 'i', 'array'], varargs=None, keywords=None, defaults=(None,)) +paddle.fluid.layers.create_array ArgSpec(args=['dtype'], varargs=None, keywords=None, defaults=None) +paddle.fluid.layers.less_than ArgSpec(args=['x', 'y', 'force_cpu', 'cond'], varargs=None, keywords='ignored', defaults=(None, None)) +paddle.fluid.layers.equal ArgSpec(args=['x', 'y', 'cond'], varargs=None, keywords='ignored', defaults=(None,)) +paddle.fluid.layers.array_read ArgSpec(args=['array', 'i'], varargs=None, keywords=None, defaults=None) +paddle.fluid.layers.array_length ArgSpec(args=['array'], varargs=None, keywords=None, defaults=None) +paddle.fluid.layers.IfElse.__init__ ArgSpec(args=['self', 'cond', 'name'], varargs=None, keywords=None, defaults=(None,)) +paddle.fluid.layers.IfElse.false_block ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None) +paddle.fluid.layers.IfElse.input ArgSpec(args=['self', 'x'], varargs=None, keywords=None, defaults=None) +paddle.fluid.layers.IfElse.output ArgSpec(args=['self'], varargs='outs', keywords=None, defaults=None) +paddle.fluid.layers.IfElse.true_block ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None) +paddle.fluid.layers.DynamicRNN.__init__ ArgSpec(args=['self', 'name'], varargs=None, keywords=None, defaults=(None,)) +paddle.fluid.layers.DynamicRNN.block ArgSpec(args=[], varargs='args', keywords='kwds', defaults=None) +paddle.fluid.layers.DynamicRNN.memory ArgSpec(args=['self', 'init', 'shape', 'value', 'need_reorder', 'dtype'], varargs=None, keywords=None, defaults=(None, None, 0.0, False, 'float32')) +paddle.fluid.layers.DynamicRNN.output ArgSpec(args=['self'], varargs='outputs', keywords=None, defaults=None) +paddle.fluid.layers.DynamicRNN.static_input ArgSpec(args=['self', 'x'], varargs=None, keywords=None, defaults=None) +paddle.fluid.layers.DynamicRNN.step_input ArgSpec(args=['self', 'x'], varargs=None, keywords=None, defaults=None) +paddle.fluid.layers.DynamicRNN.update_memory ArgSpec(args=['self', 'ex_mem', 'new_mem'], varargs=None, keywords=None, defaults=None) +paddle.fluid.layers.StaticRNN.__init__ ArgSpec(args=['self', 'name'], varargs=None, keywords=None, defaults=(None,)) +paddle.fluid.layers.StaticRNN.memory ArgSpec(args=['self', 'init', 'shape', 'batch_ref', 'init_value', 'init_batch_dim_idx', 'ref_batch_dim_idx'], varargs=None, keywords=None, defaults=(None, None, None, 0.0, 0, 1)) +paddle.fluid.layers.StaticRNN.output ArgSpec(args=['self'], varargs='outputs', keywords=None, defaults=None) +paddle.fluid.layers.StaticRNN.step ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None) +paddle.fluid.layers.StaticRNN.step_input ArgSpec(args=['self', 'x'], varargs=None, keywords=None, defaults=None) +paddle.fluid.layers.StaticRNN.step_output ArgSpec(args=['self', 'o'], varargs=None, keywords=None, defaults=None) +paddle.fluid.layers.StaticRNN.update_memory ArgSpec(args=['self', 'mem', 'var'], varargs=None, keywords=None, defaults=None) +paddle.fluid.layers.reorder_lod_tensor_by_rank 
ArgSpec(args=['x', 'rank_table'], varargs=None, keywords=None, defaults=None) +paddle.fluid.layers.ParallelDo.__init__ ArgSpec(args=['self', 'places', 'use_nccl', 'name'], varargs=None, keywords=None, defaults=(False, None)) +paddle.fluid.layers.ParallelDo.do ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None) +paddle.fluid.layers.ParallelDo.get_parameters ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None) +paddle.fluid.layers.ParallelDo.parent_block ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None) +paddle.fluid.layers.ParallelDo.read_input ArgSpec(args=['self', 'var'], varargs=None, keywords=None, defaults=None) +paddle.fluid.layers.ParallelDo.write_output ArgSpec(args=['self', 'var'], varargs=None, keywords=None, defaults=None) +paddle.fluid.layers.Print ArgSpec(args=['input', 'first_n', 'message', 'summarize', 'print_tensor_name', 'print_tensor_type', 'print_tensor_shape', 'print_tensor_lod', 'print_phase'], varargs=None, keywords=None, defaults=(-1, None, -1, True, True, True, True, 'both')) +paddle.fluid.layers.is_empty ArgSpec(args=['x', 'cond'], varargs=None, keywords='ignored', defaults=(None,)) +paddle.fluid.layers.mean ArgSpec(args=[], varargs='args', keywords='kwargs', defaults=None) +paddle.fluid.layers.mul ArgSpec(args=[], varargs='args', keywords='kwargs', defaults=None) +paddle.fluid.layers.scale ArgSpec(args=[], varargs='args', keywords='kwargs', defaults=None) +paddle.fluid.layers.sigmoid_cross_entropy_with_logits ArgSpec(args=[], varargs='args', keywords='kwargs', defaults=None) +paddle.fluid.layers.elementwise_add ArgSpec(args=[], varargs='args', keywords='kwargs', defaults=None) +paddle.fluid.layers.elementwise_div ArgSpec(args=[], varargs='args', keywords='kwargs', defaults=None) +paddle.fluid.layers.elementwise_sub ArgSpec(args=[], varargs='args', keywords='kwargs', defaults=None) +paddle.fluid.layers.elementwise_mul ArgSpec(args=[], varargs='args', keywords='kwargs', defaults=None) +paddle.fluid.layers.elementwise_max ArgSpec(args=[], varargs='args', keywords='kwargs', defaults=None) +paddle.fluid.layers.elementwise_min ArgSpec(args=[], varargs='args', keywords='kwargs', defaults=None) +paddle.fluid.layers.elementwise_pow ArgSpec(args=[], varargs='args', keywords='kwargs', defaults=None) +paddle.fluid.layers.clip ArgSpec(args=[], varargs='args', keywords='kwargs', defaults=None) +paddle.fluid.layers.clip_by_norm ArgSpec(args=[], varargs='args', keywords='kwargs', defaults=None) +paddle.fluid.layers.logical_and ArgSpec(args=[], varargs='args', keywords='kwargs', defaults=None) +paddle.fluid.layers.logical_or ArgSpec(args=[], varargs='args', keywords='kwargs', defaults=None) +paddle.fluid.layers.logical_xor ArgSpec(args=[], varargs='args', keywords='kwargs', defaults=None) +paddle.fluid.layers.logical_not ArgSpec(args=[], varargs='args', keywords='kwargs', defaults=None) +paddle.fluid.layers.uniform_random_batch_size_like ArgSpec(args=[], varargs='args', keywords='kwargs', defaults=None) +paddle.fluid.layers.gaussian_random ArgSpec(args=[], varargs='args', keywords='kwargs', defaults=None) +paddle.fluid.layers.gaussian_random_batch_size_like ArgSpec(args=[], varargs='args', keywords='kwargs', defaults=None) +paddle.fluid.layers.scatter ArgSpec(args=[], varargs='args', keywords='kwargs', defaults=None) +paddle.fluid.layers.sum ArgSpec(args=[], varargs='args', keywords='kwargs', defaults=None) +paddle.fluid.layers.slice ArgSpec(args=[], varargs='args', keywords='kwargs', defaults=None) +paddle.fluid.layers.shape 
ArgSpec(args=[], varargs='args', keywords='kwargs', defaults=None) +paddle.fluid.layers.maxout ArgSpec(args=[], varargs='args', keywords='kwargs', defaults=None) +paddle.fluid.layers.sigmoid ArgSpec(args=[], varargs='args', keywords='kwargs', defaults=None) +paddle.fluid.layers.logsigmoid ArgSpec(args=[], varargs='args', keywords='kwargs', defaults=None) +paddle.fluid.layers.exp ArgSpec(args=[], varargs='args', keywords='kwargs', defaults=None) +paddle.fluid.layers.tanh ArgSpec(args=[], varargs='args', keywords='kwargs', defaults=None) +paddle.fluid.layers.tanh_shrink ArgSpec(args=[], varargs='args', keywords='kwargs', defaults=None) +paddle.fluid.layers.softshrink ArgSpec(args=[], varargs='args', keywords='kwargs', defaults=None) +paddle.fluid.layers.sqrt ArgSpec(args=[], varargs='args', keywords='kwargs', defaults=None) +paddle.fluid.layers.abs ArgSpec(args=[], varargs='args', keywords='kwargs', defaults=None) +paddle.fluid.layers.ceil ArgSpec(args=[], varargs='args', keywords='kwargs', defaults=None) +paddle.fluid.layers.floor ArgSpec(args=[], varargs='args', keywords='kwargs', defaults=None) +paddle.fluid.layers.cos ArgSpec(args=[], varargs='args', keywords='kwargs', defaults=None) +paddle.fluid.layers.sin ArgSpec(args=[], varargs='args', keywords='kwargs', defaults=None) +paddle.fluid.layers.round ArgSpec(args=[], varargs='args', keywords='kwargs', defaults=None) +paddle.fluid.layers.reciprocal ArgSpec(args=[], varargs='args', keywords='kwargs', defaults=None) +paddle.fluid.layers.square ArgSpec(args=[], varargs='args', keywords='kwargs', defaults=None) +paddle.fluid.layers.softplus ArgSpec(args=[], varargs='args', keywords='kwargs', defaults=None) +paddle.fluid.layers.softsign ArgSpec(args=[], varargs='args', keywords='kwargs', defaults=None) +paddle.fluid.layers.brelu ArgSpec(args=[], varargs='args', keywords='kwargs', defaults=None) +paddle.fluid.layers.leaky_relu ArgSpec(args=[], varargs='args', keywords='kwargs', defaults=None) +paddle.fluid.layers.soft_relu ArgSpec(args=[], varargs='args', keywords='kwargs', defaults=None) +paddle.fluid.layers.elu ArgSpec(args=[], varargs='args', keywords='kwargs', defaults=None) +paddle.fluid.layers.relu6 ArgSpec(args=[], varargs='args', keywords='kwargs', defaults=None) +paddle.fluid.layers.pow ArgSpec(args=[], varargs='args', keywords='kwargs', defaults=None) +paddle.fluid.layers.stanh ArgSpec(args=[], varargs='args', keywords='kwargs', defaults=None) +paddle.fluid.layers.hard_sigmoid ArgSpec(args=[], varargs='args', keywords='kwargs', defaults=None) +paddle.fluid.layers.swish ArgSpec(args=[], varargs='args', keywords='kwargs', defaults=None) +paddle.fluid.layers.uniform_random ArgSpec(args=['shape', 'dtype', 'min', 'max', 'seed'], varargs=None, keywords=None, defaults=(None, None, None, None)) +paddle.fluid.layers.hard_shrink ArgSpec(args=['x', 'threshold'], varargs=None, keywords=None, defaults=(None,)) +paddle.fluid.layers.cumsum ArgSpec(args=['x', 'axis', 'exclusive', 'reverse'], varargs=None, keywords=None, defaults=(None, None, None)) +paddle.fluid.layers.thresholded_relu ArgSpec(args=['x', 'threshold'], varargs=None, keywords=None, defaults=(None,)) +paddle.fluid.layers.prior_box ArgSpec(args=['input', 'image', 'min_sizes', 'max_sizes', 'aspect_ratios', 'variance', 'flip', 'clip', 'steps', 'offset', 'name', 'min_max_aspect_ratios_order'], varargs=None, keywords=None, defaults=(None, [1.0], [0.1, 0.1, 0.2, 0.2], False, False, [0.0, 0.0], 0.5, None, False)) +paddle.fluid.layers.multi_box_head ArgSpec(args=['inputs', 'image', 'base_size', 
'num_classes', 'aspect_ratios', 'min_ratio', 'max_ratio', 'min_sizes', 'max_sizes', 'steps', 'step_w', 'step_h', 'offset', 'variance', 'flip', 'clip', 'kernel_size', 'pad', 'stride', 'name', 'min_max_aspect_ratios_order'], varargs=None, keywords=None, defaults=(None, None, None, None, None, None, None, 0.5, [0.1, 0.1, 0.2, 0.2], True, False, 1, 0, 1, None, False)) +paddle.fluid.layers.bipartite_match ArgSpec(args=['dist_matrix', 'match_type', 'dist_threshold', 'name'], varargs=None, keywords=None, defaults=(None, None, None)) +paddle.fluid.layers.target_assign ArgSpec(args=['input', 'matched_indices', 'negative_indices', 'mismatch_value', 'name'], varargs=None, keywords=None, defaults=(None, None, None)) +paddle.fluid.layers.detection_output ArgSpec(args=['loc', 'scores', 'prior_box', 'prior_box_var', 'background_label', 'nms_threshold', 'nms_top_k', 'keep_top_k', 'score_threshold', 'nms_eta'], varargs=None, keywords=None, defaults=(0, 0.3, 400, 200, 0.01, 1.0)) +paddle.fluid.layers.ssd_loss ArgSpec(args=['location', 'confidence', 'gt_box', 'gt_label', 'prior_box', 'prior_box_var', 'background_label', 'overlap_threshold', 'neg_pos_ratio', 'neg_overlap', 'loc_loss_weight', 'conf_loss_weight', 'match_type', 'mining_type', 'normalize', 'sample_size'], varargs=None, keywords=None, defaults=(None, 0, 0.5, 3.0, 0.5, 1.0, 1.0, 'per_prediction', 'max_negative', True, None)) +paddle.fluid.layers.detection_map ArgSpec(args=['detect_res', 'label', 'class_num', 'background_label', 'overlap_threshold', 'evaluate_difficult', 'has_state', 'input_states', 'out_states', 'ap_version'], varargs=None, keywords=None, defaults=(0, 0.3, True, None, None, None, 'integral')) +paddle.fluid.layers.rpn_target_assign ArgSpec(args=['loc', 'scores', 'anchor_box', 'gt_box', 'rpn_batch_size_per_im', 'fg_fraction', 'rpn_positive_overlap', 'rpn_negative_overlap'], varargs=None, keywords=None, defaults=(256, 0.25, 0.7, 0.3)) +paddle.fluid.layers.anchor_generator ArgSpec(args=['input', 'anchor_sizes', 'aspect_ratios', 'variance', 'stride', 'offset', 'name'], varargs=None, keywords=None, defaults=(None, None, [0.1, 0.1, 0.2, 0.2], None, 0.5, None)) +paddle.fluid.layers.iou_similarity ArgSpec(args=[], varargs='args', keywords='kwargs', defaults=None) +paddle.fluid.layers.box_coder ArgSpec(args=[], varargs='args', keywords='kwargs', defaults=None) +paddle.fluid.layers.polygon_box_transform ArgSpec(args=[], varargs='args', keywords='kwargs', defaults=None) +paddle.fluid.layers.accuracy ArgSpec(args=['input', 'label', 'k', 'correct', 'total'], varargs=None, keywords=None, defaults=(1, None, None)) +paddle.fluid.layers.auc ArgSpec(args=['input', 'label', 'curve', 'num_thresholds', 'topk'], varargs=None, keywords=None, defaults=('ROC', 200, 1)) +paddle.fluid.layers.exponential_decay ArgSpec(args=['learning_rate', 'decay_steps', 'decay_rate', 'staircase'], varargs=None, keywords=None, defaults=(False,)) +paddle.fluid.layers.natural_exp_decay ArgSpec(args=['learning_rate', 'decay_steps', 'decay_rate', 'staircase'], varargs=None, keywords=None, defaults=(False,)) +paddle.fluid.layers.inverse_time_decay ArgSpec(args=['learning_rate', 'decay_steps', 'decay_rate', 'staircase'], varargs=None, keywords=None, defaults=(False,)) +paddle.fluid.layers.polynomial_decay ArgSpec(args=['learning_rate', 'decay_steps', 'end_learning_rate', 'power', 'cycle'], varargs=None, keywords=None, defaults=(0.0001, 1.0, False)) +paddle.fluid.layers.piecewise_decay ArgSpec(args=['boundaries', 'values'], varargs=None, keywords=None, defaults=None) 
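Each line above is the repr of Python 2's `inspect.getargspec` result for one public API, so an accidental signature change surfaces as a one-line diff against API.spec. A minimal sketch of how such a line could be regenerated (the `spec_line` helper is hypothetical, and it assumes `paddle.fluid` is importable and the target is a plain Python function):

    import inspect
    import paddle.fluid as fluid

    def spec_line(qualified_name, func):
        # repr(ArgSpec(args=..., varargs=..., keywords=..., defaults=...))
        # matches the text stored in API.spec under Python 2.
        return "%s %s" % (qualified_name, inspect.getargspec(func))

    # e.g. "paddle.fluid.layers.piecewise_decay ArgSpec(args=['boundaries', 'values'], ...)"
    print(spec_line("paddle.fluid.layers.piecewise_decay", fluid.layers.piecewise_decay))

Entries backed by pybind (such as the `__init__(self: paddle.fluid.core.ExecutionStrategy) -> None` lines) instead record the C++ binding's docstring signature, since `getargspec` cannot introspect those callables.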
+paddle.fluid.layers.noam_decay ArgSpec(args=['d_model', 'warmup_steps'], varargs=None, keywords=None, defaults=None) +paddle.fluid.layers.append_LARS ArgSpec(args=['params_grads', 'learning_rate', 'weight_decay'], varargs=None, keywords=None, defaults=None) +paddle.fluid.contrib.InitState.__init__ ArgSpec(args=['self', 'init', 'shape', 'value', 'init_boot', 'need_reorder', 'dtype'], varargs=None, keywords=None, defaults=(None, None, 0.0, None, False, 'float32')) +paddle.fluid.contrib.StateCell.__init__ ArgSpec(args=['self', 'inputs', 'states', 'out_state', 'name'], varargs=None, keywords=None, defaults=(None,)) +paddle.fluid.contrib.StateCell.compute_state ArgSpec(args=['self', 'inputs'], varargs=None, keywords=None, defaults=None) +paddle.fluid.contrib.StateCell.get_input ArgSpec(args=['self', 'input_name'], varargs=None, keywords=None, defaults=None) +paddle.fluid.contrib.StateCell.get_state ArgSpec(args=['self', 'state_name'], varargs=None, keywords=None, defaults=None) +paddle.fluid.contrib.StateCell.out_state ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None) +paddle.fluid.contrib.StateCell.set_state ArgSpec(args=['self', 'state_name', 'state_value'], varargs=None, keywords=None, defaults=None) +paddle.fluid.contrib.StateCell.state_updater ArgSpec(args=['self', 'updater'], varargs=None, keywords=None, defaults=None) +paddle.fluid.contrib.StateCell.update_states ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None) +paddle.fluid.contrib.TrainingDecoder.__init__ ArgSpec(args=['self', 'state_cell', 'name'], varargs=None, keywords=None, defaults=(None,)) +paddle.fluid.contrib.TrainingDecoder.block ArgSpec(args=[], varargs='args', keywords='kwds', defaults=None) +paddle.fluid.contrib.TrainingDecoder.output ArgSpec(args=['self'], varargs='outputs', keywords=None, defaults=None) +paddle.fluid.contrib.TrainingDecoder.static_input ArgSpec(args=['self', 'x'], varargs=None, keywords=None, defaults=None) +paddle.fluid.contrib.TrainingDecoder.step_input ArgSpec(args=['self', 'x'], varargs=None, keywords=None, defaults=None) +paddle.fluid.contrib.BeamSearchDecoder.__init__ ArgSpec(args=['self', 'state_cell', 'init_ids', 'init_scores', 'target_dict_dim', 'word_dim', 'input_var_dict', 'topk_size', 'sparse_emb', 'max_len', 'beam_size', 'end_id', 'name'], varargs=None, keywords=None, defaults=({}, 50, True, 100, 1, 1, None)) +paddle.fluid.contrib.BeamSearchDecoder.block ArgSpec(args=[], varargs='args', keywords='kwds', defaults=None) +paddle.fluid.contrib.BeamSearchDecoder.decode ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None) +paddle.fluid.contrib.BeamSearchDecoder.early_stop ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None) +paddle.fluid.contrib.BeamSearchDecoder.read_array ArgSpec(args=['self', 'init', 'is_ids', 'is_scores'], varargs=None, keywords=None, defaults=(False, False)) +paddle.fluid.contrib.BeamSearchDecoder.update_array ArgSpec(args=['self', 'array', 'value'], varargs=None, keywords=None, defaults=None) +paddle.fluid.contrib.memory_usage ArgSpec(args=['program', 'batch_size'], varargs=None, keywords=None, defaults=None) +paddle.fluid.transpiler.DistributeTranspiler.__init__ ArgSpec(args=['self', 'config'], varargs=None, keywords=None, defaults=(None,)) +paddle.fluid.transpiler.DistributeTranspiler.get_pserver_program ArgSpec(args=['self', 'endpoint'], varargs=None, keywords=None, defaults=None) +paddle.fluid.transpiler.DistributeTranspiler.get_startup_program ArgSpec(args=['self', 'endpoint', 'pserver_program'], 
varargs=None, keywords=None, defaults=None) +paddle.fluid.transpiler.DistributeTranspiler.get_trainer_program ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None) +paddle.fluid.transpiler.DistributeTranspiler.transpile ArgSpec(args=['self', 'trainer_id', 'program', 'pservers', 'trainers', 'sync_mode'], varargs=None, keywords=None, defaults=(None, '127.0.0.1:6174', 1, True)) +paddle.fluid.transpiler.InferenceTranspiler.__init__ +paddle.fluid.transpiler.InferenceTranspiler.transpile ArgSpec(args=['self', 'program', 'place', 'scope'], varargs=None, keywords=None, defaults=(None,)) +paddle.fluid.transpiler.memory_optimize ArgSpec(args=['input_program', 'skip_opt_set', 'print_log', 'level'], varargs=None, keywords=None, defaults=(None, False, 0)) +paddle.fluid.transpiler.release_memory ArgSpec(args=['input_program', 'skip_opt_set'], varargs=None, keywords=None, defaults=(None,)) +paddle.fluid.transpiler.HashName.__init__ ArgSpec(args=['self', 'pserver_endpoints'], varargs=None, keywords=None, defaults=None) +paddle.fluid.transpiler.HashName.dispatch ArgSpec(args=['self', 'varlist'], varargs=None, keywords=None, defaults=None) +paddle.fluid.transpiler.HashName.reset ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None) +paddle.fluid.transpiler.RoundRobin.__init__ ArgSpec(args=['self', 'pserver_endpoints'], varargs=None, keywords=None, defaults=None) +paddle.fluid.transpiler.RoundRobin.dispatch ArgSpec(args=['self', 'varlist'], varargs=None, keywords=None, defaults=None) +paddle.fluid.transpiler.RoundRobin.reset ArgSpec(args=['self'], varargs=None, keywords=None, defaults=None) +paddle.fluid.transpiler.DistributeTranspilerConfig.__init__ +paddle.fluid.nets.simple_img_conv_pool ArgSpec(args=['input', 'num_filters', 'filter_size', 'pool_size', 'pool_stride', 'pool_padding', 'pool_type', 'global_pooling', 'conv_stride', 'conv_padding', 'conv_dilation', 'conv_groups', 'param_attr', 'bias_attr', 'act', 'use_cudnn', 'use_mkldnn'], varargs=None, keywords=None, defaults=(0, 'max', False, 1, 0, 1, 1, None, None, None, True, False)) +paddle.fluid.nets.sequence_conv_pool ArgSpec(args=['input', 'num_filters', 'filter_size', 'param_attr', 'act', 'pool_type'], varargs=None, keywords=None, defaults=(None, 'sigmoid', 'max')) +paddle.fluid.nets.glu ArgSpec(args=['input', 'dim'], varargs=None, keywords=None, defaults=(-1,)) +paddle.fluid.nets.scaled_dot_product_attention ArgSpec(args=['queries', 'keys', 'values', 'num_heads', 'dropout_rate'], varargs=None, keywords=None, defaults=(1, 0.0)) +paddle.fluid.optimizer.SGDOptimizer.__init__ ArgSpec(args=['self', 'learning_rate'], varargs=None, keywords='kwargs', defaults=None) +paddle.fluid.optimizer.SGDOptimizer.minimize ArgSpec(args=['self', 'loss', 'startup_program', 'parameter_list', 'no_grad_set'], varargs=None, keywords=None, defaults=(None, None, None)) +paddle.fluid.optimizer.MomentumOptimizer.__init__ ArgSpec(args=['self', 'learning_rate', 'momentum', 'use_nesterov'], varargs=None, keywords='kwargs', defaults=(False,)) +paddle.fluid.optimizer.MomentumOptimizer.minimize ArgSpec(args=['self', 'loss', 'startup_program', 'parameter_list', 'no_grad_set'], varargs=None, keywords=None, defaults=(None, None, None)) +paddle.fluid.optimizer.AdagradOptimizer.__init__ ArgSpec(args=['self', 'learning_rate', 'epsilon'], varargs=None, keywords='kwargs', defaults=(1e-06,)) +paddle.fluid.optimizer.AdagradOptimizer.minimize ArgSpec(args=['self', 'loss', 'startup_program', 'parameter_list', 'no_grad_set'], varargs=None, keywords=None, defaults=(None, 
None, None)) +paddle.fluid.optimizer.AdamOptimizer.__init__ ArgSpec(args=['self', 'learning_rate', 'beta1', 'beta2', 'epsilon'], varargs=None, keywords='kwargs', defaults=(0.001, 0.9, 0.999, 1e-08)) +paddle.fluid.optimizer.AdamOptimizer.minimize ArgSpec(args=['self', 'loss', 'startup_program', 'parameter_list', 'no_grad_set'], varargs=None, keywords=None, defaults=(None, None, None)) +paddle.fluid.optimizer.AdamaxOptimizer.__init__ ArgSpec(args=['self', 'learning_rate', 'beta1', 'beta2', 'epsilon'], varargs=None, keywords='kwargs', defaults=(0.001, 0.9, 0.999, 1e-08)) +paddle.fluid.optimizer.AdamaxOptimizer.minimize ArgSpec(args=['self', 'loss', 'startup_program', 'parameter_list', 'no_grad_set'], varargs=None, keywords=None, defaults=(None, None, None)) +paddle.fluid.optimizer.DecayedAdagradOptimizer.__init__ ArgSpec(args=['self', 'learning_rate', 'decay', 'epsilon'], varargs=None, keywords='kwargs', defaults=(0.95, 1e-06)) +paddle.fluid.optimizer.DecayedAdagradOptimizer.minimize ArgSpec(args=['self', 'loss', 'startup_program', 'parameter_list', 'no_grad_set'], varargs=None, keywords=None, defaults=(None, None, None)) +paddle.fluid.optimizer.FtrlOptimizer.__init__ ArgSpec(args=['self', 'learning_rate', 'l1', 'l2', 'lr_power'], varargs=None, keywords='kwargs', defaults=(0.0, 0.0, -0.5)) +paddle.fluid.optimizer.FtrlOptimizer.minimize ArgSpec(args=['self', 'loss', 'startup_program', 'parameter_list', 'no_grad_set'], varargs=None, keywords=None, defaults=(None, None, None)) +paddle.fluid.optimizer.RMSPropOptimizer.__init__ ArgSpec(args=['self', 'learning_rate', 'rho', 'epsilon', 'momentum'], varargs=None, keywords='kwargs', defaults=(0.95, 1e-06, 0.0)) +paddle.fluid.optimizer.RMSPropOptimizer.minimize ArgSpec(args=['self', 'loss', 'startup_program', 'parameter_list', 'no_grad_set'], varargs=None, keywords=None, defaults=(None, None, None)) +paddle.fluid.optimizer.AdadeltaOptimizer.__init__ ArgSpec(args=['self', 'learning_rate', 'epsilon', 'rho'], varargs=None, keywords='kwargs', defaults=(1e-06, 0.95)) +paddle.fluid.optimizer.AdadeltaOptimizer.minimize ArgSpec(args=['self', 'loss', 'startup_program', 'parameter_list', 'no_grad_set'], varargs=None, keywords=None, defaults=(None, None, None)) +paddle.fluid.optimizer.ModelAverage.__init__ ArgSpec(args=['self', 'average_window_rate', 'min_average_window', 'max_average_window'], varargs=None, keywords='kwargs', defaults=(10000, 10000)) +paddle.fluid.optimizer.ModelAverage.apply ArgSpec(args=[], varargs='args', keywords='kwds', defaults=None) +paddle.fluid.optimizer.ModelAverage.minimize ArgSpec(args=['self', 'loss', 'startup_program', 'parameter_list', 'no_grad_set'], varargs=None, keywords=None, defaults=(None, None, None)) +paddle.fluid.optimizer.ModelAverage.restore ArgSpec(args=['self', 'executor'], varargs=None, keywords=None, defaults=None) +paddle.fluid.backward.append_backward ArgSpec(args=['loss', 'parameter_list', 'no_grad_set', 'callbacks'], varargs=None, keywords=None, defaults=(None, None, None)) +paddle.fluid.regularizer.L1DecayRegularizer.__init__ ArgSpec(args=['self', 'regularization_coeff'], varargs=None, keywords=None, defaults=(0.0,)) +paddle.fluid.regularizer.L2DecayRegularizer.__init__ ArgSpec(args=['self', 'regularization_coeff'], varargs=None, keywords=None, defaults=(0.0,)) +paddle.fluid.LoDTensor.__init__ 1. __init__(self: paddle.fluid.core.LoDTensor, arg0: List[List[int]]) -> None 2. 
__init__(self: paddle.fluid.core.LoDTensor) -> None +paddle.fluid.LoDTensor.has_valid_recursive_sequence_lengths has_valid_recursive_sequence_lengths(self: paddle.fluid.core.LoDTensor) -> bool +paddle.fluid.LoDTensor.lod lod(self: paddle.fluid.core.LoDTensor) -> List[List[int]] +paddle.fluid.LoDTensor.recursive_sequence_lengths recursive_sequence_lengths(self: paddle.fluid.core.LoDTensor) -> List[List[int]] +paddle.fluid.LoDTensor.set 1. set(self: paddle.fluid.core.Tensor, arg0: numpy.ndarray[float32], arg1: paddle::platform::CPUPlace) -> None 2. set(self: paddle.fluid.core.Tensor, arg0: numpy.ndarray[int32], arg1: paddle::platform::CPUPlace) -> None 3. set(self: paddle.fluid.core.Tensor, arg0: numpy.ndarray[float64], arg1: paddle::platform::CPUPlace) -> None 4. set(self: paddle.fluid.core.Tensor, arg0: numpy.ndarray[int64], arg1: paddle::platform::CPUPlace) -> None 5. set(self: paddle.fluid.core.Tensor, arg0: numpy.ndarray[bool], arg1: paddle::platform::CPUPlace) -> None 6. set(self: paddle.fluid.core.Tensor, arg0: numpy.ndarray[uint16], arg1: paddle::platform::CPUPlace) -> None 7. set(self: paddle.fluid.core.Tensor, arg0: numpy.ndarray[uint8], arg1: paddle::platform::CPUPlace) -> None 8. set(self: paddle.fluid.core.Tensor, arg0: numpy.ndarray[float32], arg1: paddle::platform::CUDAPlace) -> None 9. set(self: paddle.fluid.core.Tensor, arg0: numpy.ndarray[int32], arg1: paddle::platform::CUDAPlace) -> None 10. set(self: paddle.fluid.core.Tensor, arg0: numpy.ndarray[float64], arg1: paddle::platform::CUDAPlace) -> None 11. set(self: paddle.fluid.core.Tensor, arg0: numpy.ndarray[int64], arg1: paddle::platform::CUDAPlace) -> None 12. set(self: paddle.fluid.core.Tensor, arg0: numpy.ndarray[bool], arg1: paddle::platform::CUDAPlace) -> None 13. set(self: paddle.fluid.core.Tensor, arg0: numpy.ndarray[uint16], arg1: paddle::platform::CUDAPlace) -> None 14. set(self: paddle.fluid.core.Tensor, arg0: numpy.ndarray[uint8], arg1: paddle::platform::CUDAPlace) -> None 15. set(self: paddle.fluid.core.Tensor, arg0: numpy.ndarray[float32], arg1: paddle::platform::CUDAPinnedPlace) -> None 16. set(self: paddle.fluid.core.Tensor, arg0: numpy.ndarray[int32], arg1: paddle::platform::CUDAPinnedPlace) -> None 17. set(self: paddle.fluid.core.Tensor, arg0: numpy.ndarray[float64], arg1: paddle::platform::CUDAPinnedPlace) -> None 18. set(self: paddle.fluid.core.Tensor, arg0: numpy.ndarray[int64], arg1: paddle::platform::CUDAPinnedPlace) -> None 19. set(self: paddle.fluid.core.Tensor, arg0: numpy.ndarray[bool], arg1: paddle::platform::CUDAPinnedPlace) -> None 20. set(self: paddle.fluid.core.Tensor, arg0: numpy.ndarray[uint16], arg1: paddle::platform::CUDAPinnedPlace) -> None 21. 
set(self: paddle.fluid.core.Tensor, arg0: numpy.ndarray[uint8], arg1: paddle::platform::CUDAPinnedPlace) -> None +paddle.fluid.LoDTensor.set_lod set_lod(self: paddle.fluid.core.LoDTensor, arg0: List[List[int]]) -> None +paddle.fluid.LoDTensor.set_recursive_sequence_lengths set_recursive_sequence_lengths(self: paddle.fluid.core.LoDTensor, arg0: List[List[int]]) -> None +paddle.fluid.LoDTensor.shape shape(self: paddle.fluid.core.Tensor) -> List[int] +paddle.fluid.LoDTensorArray.__init__ __init__(self: paddle.fluid.core.LoDTensorArray) -> None +paddle.fluid.LoDTensorArray.append append(self: paddle.fluid.core.LoDTensorArray, arg0: paddle.fluid.core.LoDTensor) -> None +paddle.fluid.CPUPlace.__init__ __init__(self: paddle.fluid.core.CPUPlace) -> None +paddle.fluid.CUDAPlace.__init__ __init__(self: paddle.fluid.core.CUDAPlace, arg0: int) -> None +paddle.fluid.CUDAPinnedPlace.__init__ __init__(self: paddle.fluid.core.CUDAPinnedPlace) -> None +paddle.fluid.ParamAttr.__init__ ArgSpec(args=['self', 'name', 'initializer', 'learning_rate', 'regularizer', 'trainable', 'gradient_clip', 'do_model_average'], varargs=None, keywords=None, defaults=(None, None, 1.0, None, True, None, False)) +paddle.fluid.WeightNormParamAttr.__init__ ArgSpec(args=['self', 'dim'], varargs=None, keywords='kwargs', defaults=(None,)) +paddle.fluid.DataFeeder.__init__ ArgSpec(args=['self', 'feed_list', 'place', 'program'], varargs=None, keywords=None, defaults=(None,)) +paddle.fluid.DataFeeder.decorate_reader ArgSpec(args=['self', 'reader', 'multi_devices', 'num_places', 'drop_last'], varargs=None, keywords=None, defaults=(None, True)) +paddle.fluid.DataFeeder.feed ArgSpec(args=['self', 'iterable'], varargs=None, keywords=None, defaults=None) +paddle.fluid.DataFeeder.feed_parallel ArgSpec(args=['self', 'iterable', 'num_places'], varargs=None, keywords=None, defaults=(None,)) +paddle.fluid.clip.ErrorClipByValue.__init__ ArgSpec(args=['self', 'max', 'min'], varargs=None, keywords=None, defaults=(None,)) +paddle.fluid.clip.GradientClipByValue.__init__ ArgSpec(args=['self', 'max', 'min'], varargs=None, keywords=None, defaults=(None,)) +paddle.fluid.clip.GradientClipByNorm.__init__ ArgSpec(args=['self', 'clip_norm'], varargs=None, keywords=None, defaults=None) +paddle.fluid.clip.GradientClipByGlobalNorm.__init__ ArgSpec(args=['self', 'clip_norm', 'group_name'], varargs=None, keywords=None, defaults=('default_group',)) +paddle.fluid.profiler.cuda_profiler ArgSpec(args=[], varargs='args', keywords='kwds', defaults=None) +paddle.fluid.profiler.reset_profiler ArgSpec(args=[], varargs=None, keywords=None, defaults=None) +paddle.fluid.profiler.profiler ArgSpec(args=[], varargs='args', keywords='kwds', defaults=None) +paddle.fluid.profiler.start_profiler ArgSpec(args=['state'], varargs=None, keywords=None, defaults=None) +paddle.fluid.profiler.stop_profiler ArgSpec(args=['sorted_key', 'profile_path'], varargs=None, keywords=None, defaults=(None, '/tmp/profile')) +paddle.fluid.unique_name.generate ArgSpec(args=['key'], varargs=None, keywords=None, defaults=None) +paddle.fluid.unique_name.switch ArgSpec(args=['new_generator'], varargs=None, keywords=None, defaults=(None,)) +paddle.fluid.unique_name.guard ArgSpec(args=[], varargs='args', keywords='kwds', defaults=None) +paddle.fluid.recordio_writer.convert_reader_to_recordio_file ArgSpec(args=['filename', 'reader_creator', 'feeder', 'compressor', 'max_num_records', 'feed_order'], varargs=None, keywords=None, defaults=(Compressor.Snappy, 1000, None)) 
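The `DataFeeder` entries above pin down the feeding contract; a short usage sketch consistent with those signatures (the layer names `x` and `y` are illustrative, not part of this patch):

    import paddle.fluid as fluid

    x = fluid.layers.data(name='x', shape=[13], dtype='float32')
    y = fluid.layers.data(name='y', shape=[1], dtype='float32')
    feeder = fluid.DataFeeder(feed_list=[x, y], place=fluid.CPUPlace())
    # feed() turns one minibatch of (x, y) rows into a {'x': ..., 'y': ...}
    # dict of LoDTensors, matching ArgSpec(args=['self', 'iterable'], ...).
    minibatch = feeder.feed([([0.0] * 13, [1.0]), ([1.0] * 13, [0.0])])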
+paddle.fluid.recordio_writer.convert_reader_to_recordio_files ArgSpec(args=['filename', 'batch_per_file', 'reader_creator', 'feeder', 'compressor', 'max_num_records', 'feed_order'], varargs=None, keywords=None, defaults=(Compressor.Snappy, 1000, None)) +paddle.fluid.Scope.__init__ __init__(self: paddle.fluid.core.Scope) -> None +paddle.fluid.Scope.drop_kids drop_kids(self: paddle.fluid.core.Scope) -> None +paddle.fluid.Scope.find_var find_var(self: paddle.fluid.core.Scope, arg0: unicode) -> paddle.fluid.core.Variable +paddle.fluid.Scope.new_scope new_scope(self: paddle.fluid.core.Scope) -> paddle.fluid.core.Scope +paddle.fluid.Scope.var var(self: paddle.fluid.core.Scope, arg0: unicode) -> paddle.fluid.core.Variable diff --git a/paddle/fluid/CMakeLists.txt b/paddle/fluid/CMakeLists.txt index d274d96c29..2577e59d9c 100644 --- a/paddle/fluid/CMakeLists.txt +++ b/paddle/fluid/CMakeLists.txt @@ -5,5 +5,7 @@ add_subdirectory(operators) add_subdirectory(pybind) add_subdirectory(string) add_subdirectory(recordio) -# NOTE: please add subdirectory inference at last. -add_subdirectory(inference) +if(WITH_INFERENCE) + # NOTE: please add subdirectory inference at last. + add_subdirectory(inference) +endif() diff --git a/paddle/fluid/framework/CMakeLists.txt b/paddle/fluid/framework/CMakeLists.txt index 340b891e41..1d62792b80 100644 --- a/paddle/fluid/framework/CMakeLists.txt +++ b/paddle/fluid/framework/CMakeLists.txt @@ -1,15 +1,17 @@ add_subdirectory(details) +add_subdirectory(ir) # ddim lib proto_library(framework_proto SRCS framework.proto) cc_library(ddim SRCS ddim.cc DEPS eigen3 boost) cc_test(ddim_test SRCS ddim_test.cc DEPS ddim) nv_test(dim_test SRCS dim_test.cu DEPS ddim) - +cc_library(data_type SRCS data_type.cc DEPS framework_proto ddim device_context) +cc_test(data_type_test SRCS data_type_test.cc DEPS data_type place tensor) if(WITH_GPU) - nv_library(tensor SRCS tensor.cc tensor_util.cu DEPS ddim place memory device_context framework_proto) + nv_library(tensor SRCS tensor.cc tensor_util.cu DEPS place memory data_type device_context) else() - cc_library(tensor SRCS tensor.cc tensor_util.cc DEPS ddim place memory device_context framework_proto) + cc_library(tensor SRCS tensor.cc tensor_util.cc DEPS place memory data_type device_context) endif() cc_test(tensor_test SRCS tensor_test.cc DEPS tensor) @@ -21,12 +23,18 @@ endif() cc_test(eigen_test SRCS eigen_test.cc DEPS tensor) -nv_test(mixed_vector_test SRCS mixed_vector_test.cu DEPS place memory device_context init) +if(WITH_GPU) + nv_test(mixed_vector_test SRCS mixed_vector_test.cc mixed_vector_test.cu DEPS place memory device_context tensor) +else() + cc_test(mixed_vector_test SRCS mixed_vector_test.cc DEPS place memory device_context tensor) +endif() + cc_library(lod_tensor SRCS lod_tensor.cc DEPS ddim place tensor framework_proto recordio) cc_test(lod_tensor_test SRCS lod_tensor_test.cc DEPS lod_tensor memory) -nv_test(lod_tensor_gpu_test SRCS lod_tensor_test.cu DEPS lod_tensor init) +nv_test(lod_tensor_gpu_test SRCS lod_tensor_test.cu DEPS lod_tensor) cc_library(reader SRCS reader.cc DEPS lod_tensor ddim) +cc_test(reader_test SRCS reader_test.cc DEPS reader) cc_test(variable_test SRCS variable_test.cc) @@ -38,7 +46,7 @@ cc_test(scope_test SRCS scope_test.cc DEPS scope) cc_library(data_device_transform SRCS data_device_transform.cc DEPS tensor) nv_test(data_device_transform_test SRCS data_device_transform_test.cu - DEPS operator op_registry init math_function) + DEPS operator op_registry device_context math_function) if(WITH_GPU) 
nv_library(data_type_transform SRCS data_type_transform.cu DEPS tensor)
@@ -57,13 +65,13 @@ cc_library(data_transform SRCS data_transform.cc DEPS math_function tensor
 cc_library(attribute SRCS attribute.cc DEPS framework_proto boost)
 cc_test(program_desc_test SRCS program_desc_test.cc DEPS proto_desc device_context)
-cc_library(op_proto_maker SRCS op_proto_maker.cc DEPS framework_proto attribute)
+cc_library(op_proto_maker SRCS op_proto_maker.cc DEPS framework_proto attribute glog)
 cc_test(op_proto_maker_test SRCS op_proto_maker_test.cc DEPS op_proto_maker)
 cc_library(op_info SRCS op_info.cc DEPS attribute framework_proto)
 cc_library(shape_inference SRCS shape_inference.cc DEPS ddim attribute device_context)
 cc_library(operator SRCS operator.cc DEPS op_info device_context tensor scope glog shape_inference data_transform lod_tensor profiler)
-cc_test(operator_test SRCS operator_test.cc DEPS operator op_registry init)
+cc_test(operator_test SRCS operator_test.cc DEPS operator op_registry device_context)
 cc_library(proto_desc SRCS var_desc.cc op_desc.cc block_desc.cc program_desc.cc DEPS shape_inference op_info operator glog)
 cc_library(op_registry SRCS op_registry.cc DEPS op_proto_maker op_info operator glog proto_desc)
@@ -83,11 +91,16 @@ cc_library(lod_rank_table SRCS lod_rank_table.cc DEPS lod_tensor)
 cc_library(feed_fetch_method SRCS feed_fetch_method.cc DEPS lod_tensor scope glog)
-cc_library(executor SRCS executor.cc DEPS op_registry device_context scope
-framework_proto glog lod_rank_table feed_fetch_method)
+if(WITH_DISTRIBUTE)
+  cc_library(executor SRCS executor.cc DEPS op_registry device_context scope framework_proto glog lod_rank_table feed_fetch_method sendrecvop_grpc cares grpc++_unsecure grpc_unsecure gpr)
+  set(DISTRIBUTE_COMPILE_FLAGS "-Wno-non-virtual-dtor -Wno-error=non-virtual-dtor -Wno-error=delete-non-virtual-dtor")
+  set_source_files_properties(executor.cc PROPERTIES COMPILE_FLAGS ${DISTRIBUTE_COMPILE_FLAGS})
+else()
+  cc_library(executor SRCS executor.cc DEPS op_registry device_context scope framework_proto glog lod_rank_table feed_fetch_method)
+endif()
-cc_library(parallel_executor SRCS parallel_executor.cc DEPS multi_devices_graph_builder threaded_ssa_graph_executor)
+cc_library(parallel_executor SRCS parallel_executor.cc DEPS threaded_ssa_graph_executor scope_buffered_ssa_graph_executor graph graph_viz_pass multi_devices_graph_pass multi_devices_graph_print_pass multi_devices_graph_check_pass)
 cc_library(prune SRCS prune.cc DEPS framework_proto)
 cc_test(prune_test SRCS prune_test.cc DEPS op_info prune recurrent_op device_context)
@@ -96,14 +109,14 @@ cc_test(var_type_inference_test SRCS var_type_inference_test.cc DEPS op_registry
 cc_library(selected_rows SRCS selected_rows.cc DEPS tensor)
 cc_test(selected_rows_test SRCS selected_rows_test.cc DEPS selected_rows)
-cc_library(init SRCS init.cc DEPS gflags device_context place stringpiece operator)
-cc_test(init_test SRCS init_test.cc DEPS init)
-
 cc_test(op_kernel_type_test SRCS op_kernel_type_test.cc DEPS place device_context framework_proto)
 cc_test(cow_ptr_tests SRCS details/cow_ptr_test.cc)
-
+# cc_test(channel_test SRCS channel_test.cc)
 cc_test(tuple_test SRCS tuple_test.cc )
-cc_test(concurrency_test SRCS concurrency_test.cc DEPS go_op channel_close_op channel_create_op
-                         channel_send_op channel_recv_op sum_op select_op elementwise_add_op compare_op
-                         conditional_block_op while_op assign_op print_op executor proto_desc)
+
+# disable test temporarily.
+# TODO https://github.com/PaddlePaddle/Paddle/issues/11971 +# cc_test(concurrency_test SRCS concurrency_test.cc DEPS go_op channel_close_op channel_create_op +# channel_send_op channel_recv_op sum_op select_op elementwise_add_op compare_op +# conditional_block_op while_op assign_op print_op executor proto_desc) diff --git a/paddle/fluid/framework/block_desc.cc b/paddle/fluid/framework/block_desc.cc index 1b6f656a00..f537e4b9e5 100644 --- a/paddle/fluid/framework/block_desc.cc +++ b/paddle/fluid/framework/block_desc.cc @@ -134,6 +134,11 @@ OpDesc *BlockDesc::PrependOp() { return ops_.front().get(); } +void BlockDesc::PrependAllocatedOp(std::unique_ptr &&op_desc) { + need_update_ = true; + ops_.emplace_front(std::move(op_desc)); +} + OpDesc *BlockDesc::InsertOp(size_t index) { need_update_ = true; auto it = ops_.begin() + index; @@ -164,17 +169,13 @@ void BlockDesc::Flush() { } if (need_update_) { - auto &op_field = *this->desc_->mutable_ops(); - this->ClearPBOps(); - op_field.Reserve(static_cast(ops_.size())); + this->desc_->mutable_ops()->Clear(); for (auto &op_desc : ops_) { - op_field.AddAllocated(op_desc->Proto()); + this->desc_->mutable_ops()->Add()->CopyFrom(*op_desc->Proto()); } - auto &var_field = *this->desc_->mutable_vars(); - this->ClearPBVars(); - var_field.Reserve(static_cast(vars_.size())); + this->desc_->mutable_vars()->Clear(); for (auto &var_desc : vars_) { - var_field.AddAllocated(var_desc.second->Proto()); + this->desc_->mutable_vars()->Add()->CopyFrom(*var_desc.second->Proto()); } need_update_ = false; } @@ -195,7 +196,7 @@ BlockDesc::BlockDesc(ProgramDesc *prog, proto::BlockDesc *desc) vars_[var_desc.name()].reset(new VarDesc(var_desc)); } for (const proto::OpDesc &op_desc : desc_->ops()) { - ops_.emplace_back(new OpDesc(op_desc, prog, this)); + ops_.emplace_back(new OpDesc(op_desc, this)); } } @@ -204,7 +205,7 @@ BlockDesc::BlockDesc(const BlockDesc &other, proto::BlockDesc *desc, : prog_(prog), desc_(desc) { need_update_ = true; for (auto &op : other.ops_) { - ops_.emplace_back(new OpDesc(*op->Proto(), prog, this)); + ops_.emplace_back(new OpDesc(*op, this)); } for (auto &it : other.vars_) { auto *var = new VarDesc(*it.second); @@ -212,22 +213,6 @@ BlockDesc::BlockDesc(const BlockDesc &other, proto::BlockDesc *desc, } } -void BlockDesc::ClearPBOps() { - auto ops = this->desc_->mutable_ops(); - while (!ops->empty()) { - // we do not own the OpDesc, so release the ownership. - ops->ReleaseLast(); - } -} - -void BlockDesc::ClearPBVars() { - auto vars = this->desc_->mutable_vars(); - while (!vars->empty()) { - // we do not own the VarDesc, so release the ownership. - vars->ReleaseLast(); - } -} - void BlockDesc::SetForwardBlockID(int32_t forward_block_id) { PADDLE_ENFORCE(!desc_->has_forward_block_idx(), "Parent block ID has been set to %d. Cannot set to %d", diff --git a/paddle/fluid/framework/block_desc.h b/paddle/fluid/framework/block_desc.h index eef19c4f09..960ca39e1e 100644 --- a/paddle/fluid/framework/block_desc.h +++ b/paddle/fluid/framework/block_desc.h @@ -41,11 +41,6 @@ class BlockDesc { BlockDesc(const BlockDesc &other, proto::BlockDesc *desc, ProgramDesc *prog); - ~BlockDesc() { - this->ClearPBVars(); - this->ClearPBOps(); - } - int32_t ID() const { return desc_->idx(); } int32_t Parent() const { return desc_->parent_idx(); } @@ -88,12 +83,13 @@ class BlockDesc { OpDesc *PrependOp(); + void PrependAllocatedOp(std::unique_ptr &&op_desc); + OpDesc *InsertOp(size_t index); /* - * Remove Op and its input/output variables. 
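The Flush() rewrite above trades protobuf's AddAllocated() for Add()->CopyFrom(), which is why the ClearPBOps()/ClearPBVars() release loops can be deleted: the repeated field now owns its own deep copies. A minimal sketch of the two protobuf idioms, with `desc` and `op_desc` standing in for the BlockDesc members, not runnable on its own:

// (1) AddAllocated hands the repeated field a raw pointer it then owns;
//     the caller must Release*() it again before the field is destroyed,
//     or the element is freed twice.
desc->mutable_ops()->AddAllocated(op_desc->Proto());
// (2) Add()->CopyFrom() lets the field allocate and own its own element;
//     op_desc keeps sole ownership of its Proto().
desc->mutable_ops()->Add()->CopyFrom(*op_desc->Proto());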
-   * Note that for either input or output variable, if it is also an input or
-   * output variable of other ops, we should remain it.
+   * Only remove the op itself;
+   * do nothing to its input and output variables.
    */
   void RemoveOp(size_t s, size_t e);
@@ -103,7 +99,7 @@ class BlockDesc {

   size_t OpSize() const { return ops_.size(); }

-  OpDesc *Op(int idx) { return ops_.at(idx).get(); }
+  OpDesc *Op(int idx) const { return ops_.at(idx).get(); }

   void Flush();

@@ -111,10 +107,6 @@ class BlockDesc {

   ProgramDesc *Program() const { return this->prog_; }

- private:
-  void ClearPBOps();
-  void ClearPBVars();
-
  private:
   ProgramDesc *prog_;       // not_own
   proto::BlockDesc *desc_;  // not_own
diff --git a/paddle/fluid/framework/data_device_transform.cc b/paddle/fluid/framework/data_device_transform.cc
index 85dbb39e6f..6bcfc6cd55 100644
--- a/paddle/fluid/framework/data_device_transform.cc
+++ b/paddle/fluid/framework/data_device_transform.cc
@@ -16,29 +16,25 @@ limitations under the License. */
 namespace paddle {
 namespace framework {

-static const platform::DeviceContext* GetDeviceContext(
-    const platform::Place& src_place, const platform::Place& dst_place) {
-  platform::DeviceContextPool& pool = platform::DeviceContextPool::Instance();
-
-  if (platform::is_gpu_place(src_place) && platform::is_cpu_place(dst_place)) {
-    return pool.Get(src_place);
-  } else if (platform::is_cpu_place(src_place) &&
-             platform::is_gpu_place(dst_place)) {
-    return pool.Get(dst_place);
-  } else {
-    PADDLE_THROW(
-        "Currently, model parallelism is only supported between CPU and CUDA");
-  }
-}
-
-void TransDataDevice(const Tensor& in, const platform::Place& dst_place,
-                     Tensor* out) {
+void TransDataDevice(const Tensor &in, const platform::Place &dst_place,
+                     Tensor *out) {
   VLOG(3) << "DeviceTransform in, src_place " << in.place()
           << " dst_place: " << dst_place;
-  auto* dev_ctx = GetDeviceContext(in.place(), dst_place);
-  dev_ctx->Wait();
-  TensorCopy(in, dst_place, *dev_ctx, out);
-  dev_ctx->Wait();
+
+  PADDLE_ENFORCE_NE(
+      in.place().which(), dst_place.which(),
+      "Currently, model parallelism is only supported between CPU and CUDA");
+
+  // FIXME(zcd): TransDataDevice is used to transform data from GPU to CPU, and
+  // the enforced checks have been done in GetDeviceContext, so the
+  // `dev_ctx->Wait()` calls are necessary. But `dev_ctx->Wait()` makes the
+  // program slow, especially when the number of elements is small, for
+  // example, when a learning rate tensor holds a single element on the CPU
+  // side.
+  // One solution is to use a CUDA kernel to complete the copy operation when
+  // the transform is from CPU to GPU and the number of elements is small.
+  // But the embarrassment is that this solution makes training slower.
+  TensorCopySync(in, dst_place, out);
 }

 }  // namespace framework
diff --git a/paddle/fluid/framework/data_device_transform_test.cu b/paddle/fluid/framework/data_device_transform_test.cu
index df4caa45eb..f2c55e533a 100644
--- a/paddle/fluid/framework/data_device_transform_test.cu
+++ b/paddle/fluid/framework/data_device_transform_test.cu
@@ -14,13 +14,13 @@ limitations under the License.
*/ #include "gtest/gtest.h" -#include "paddle/fluid/framework/init.h" #include "paddle/fluid/framework/lod_tensor.h" #include "paddle/fluid/framework/op_info.h" #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/operators/elementwise_op_function.h" #include "paddle/fluid/operators/math/math_function.h" #include "paddle/fluid/platform/device_context.h" +#include "paddle/fluid/platform/init.h" namespace paddle { namespace framework { @@ -32,8 +32,7 @@ struct AddFunctor { class OpKernelTestProtoAndCheckerMaker : public OpProtoAndCheckerMaker { public: - OpKernelTestProtoAndCheckerMaker(OpProto* proto, OpAttrChecker* op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() { AddInput("input", "input1 of test op"); AddOutput("output", "output of test op"); AddAttr("use_gpu", "force to use gpu kernel").SetDefault(false); diff --git a/paddle/fluid/framework/data_layout.h b/paddle/fluid/framework/data_layout.h index 9c5e2cf7cc..b611bb77b4 100644 --- a/paddle/fluid/framework/data_layout.h +++ b/paddle/fluid/framework/data_layout.h @@ -27,6 +27,7 @@ enum class DataLayout { kNHWC = 0, kNCHW = 1, kAnyLayout = 2, + kMKLDNN = 3, // all layouts supported by MKLDNN internally }; inline DataLayout StringToDataLayout(const std::string& str) { @@ -41,6 +42,8 @@ inline DataLayout StringToDataLayout(const std::string& str) { return DataLayout::kNCHW; } else if (s == "ANYLAYOUT") { return DataLayout::kAnyLayout; + } else if (s == "MKLDNNLAYOUT") { + return DataLayout::kMKLDNN; } else { PADDLE_THROW("Unknown storage order string: %s", s); } @@ -54,8 +57,10 @@ inline std::string DataLayoutToString(const DataLayout& data_layout) { return "NCHW"; case DataLayout::kAnyLayout: return "ANY_LAYOUT"; + case DataLayout::kMKLDNN: + return "MKLDNNLAYOUT"; default: - PADDLE_THROW("unknown DataLayou %d", data_layout); + PADDLE_THROW("unknown DataLayout %d", data_layout); } } diff --git a/paddle/fluid/framework/data_layout_transform.cc b/paddle/fluid/framework/data_layout_transform.cc index 60ec60a427..cd00b7de73 100644 --- a/paddle/fluid/framework/data_layout_transform.cc +++ b/paddle/fluid/framework/data_layout_transform.cc @@ -16,6 +16,9 @@ #include #include "paddle/fluid/operators/math/math_function.h" +#ifdef PADDLE_WITH_MKLDNN +#include "paddle/fluid/platform/mkldnn_helper.h" +#endif namespace paddle { namespace framework { @@ -88,5 +91,84 @@ void TransDataLayout(const OpKernelType& kernel_type_for_var, out->set_layout(expected_kernel_type.data_layout_); } +#ifdef PADDLE_WITH_MKLDNN +using mkldnn::memory; +using mkldnn::primitive; +using mkldnn::reorder; + +void* GetDataFromTensor(const Tensor& tensor, mkldnn::memory::data_type type) { + switch (type) { + case mkldnn::memory::data_type::f32: + return platform::to_void_cast(tensor.data()); + case mkldnn::memory::data_type::s8: + return platform::to_void_cast(tensor.data()); + case mkldnn::memory::data_type::u8: + return platform::to_void_cast(tensor.data()); + case mkldnn::memory::data_type::s16: + return platform::to_void_cast(tensor.data()); + case mkldnn::memory::data_type::s32: + return platform::to_void_cast(tensor.data()); + default: + PADDLE_THROW("wrong mkldnn type provided"); + } +} +#endif + +void TransDataLayoutFromMKLDNN(const OpKernelType& kernel_type_for_var, + const OpKernelType& expected_kernel_type, + const Tensor& in, Tensor* out) { + auto in_layout = kernel_type_for_var.data_layout_; + auto out_layout = expected_kernel_type.data_layout_; + + PADDLE_ENFORCE( + in_layout == DataLayout::kMKLDNN && out_layout != 
DataLayout::kMKLDNN, + "TransDataLayoutFromMKLDNN only supports transform from MKLDNN to " + "non-MKLDNN"); + +#ifdef PADDLE_WITH_MKLDNN + PADDLE_ENFORCE(in.format() != memory::format::format_undef && + in.format() != memory::format::any, + "Input tensor should have specified memory format"); + + // Set default as NCHW in case not specified + out_layout = + out_layout == DataLayout::kAnyLayout ? DataLayout::kNCHW : out_layout; + + auto& pool = platform::DeviceContextPool::Instance(); + auto* dev_ctx = dynamic_cast( + pool.Get(expected_kernel_type.place_)); + auto& cpu_engine = dev_ctx->GetEngine(); + + std::vector in_tz = paddle::framework::vectorize2int(in.dims()); + std::vector out_tz = in_tz; + + memory::data_type in_type = ToMKLDNNDataType(in.type()); + PADDLE_ENFORCE(in_type != memory::data_type::data_undef, + "Input tensor type is not supported: ", in.type().name()); + memory::data_type out_type = in_type; + + auto in_format = platform::MKLDNNFormatForSize(in_tz.size(), in.format()); + auto out_format = + platform::MKLDNNFormatForSize(in_tz.size(), ToMKLDNNFormat(out_layout)); + + void* in_data = GetDataFromTensor(in, in_type); + + // output tensor has the same dims as input. Reorder don't change dims + out->Resize(in.dims()); + + auto out_data = out->mutable_data(expected_kernel_type.place_, in.type()); + + auto in_memory = memory({{{in_tz}, in_type, in_format}, cpu_engine}, in_data); + auto out_memory = + memory({{{out_tz}, out_type, out_format}, cpu_engine}, out_data); + + platform::Reorder(in_memory, out_memory); + + out->set_layout(out_layout); + // reset format since the out tensor will be feed to non-MKLDNN OPkernel + out->set_format(memory::format::format_undef); +#endif +} + } // namespace framework } // namespace paddle diff --git a/paddle/fluid/framework/data_layout_transform.h b/paddle/fluid/framework/data_layout_transform.h index 06b638663d..90bb206ec6 100644 --- a/paddle/fluid/framework/data_layout_transform.h +++ b/paddle/fluid/framework/data_layout_transform.h @@ -14,6 +14,7 @@ #pragma once +#include #include #include "paddle/fluid/framework/op_kernel_type.h" #include "paddle/fluid/framework/tensor.h" @@ -22,6 +23,51 @@ namespace paddle { namespace framework { +#ifdef PADDLE_WITH_MKLDNN +using MKLDNNFormat = mkldnn::memory::format; +using MKLDNNDataType = mkldnn::memory::data_type; + +inline MKLDNNFormat ToMKLDNNFormat(const DataLayout& layout) { + switch (layout) { + case DataLayout::kNHWC: + return MKLDNNFormat::nhwc; + case DataLayout::kNCHW: + return MKLDNNFormat::nchw; + default: + PADDLE_THROW("Fail to convert layout %s to MKLDNN format", + DataLayoutToString(layout)); + } +} + +inline DataLayout ToPaddleLayout(const MKLDNNFormat& format) { + switch (format) { + case MKLDNNFormat::nhwc: + return DataLayout::kNHWC; + case MKLDNNFormat::nchw: + return DataLayout::kNCHW; + default: + PADDLE_THROW("Fail to convert MKLDNN format to paddle layout"); + } +} + +inline MKLDNNDataType ToMKLDNNDataType(const std::type_index type) { + static const std::map dict{ + {std::type_index(typeid(float)), MKLDNNDataType::f32}, // NOLINT + {std::type_index(typeid(char)), MKLDNNDataType::s8}, // NOLINT + {std::type_index(typeid(unsigned char)), MKLDNNDataType::u8}, + {std::type_index(typeid(int16_t)), MKLDNNDataType::s16}, + {std::type_index(typeid(int32_t)), MKLDNNDataType::s32}}; + auto iter = dict.find(type); + if (iter != dict.end()) return iter->second; + return MKLDNNDataType::data_undef; +} + +#endif + +void TransDataLayoutFromMKLDNN(const OpKernelType& kernel_type_for_var, + 
const OpKernelType& expected_kernel_type, + const Tensor& in, Tensor* out); + std::vector GetAxis(const DataLayout& from, const DataLayout& to); void TransDataLayout(const OpKernelType& kernel_type_for_var, diff --git a/paddle/fluid/framework/data_transform.cc b/paddle/fluid/framework/data_transform.cc index 9c277a27da..8287222450 100644 --- a/paddle/fluid/framework/data_transform.cc +++ b/paddle/fluid/framework/data_transform.cc @@ -18,26 +18,57 @@ limitations under the License. */ #include "paddle/fluid/framework/data_layout_transform.h" #include "paddle/fluid/framework/data_type_transform.h" +#ifdef PADDLE_WITH_MKLDNN +#include "paddle/fluid/platform/mkldnn_helper.h" +#endif + namespace paddle { namespace framework { -static void PassTensorData(Tensor* from, Tensor* to) { +static void PassTensorData(Tensor *from, Tensor *to) { to->ShareDataWith(*from); *from = Tensor(); } -void DataTransform(const OpKernelType& expected_kernel_type, - const OpKernelType& kernel_type_for_var, - const Tensor& input_tensor, Tensor* output_tensor) { +void TransformData(const OpKernelType &expected_kernel_type, + const OpKernelType &kernel_type_for_var, + const Tensor &input_tensor, Tensor *output_tensor) { bool transformed = false; Tensor in; in.ShareDataWith(input_tensor); Tensor out; + DataLayout lin = kernel_type_for_var.data_layout_; + DataLayout lout = expected_kernel_type.data_layout_; // do layout transform - if (NeedTransformLayout(expected_kernel_type.data_layout_, - kernel_type_for_var.data_layout_)) { - TransDataLayout(kernel_type_for_var, expected_kernel_type, in, &out); + if (NeedTransformLayout(lout, lin)) { + if (lin == DataLayout::kMKLDNN || lout == DataLayout::kMKLDNN) { + PADDLE_ENFORCE( + !(lin == DataLayout::kMKLDNN && lout == DataLayout::kMKLDNN), + "No layout transform needed between two MKLDNN OPKernels"); + + if (lin != DataLayout::kMKLDNN && lout == DataLayout::kMKLDNN) { +#ifdef PADDLE_WITH_MKLDNN + // Case1 - transform from Non-MKLDNN OPKernel to MKLDNN OPKernel + // Just set layout/format. 
No real transform occurs
+
+        auto out_format = platform::MKLDNNFormatForSize(in.dims().size(),
+                                                        ToMKLDNNFormat(lin));
+
+        out.ShareDataWith(input_tensor);
+        out.set_layout(DataLayout::kMKLDNN);
+        out.set_format(out_format);
+#endif
+      } else {
+        // Case2 - transform from MKLDNN OPKernel to Non-MKLDNN OPKernel
+        // Do transform via MKLDNN lib
+        TransDataLayoutFromMKLDNN(kernel_type_for_var, expected_kernel_type, in,
+                                  &out);
+      }
+    } else {
+      // Case3 - transform between Non-MKLDNN OPKernels
+      TransDataLayout(kernel_type_for_var, expected_kernel_type, in, &out);
+    }
     transformed = true;
     PassTensorData(&out, &in);
   }
@@ -62,17 +93,17 @@ void DataTransform(const OpKernelType& expected_kernel_type,
   output_tensor->ShareDataWith(in);
 }

-void CopyVariableWithTensor(const Variable& in_var, const Tensor& tensor,
-                            Variable* out_var) {
+void SetTensorToVariable(const Variable &in_var, const Tensor &tensor,
+                         Variable *out_var) {
   if (in_var.IsType<LoDTensor>()) {
-    auto& in_lod_tensor = in_var.Get<LoDTensor>();
-    auto* tran_lod_tensor = out_var->GetMutable<LoDTensor>();
+    auto &in_lod_tensor = in_var.Get<LoDTensor>();
+    auto *tran_lod_tensor = out_var->GetMutable<LoDTensor>();
     tran_lod_tensor->set_lod(in_lod_tensor.lod());
     tran_lod_tensor->set_layout(in_lod_tensor.layout());
     tran_lod_tensor->ShareDataWith(tensor);
   } else if (in_var.IsType<SelectedRows>()) {
-    auto& in_selected_rows = in_var.Get<SelectedRows>();
-    auto* trans_selected_rows = out_var->GetMutable<SelectedRows>();
+    auto &in_selected_rows = in_var.Get<SelectedRows>();
+    auto *trans_selected_rows = out_var->GetMutable<SelectedRows>();
     trans_selected_rows->set_height(in_selected_rows.height());
     trans_selected_rows->set_rows(in_selected_rows.rows());
     trans_selected_rows->mutable_value()->ShareDataWith(tensor);
diff --git a/paddle/fluid/framework/data_transform.h b/paddle/fluid/framework/data_transform.h
index dee5d8c7c1..ae3ab051bd 100644
--- a/paddle/fluid/framework/data_transform.h
+++ b/paddle/fluid/framework/data_transform.h
@@ -30,12 +30,15 @@ limitations under the License. */
 namespace paddle {
 namespace framework {

-void DataTransform(const OpKernelType& expected_kernel_type,
-                   const OpKernelType& kernel_type_for_var,
-                   const Tensor& input_tensor, Tensor* out);
-
-void CopyVariableWithTensor(const Variable& in_var, const Tensor& tensor,
-                            Variable* out_var);
+void TransformData(const OpKernelType &expected_kernel_type,
+                   const OpKernelType &kernel_type_for_var,
+                   const Tensor &input_tensor, Tensor *out);
+
+/**
+ * Set OutVar from InVar, except that the underlying tensor is shared with
+ * `tensor`.
+ */
+void SetTensorToVariable(const Variable &in_var, const Tensor &tensor,
+                         Variable *out_var);

 }  // namespace framework
 }  // namespace paddle
diff --git a/paddle/fluid/framework/data_type.cc b/paddle/fluid/framework/data_type.cc
new file mode 100644
index 0000000000..1a9ce746ea
--- /dev/null
+++ b/paddle/fluid/framework/data_type.cc
@@ -0,0 +1,107 @@
+// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
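ToMKLDNNDataType in data_layout_transform.h above keys a static map by std::type_index so a runtime typeid can be mapped to an enum tag. A self-contained sketch of that idiom (DType and ToDType are invented names, not Paddle API):

#include <map>
#include <typeindex>

enum class DType { f32, s8, undef };

DType ToDType(std::type_index t) {
  static const std::map<std::type_index, DType> dict{
      {std::type_index(typeid(float)), DType::f32},
      {std::type_index(typeid(char)), DType::s8}};
  auto it = dict.find(t);
  return it != dict.end() ? it->second : DType::undef;  // unknown -> undef
}
// ToDType(typeid(float)) yields DType::f32.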
+ +#include "paddle/fluid/framework/data_type.h" +#include +#include +#include + +using float16 = paddle::platform::float16; + +namespace paddle { +namespace framework { + +struct DataTypeMap { + std::unordered_map cpp_to_proto_; + std::unordered_map proto_to_cpp_; + std::unordered_map proto_to_str_; + std::unordered_map cpp_to_size_; +}; + +static DataTypeMap* InitDataTypeMap(); +// C++11 removes the need for manual locking. Concurrent execution shall wait if +// a static local variable is already being initialized. +// https://stackoverflow.com/questions/11711920/how-to-implement-multithread-safe-singleton-in-c11-without-using-mutex +static DataTypeMap& gDataTypeMap() { + static DataTypeMap* g_data_type_map_ = InitDataTypeMap(); + return *g_data_type_map_; +} + +template +static inline void RegisterType(DataTypeMap* map, + proto::VarType::Type proto_type, + const std::string& name) { + map->proto_to_cpp_.emplace(static_cast(proto_type), typeid(T)); + map->cpp_to_proto_.emplace(typeid(T), proto_type); + map->proto_to_str_.emplace(static_cast(proto_type), name); + map->cpp_to_size_.emplace(typeid(T), sizeof(T)); +} + +static DataTypeMap* InitDataTypeMap() { + auto retv = new DataTypeMap(); + +#define RegType(cc_type, proto_type) \ + RegisterType(retv, proto_type, #cc_type) + + // NOTE: Add your customize type here. + RegType(float16, proto::VarType::FP16); + RegType(float, proto::VarType::FP32); + RegType(double, proto::VarType::FP64); + RegType(int, proto::VarType::INT32); + RegType(int64_t, proto::VarType::INT64); + RegType(bool, proto::VarType::BOOL); + RegType(size_t, proto::VarType::SIZE_T); + RegType(int16_t, proto::VarType::INT16); + RegType(uint8_t, proto::VarType::UINT8); + +#undef RegType + return retv; +} + +proto::VarType::Type ToDataType(std::type_index type) { + auto it = gDataTypeMap().cpp_to_proto_.find(type); + if (it != gDataTypeMap().cpp_to_proto_.end()) { + return it->second; + } + PADDLE_THROW("Not support %s as tensor type", type.name()); +} + +std::type_index ToTypeIndex(proto::VarType::Type type) { + auto it = gDataTypeMap().proto_to_cpp_.find(static_cast(type)); + if (it != gDataTypeMap().proto_to_cpp_.end()) { + return it->second; + } + PADDLE_THROW("Not support proto::VarType::Type(%d) as tensor type", + static_cast(type)); +} + +std::string DataTypeToString(const proto::VarType::Type type) { + auto it = gDataTypeMap().proto_to_str_.find(static_cast(type)); + if (it != gDataTypeMap().proto_to_str_.end()) { + return it->second; + } + PADDLE_THROW("Not support proto::VarType::Type(%d) as tensor type", + static_cast(type)); +} + +size_t SizeOfType(std::type_index type) { + auto it = gDataTypeMap().cpp_to_size_.find(type); + if (it != gDataTypeMap().cpp_to_size_.end()) { + return it->second; + } + PADDLE_THROW("Not support %s as tensor type", type.name()); +} + +} // namespace framework +} // namespace paddle diff --git a/paddle/fluid/framework/data_type.h b/paddle/fluid/framework/data_type.h index 2a528eb3aa..491413db8c 100644 --- a/paddle/fluid/framework/data_type.h +++ b/paddle/fluid/framework/data_type.h @@ -17,51 +17,14 @@ limitations under the License. 
*/ #include #include "paddle/fluid/framework/framework.pb.h" #include "paddle/fluid/platform/enforce.h" + #include "paddle/fluid/platform/float16.h" namespace paddle { namespace framework { -inline proto::VarType::Type ToDataType(std::type_index type) { - if (typeid(platform::float16).hash_code() == type.hash_code()) { - return proto::VarType::FP16; - } else if (typeid(const float).hash_code() == type.hash_code()) { - // CPPLint complains Using C-style cast. Use static_cast() instead - // One fix to this is to replace float with const float because - // typeid(T) == typeid(const T) - // http://en.cppreference.com/w/cpp/language/typeid - return proto::VarType::FP32; - } else if (typeid(const double).hash_code() == type.hash_code()) { - return proto::VarType::FP64; - } else if (typeid(const int).hash_code() == type.hash_code()) { - return proto::VarType::INT32; - } else if (typeid(const int64_t).hash_code() == type.hash_code()) { - return proto::VarType::INT64; - } else if (typeid(const bool).hash_code() == type.hash_code()) { - return proto::VarType::BOOL; - } else { - PADDLE_THROW("Not supported"); - } -} - -inline std::type_index ToTypeIndex(proto::VarType::Type type) { - switch (type) { - case proto::VarType::FP16: - return typeid(platform::float16); - case proto::VarType::FP32: - return typeid(float); - case proto::VarType::FP64: - return typeid(double); - case proto::VarType::INT32: - return typeid(int); - case proto::VarType::INT64: - return typeid(int64_t); - case proto::VarType::BOOL: - return typeid(bool); - default: - PADDLE_THROW("Not support type %d", type); - } -} +extern proto::VarType::Type ToDataType(std::type_index type); +extern std::type_index ToTypeIndex(proto::VarType::Type type); template inline void VisitDataType(proto::VarType::Type type, Visitor visitor) { @@ -84,37 +47,23 @@ inline void VisitDataType(proto::VarType::Type type, Visitor visitor) { case proto::VarType::BOOL: visitor.template operator()(); break; - default: - PADDLE_THROW("Not supported"); - } -} - -inline std::string DataTypeToString(const proto::VarType::Type type) { - switch (type) { - case proto::VarType::FP16: - return "float16"; - case proto::VarType::FP32: - return "float32"; - case proto::VarType::FP64: - return "float64"; + case proto::VarType::UINT8: + visitor.template operator()(); + break; case proto::VarType::INT16: - return "int16"; - case proto::VarType::INT32: - return "int32"; - case proto::VarType::INT64: - return "int64"; - case proto::VarType::BOOL: - return "bool"; + visitor.template operator()(); + break; default: - PADDLE_THROW("Not support type %d", type); + PADDLE_THROW("Not supported %d", type); } } +extern std::string DataTypeToString(const proto::VarType::Type type); +extern size_t SizeOfType(std::type_index type); inline std::ostream& operator<<(std::ostream& out, const proto::VarType::Type& type) { out << DataTypeToString(type); return out; } - } // namespace framework } // namespace paddle diff --git a/paddle/fluid/framework/data_type_test.cc b/paddle/fluid/framework/data_type_test.cc new file mode 100644 index 0000000000..54c41c55ba --- /dev/null +++ b/paddle/fluid/framework/data_type_test.cc @@ -0,0 +1,40 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
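VisitDataType above dispatches from a runtime proto::VarType::Type tag to a compile-time element type via visitor.template operator()<T>(). A visitor only needs a templated call operator; a hypothetical element-size printer, for instance:

#include <cstdio>

struct SizeOfVisitor {
  // VisitDataType invokes visitor.template operator()<T>() with T bound to
  // the C++ type that matches the runtime type tag.
  template <typename T>
  void operator()() const {
    std::printf("element size: %zu bytes\n", sizeof(T));
  }
};
// e.g. framework::VisitDataType(proto::VarType::FP32, SizeOfVisitor());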
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +#include "paddle/fluid/framework/data_type.h" + +#include +#include "gtest/gtest.h" +#include "paddle/fluid/framework/tensor.h" + +TEST(DataType, float16) { + using paddle::framework::Tensor; + using paddle::platform::CPUPlace; + using paddle::platform::float16; + namespace f = paddle::framework; + f::proto::VarType::Type dtype = f::proto::VarType::FP16; + + Tensor tensor; + CPUPlace cpu; + tensor.mutable_data(cpu, f::ToTypeIndex(dtype)); + + // test fp16 tensor + EXPECT_EQ(tensor.type(), std::type_index(typeid(float16))); + + // test fp16 size + EXPECT_EQ(f::SizeOfType(f::ToTypeIndex(dtype)), 2u); + + // test debug info + std::string type = "float16"; + EXPECT_STREQ(f::DataTypeToString(dtype).c_str(), type.c_str()); +} diff --git a/paddle/fluid/framework/data_type_transform.cc b/paddle/fluid/framework/data_type_transform.cc index c0523f3c79..5a57ec2058 100644 --- a/paddle/fluid/framework/data_type_transform.cc +++ b/paddle/fluid/framework/data_type_transform.cc @@ -91,6 +91,12 @@ void TransDataType(const OpKernelType& kernel_type_for_var, case proto::VarType::BOOL: framework::VisitDataType(dst_type, CastDataType(in, out, ctx)); break; + case proto::VarType::INT16: + framework::VisitDataType(dst_type, CastDataType(in, out, ctx)); + break; + case proto::VarType::UINT8: + framework::VisitDataType(dst_type, CastDataType(in, out, ctx)); + break; default: PADDLE_THROW("Not support type %d", src_type); } diff --git a/paddle/fluid/framework/details/CMakeLists.txt b/paddle/fluid/framework/details/CMakeLists.txt index 9de44beafb..8f6c4163d6 100644 --- a/paddle/fluid/framework/details/CMakeLists.txt +++ b/paddle/fluid/framework/details/CMakeLists.txt @@ -1,34 +1,37 @@ -cc_library(var_handle SRCS var_handle.cc DEPS place) +cc_library(var_handle SRCS var_handle.cc DEPS place framework_proto node) cc_library(op_handle_base SRCS op_handle_base.cc DEPS var_handle device_context lod_tensor) cc_library(scale_loss_grad_op_handle SRCS scale_loss_grad_op_handle.cc DEPS op_handle_base scope lod_tensor ddim memory) cc_library(fetch_op_handle SRCS fetch_op_handle.cc DEPS op_handle_base scope lod_tensor ddim memory) cc_library(computation_op_handle SRCS computation_op_handle.cc DEPS framework_proto scope place operator op_registry) -cc_library(send_op_handle SRCS send_op_handle.cc DEPS framework_proto scope place operator op_registry) +cc_library(rpc_op_handle SRCS rpc_op_handle.cc DEPS framework_proto scope place operator op_registry) -cc_library(ssa_graph SRCS ssa_graph.cc DEPS var_handle op_handle_base) -cc_library(ssa_graph_builder SRCS ssa_graph_builder.cc DEPS ssa_graph) +cc_library(multi_devices_helper SRCS multi_devices_helper.cc DEPS graph graph_helper) +cc_library(multi_devices_graph_print_pass SRCS multi_devices_graph_print_pass.cc DEPS multi_devices_helper) +cc_library(multi_devices_graph_check_pass SRCS multi_devices_graph_check_pass.cc DEPS multi_devices_helper) cc_library(variable_visitor SRCS variable_visitor.cc DEPS lod_tensor selected_rows) if(WITH_GPU) - nv_library(nccl_all_reduce_op_handle SRCS nccl_all_reduce_op_handle.cc DEPS op_handle_base scope lod_tensor ddim memory - 
dynload_cuda) - set(multi_devices_graph_builder_deps nccl_all_reduce_op_handle) + nv_library(all_reduce_op_handle SRCS all_reduce_op_handle.cc DEPS op_handle_base scope lod_tensor ddim memory + dynload_cuda variable_visitor) nv_library(reduce_op_handle SRCS reduce_op_handle.cc DEPS op_handle_base variable_visitor scope ddim dynload_cuda) nv_library(broadcast_op_handle SRCS broadcast_op_handle.cc DEPS op_handle_base scope ddim memory variable_visitor dynload_cuda) else() - set(multi_devices_graph_builder_deps) + cc_library(all_reduce_op_handle SRCS all_reduce_op_handle.cc DEPS op_handle_base scope lod_tensor ddim memory + variable_visitor) cc_library(reduce_op_handle SRCS reduce_op_handle.cc DEPS op_handle_base variable_visitor scope ddim) cc_library(broadcast_op_handle SRCS broadcast_op_handle.cc DEPS op_handle_base scope ddim memory variable_visitor) endif() +cc_library(data_balance_op_handle SRCS data_balance_op_handle.cc DEPS op_handle_base scope lod_tensor) cc_library(gather_op_handle SRCS gather_op_handle.cc DEPS op_handle_base scope ddim memory variable_visitor) +cc_library(fuse_vars_op_handle SRCS fuse_vars_op_handle.cc DEPS op_handle_base scope) -cc_library(multi_devices_graph_builder SRCS multi_devices_graph_builder.cc DEPS ssa_graph_builder computation_op_handle - scale_loss_grad_op_handle send_op_handle ${multi_devices_graph_builder_deps} reduce_op_handle broadcast_op_handle) +cc_library(multi_devices_graph_pass SRCS multi_devices_graph_pass.cc DEPS multi_devices_helper computation_op_handle + scale_loss_grad_op_handle rpc_op_handle all_reduce_op_handle reduce_op_handle broadcast_op_handle data_balance_op_handle) -cc_library(ssa_graph_executor SRCS ssa_graph_executor.cc DEPS ssa_graph framework_proto) +cc_library(ssa_graph_executor SRCS ssa_graph_executor.cc DEPS graph framework_proto) cc_library(threaded_ssa_graph_executor SRCS threaded_ssa_graph_executor.cc DEPS fetch_op_handle ssa_graph_executor scope simple_threadpool device_context) @@ -36,5 +39,6 @@ cc_test(broadcast_op_test SRCS broadcast_op_handle_test.cc DEPS var_handle op_ha device_context broadcast_op_handle) cc_test(gather_op_test SRCS gather_op_handle_test.cc DEPS var_handle op_handle_base scope ddim memory device_context gather_op_handle) -cc_test(reduce_op_handle_test SRCS reduce_op_handle_test.cc DEPS var_handle op_handle_base scope ddim memory - device_context reduce_op_handle ) +cc_library(scope_buffered_ssa_graph_executor SRCS scope_buffered_ssa_graph_executor.cc DEPS ssa_graph_executor) +#cc_test(reduce_op_handle_test SRCS reduce_op_handle_test.cc DEPS var_handle op_handle_base scope ddim memory +# device_context reduce_op_handle ) diff --git a/paddle/fluid/framework/details/nccl_all_reduce_op_handle.cc b/paddle/fluid/framework/details/all_reduce_op_handle.cc similarity index 56% rename from paddle/fluid/framework/details/nccl_all_reduce_op_handle.cc rename to paddle/fluid/framework/details/all_reduce_op_handle.cc index 16aa5d067a..bf493a3fa4 100644 --- a/paddle/fluid/framework/details/nccl_all_reduce_op_handle.cc +++ b/paddle/fluid/framework/details/all_reduce_op_handle.cc @@ -11,51 +11,72 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. 
- -#include "paddle/fluid/framework/details/nccl_all_reduce_op_handle.h" #include + +#include "paddle/fluid/framework/details/all_reduce_op_handle.h" +#include "paddle/fluid/framework/details/container_cast.h" #include "paddle/fluid/framework/details/reduce_and_gather.h" +#include "paddle/fluid/framework/details/variable_visitor.h" +#include "paddle/fluid/platform/profiler.h" namespace paddle { namespace framework { namespace details { -NCCLAllReduceOpHandle::NCCLAllReduceOpHandle( - const std::vector &local_scopes, - const std::vector &places, - const platform::NCCLContextMap &ctxs) - : local_scopes_(local_scopes), places_(places), nccl_ctxs_(ctxs) { - for (auto &p : places_) { - this->dev_ctxes_[p] = nccl_ctxs_.DevCtx(p); + +#ifdef PADDLE_WITH_CUDA +AllReduceOpHandle::AllReduceOpHandle(ir::Node *node, + const std::vector &local_scopes, + const std::vector &places, + const platform::NCCLContextMap *ctxs) + : OpHandleBase(node), + local_scopes_(local_scopes), + places_(places), + nccl_ctxs_(ctxs) { + if (nccl_ctxs_) { + for (auto &p : places_) { + this->dev_ctxes_[p] = nccl_ctxs_->DevCtx(p); + } } } +#else +AllReduceOpHandle::AllReduceOpHandle(ir::Node *node, + const std::vector &local_scopes, + const std::vector &places) + : OpHandleBase(node), local_scopes_(local_scopes), places_(places) {} +#endif -void NCCLAllReduceOpHandle::RunImpl() { - if (inputs_.size() == 1) { +void AllReduceOpHandle::RunImpl() { + platform::RecordEvent r("all_reduce", nullptr); + if (NoDummyInputSize() == 1) { return; // No need to all reduce when GPU count = 1; } else { // Wait input done - for (auto *in : inputs_) { - auto &p = static_cast(in)->place_; - if (in->generated_op_) { - in->generated_op_->Wait(dev_ctxes_[p]); - } - } - - auto &var_name = static_cast(this->inputs_[0])->name_; - int dtype = -1; - size_t numel = 0; + WaitInputVarGenerated(); + auto in_var_handles = DynamicCast(this->Inputs()); + auto out_var_handles = DynamicCast(this->Outputs()); + PADDLE_ENFORCE_EQ( + in_var_handles.size(), places_.size(), + "The NoDummyInputSize should be equal to the number of places."); + PADDLE_ENFORCE_EQ( + in_var_handles.size(), out_var_handles.size(), + "The NoDummyInputSize and NoDummyOutputSize should be equal."); std::vector lod_tensors; - for (size_t i = 0; i < local_scopes_.size(); ++i) { auto *s = local_scopes_[i]; auto &local_scope = *s->FindVar(kLocalExecScopeName)->Get(); - - auto &lod_tensor = local_scope.FindVar(var_name)->Get(); + auto &lod_tensor = + local_scope.FindVar(in_var_handles[i]->name_)->Get(); lod_tensors.emplace_back(&lod_tensor); + PADDLE_ENFORCE_EQ(in_var_handles[i]->name_, out_var_handles[i]->name_, + "The name of input and output should be equal."); } if (platform::is_gpu_place(lod_tensors[0]->place())) { +#ifdef PADDLE_WITH_CUDA + PADDLE_ENFORCE(nccl_ctxs_, "nccl_ctxs should not be nullptr."); + int dtype = -1; + size_t numel = 0; std::vector> all_reduce_calls; for (size_t i = 0; i < local_scopes_.size(); ++i) { auto &p = places_[i]; @@ -71,7 +92,7 @@ void NCCLAllReduceOpHandle::RunImpl() { } int dev_id = boost::get(p).device; - auto &nccl_ctx = nccl_ctxs_.at(dev_id); + auto &nccl_ctx = nccl_ctxs_->at(dev_id); auto stream = nccl_ctx.stream(); auto comm = nccl_ctx.comm_; all_reduce_calls.emplace_back([=] { @@ -86,22 +107,25 @@ void NCCLAllReduceOpHandle::RunImpl() { call(); } }); +#else + PADDLE_THROW("Not compiled with CUDA"); +#endif } else { // Special handle CPU only Operator's gradient. 
Like CRF auto &trg = *this->local_scopes_[0] ->FindVar(kLocalExecScopeName) ->Get() - ->Var() + ->FindVar(out_var_handles[0]->name_) ->GetMutable(); // Reduce All Tensor to trg in CPU ReduceLoDTensor func(lod_tensors, &trg); VisitDataType(ToDataType(lod_tensors[0]->type()), func); - for (size_t i = 0; i < local_scopes_.size(); ++i) { + for (size_t i = 1; i < local_scopes_.size(); ++i) { auto &scope = *local_scopes_[i]->FindVar(kLocalExecScopeName)->Get(); auto &p = places_[i]; - auto *var = scope.FindVar(var_name); + auto *var = scope.FindVar(out_var_handles[i]->name_); auto *dev_ctx = dev_ctxes_[p]; RunAndRecordEvent(p, [&trg, var, dev_ctx, p] { @@ -114,7 +138,7 @@ void NCCLAllReduceOpHandle::RunImpl() { } } -std::string NCCLAllReduceOpHandle::Name() const { return "nccl_all_reduce"; } +std::string AllReduceOpHandle::Name() const { return "all_reduce"; } } // namespace details } // namespace framework } // namespace paddle diff --git a/paddle/fluid/framework/details/nccl_all_reduce_op_handle.h b/paddle/fluid/framework/details/all_reduce_op_handle.h similarity index 67% rename from paddle/fluid/framework/details/nccl_all_reduce_op_handle.h rename to paddle/fluid/framework/details/all_reduce_op_handle.h index a0c321843e..f6ef3a1367 100644 --- a/paddle/fluid/framework/details/nccl_all_reduce_op_handle.h +++ b/paddle/fluid/framework/details/all_reduce_op_handle.h @@ -20,17 +20,23 @@ #include "paddle/fluid/framework/details/op_handle_base.h" #include "paddle/fluid/framework/lod_tensor.h" #include "paddle/fluid/framework/scope.h" +#ifdef PADDLE_WITH_CUDA #include "paddle/fluid/platform/nccl_helper.h" +#endif namespace paddle { namespace framework { namespace details { -struct NCCLAllReduceOpHandle : public OpHandleBase { - NCCLAllReduceOpHandle(const std::vector &local_scopes, - const std::vector &places, - const platform::NCCLContextMap &ctxs); - +struct AllReduceOpHandle : public OpHandleBase { +#ifdef PADDLE_WITH_CUDA + AllReduceOpHandle(ir::Node *node, const std::vector &local_scopes, + const std::vector &places, + const platform::NCCLContextMap *ctxs); +#else + AllReduceOpHandle(ir::Node *node, const std::vector &local_scopes, + const std::vector &places); +#endif std::string Name() const override; // Delay and buffer nccl_all_reduce together can significantly increase @@ -41,9 +47,11 @@ struct NCCLAllReduceOpHandle : public OpHandleBase { void RunImpl() override; private: - const std::vector &local_scopes_; - const std::vector &places_; - const platform::NCCLContextMap &nccl_ctxs_; + std::vector local_scopes_; + std::vector places_; +#ifdef PADDLE_WITH_CUDA + const platform::NCCLContextMap *nccl_ctxs_; +#endif }; } // namespace details diff --git a/paddle/fluid/framework/details/broadcast_op_handle.cc b/paddle/fluid/framework/details/broadcast_op_handle.cc index 2afa47c81b..1d9f1bd6e4 100644 --- a/paddle/fluid/framework/details/broadcast_op_handle.cc +++ b/paddle/fluid/framework/details/broadcast_op_handle.cc @@ -38,9 +38,7 @@ void BroadcastOpHandle::RunImpl() { out_var_handles.size(), places_.size(), "The number of output should equal to the number of places."); - // Wait input done, this Wait is asynchronous operation platform::Place - // &in_place; - WaitInputVarGenerated(*in_var_handle); + WaitInputVarGenerated(); std::vector var_scopes; for (auto *s : local_scopes_) { @@ -50,29 +48,9 @@ void BroadcastOpHandle::RunImpl() { auto *in_var = var_scopes.at(in_var_handle->scope_idx_)->FindVar(in_var_handle->name_); PADDLE_ENFORCE_NOT_NULL(in_var); - Tensor &in_tensor = 
VariableVisitor::GetMutableTensor(in_var); - // NOTE: The tensors' Place of input and output must be all on GPU or all on - // CPU. - for (auto *out_var_handle : out_var_handles) { - if (out_var_handle->IsTheSameVar(*in_var_handle)) { - continue; - } - auto t_out_p = out_var_handle->place_; - auto *out_var = var_scopes.at(out_var_handle->scope_idx_) - ->FindVar(out_var_handle->name_); - PADDLE_ENFORCE_NOT_NULL(out_var); - if (platform::is_gpu_place(in_tensor.place())) { - PADDLE_ENFORCE(platform::is_gpu_place(t_out_p), - "Places of input and output must be all on GPU."); - } else { - t_out_p = platform::CPUPlace(); - } - VariableVisitor::ShareDimsAndLoD(*in_var, out_var); - VariableVisitor::GetMutableTensor(out_var).mutable_data(t_out_p, - in_tensor.type()); - } + InitOutputValue(*in_var_handle, out_var_handles); if (platform::is_cpu_place(in_tensor.place())) { for (auto *out_var_handle : out_var_handles) { @@ -95,6 +73,9 @@ void BroadcastOpHandle::RunImpl() { int root_id = boost::get(in_tensor.place()).device; std::vector> broadcast_calls; + int type = platform::ToNCCLDataType(in_tensor.type()); + size_t numel = static_cast(in_tensor.numel()); + for (auto out_var_handle : out_var_handles) { Variable *out_var = var_scopes.at(out_var_handle->scope_idx_) ->FindVar(out_var_handle->name_); @@ -109,13 +90,11 @@ void BroadcastOpHandle::RunImpl() { send_recv_buffer = const_cast(in_tensor.data()); out_handle = out_var_handle; } else { - send_recv_buffer = - VariableVisitor::GetMutableTensor(out_var).mutable_data( - out_var_handle->place_); + send_recv_buffer = VariableVisitor::GetMutableTensor(out_var) + .Resize(in_tensor.dims()) + .mutable_data(out_var_handle->place_); } - int type = platform::ToNCCLDataType(in_tensor.type()); - size_t numel = static_cast(in_tensor.numel()); broadcast_calls.emplace_back( [send_recv_buffer, numel, type, root_id, &nccl_ctx] { PADDLE_ENFORCE(platform::dynload::ncclBcast( @@ -147,11 +126,37 @@ void BroadcastOpHandle::RunImpl() { } } -void BroadcastOpHandle::WaitInputVarGenerated(const VarHandle &in_var) { - if (in_var.generated_op_) { - for (auto &pair : dev_ctxes_) { - in_var.generated_op_->Wait(pair.second); +void BroadcastOpHandle::InitOutputValue( + const VarHandle &in_var_handle, + const std::vector &out_var_handles) const { + std::vector var_scopes; + for (auto *s : local_scopes_) { + var_scopes.emplace_back(s->FindVar(kLocalExecScopeName)->Get()); + } + auto *in_var = + var_scopes.at(in_var_handle.scope_idx_)->FindVar(in_var_handle.name_); + + Tensor &in_tensor = VariableVisitor::GetMutableTensor(in_var); + + // NOTE: The tensors' Place of input and output must be all on GPU or all on + // CPU. 
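Both AllReduceOpHandle above and this broadcast handle collect one NCCL call per device and then fire them together. With the raw NCCL C API that grouping pattern looks roughly like the sketch below; buffer setup is assumed, and Paddle itself wraps the group in a RAII guard that is not visible in these hunks:

#include <cuda_runtime.h>
#include <nccl.h>
#include <vector>

// Illustrative only: send/recv buffers, communicators and streams are
// assumed to be initialized by the caller (what NCCLContextMap provides).
void AllReduceGrouped(const std::vector<float*>& send,
                      const std::vector<float*>& recv, size_t count,
                      const std::vector<ncclComm_t>& comms,
                      const std::vector<cudaStream_t>& streams) {
  ncclGroupStart();  // fuse the per-device calls into one collective
  for (size_t i = 0; i < comms.size(); ++i) {
    ncclAllReduce(send[i], recv[i], count, ncclFloat, ncclSum, comms[i],
                  streams[i]);
  }
  ncclGroupEnd();  // NCCL launches the fused collective here
}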
+ for (auto *out_var_handle : out_var_handles) { + if (out_var_handle->IsTheSameVar(in_var_handle)) { + continue; + } + auto t_out_p = out_var_handle->place_; + auto *out_var = var_scopes.at(out_var_handle->scope_idx_) + ->FindVar(out_var_handle->name_); + PADDLE_ENFORCE_NOT_NULL(out_var); + if (is_gpu_place(in_tensor.place())) { + PADDLE_ENFORCE(platform::is_gpu_place(t_out_p), + "Places of input and output must be all on GPU."); + } else { + t_out_p = platform::CPUPlace(); } + VariableVisitor::ShareDimsAndLoD(*in_var, out_var); + VariableVisitor::GetMutableTensor(out_var).mutable_data(t_out_p, + in_tensor.type()); } } diff --git a/paddle/fluid/framework/details/broadcast_op_handle.h b/paddle/fluid/framework/details/broadcast_op_handle.h index 984a95008c..fe4e733e43 100644 --- a/paddle/fluid/framework/details/broadcast_op_handle.h +++ b/paddle/fluid/framework/details/broadcast_op_handle.h @@ -35,10 +35,13 @@ namespace details { struct BroadcastOpHandle : public OpHandleBase { public: #ifdef PADDLE_WITH_CUDA - BroadcastOpHandle(const std::vector &local_scopes, + BroadcastOpHandle(ir::Node *node, const std::vector &local_scopes, const std::vector &places, const platform::NCCLContextMap *nccl_ctxs) - : local_scopes_(local_scopes), places_(places), nccl_ctxs_(nccl_ctxs) { + : OpHandleBase(node), + local_scopes_(local_scopes), + places_(places), + nccl_ctxs_(nccl_ctxs) { if (nccl_ctxs_) { for (auto &p_ctx : nccl_ctxs_->contexts_) { dev_ctxes_[platform::CUDAPlace(p_ctx.first)] = p_ctx.second.ctx_.get(); @@ -46,9 +49,9 @@ struct BroadcastOpHandle : public OpHandleBase { } } #else - BroadcastOpHandle(const std::vector &local_scopes, + BroadcastOpHandle(ir::Node *node, const std::vector &local_scopes, const std::vector &places) - : local_scopes_(local_scopes), places_(places) {} + : OpHandleBase(node), local_scopes_(local_scopes), places_(places) {} #endif std::string Name() const override; @@ -57,14 +60,16 @@ struct BroadcastOpHandle : public OpHandleBase { protected: void RunImpl() override; - void WaitInputVarGenerated(const VarHandle &in_var); private: - const std::vector &local_scopes_; - const std::vector &places_; + std::vector local_scopes_; + std::vector places_; #ifdef PADDLE_WITH_CUDA const platform::NCCLContextMap *nccl_ctxs_; #endif + + void InitOutputValue(const VarHandle &in_var_handle, + const std::vector &out_var_handles) const; }; } // namespace details } // namespace framework diff --git a/paddle/fluid/framework/details/broadcast_op_handle_test.cc b/paddle/fluid/framework/details/broadcast_op_handle_test.cc index c6e923ef77..1413f7bd9a 100644 --- a/paddle/fluid/framework/details/broadcast_op_handle_test.cc +++ b/paddle/fluid/framework/details/broadcast_op_handle_test.cc @@ -96,48 +96,61 @@ struct TestBroadcastOpHandle { } param_scopes_[input_scope_idx]->Var("input"); + std::unique_ptr n( + new ir::Node("node0", ir::Node::Type::kOperation)); if (use_gpu_) { #ifdef PADDLE_WITH_CUDA - op_handle_.reset( - new BroadcastOpHandle(local_scopes_, gpu_list_, nccl_ctxs_.get())); + op_handle_.reset(new BroadcastOpHandle(n.get(), local_scopes_, gpu_list_, + nccl_ctxs_.get())); #else PADDLE_THROW("CUDA is not support."); #endif } else { #ifdef PADDLE_WITH_CUDA - op_handle_.reset( - new BroadcastOpHandle(local_scopes_, gpu_list_, nccl_ctxs_.get())); + op_handle_.reset(new BroadcastOpHandle(n.get(), local_scopes_, gpu_list_, + nccl_ctxs_.get())); #else - op_handle_.reset(new BroadcastOpHandle(local_scopes_, gpu_list_)); + op_handle_.reset( + new BroadcastOpHandle(n.get(), local_scopes_, 
                                             gpu_list_));
 #endif
     }

-    auto* in_var_handle =
-        new VarHandle(1, input_scope_idx, "input", gpu_list_[input_scope_idx]);
+    std::unique_ptr<ir::Node> v(
+        new ir::Node("node1", ir::Node::Type::kVariable));
+    auto* in_var_handle = new VarHandle(v.get(), 1, input_scope_idx, "input",
+                                        gpu_list_[input_scope_idx]);
     vars_.emplace_back(in_var_handle);
     op_handle_->AddInput(in_var_handle);

     // add dummy var
-    vars_.emplace_back(new DummyVarHandle());
+
+    std::unique_ptr<ir::Node> v2(
+        new ir::Node("node2", ir::Node::Type::kVariable));
+    vars_.emplace_back(new DummyVarHandle(v2.get()));
     DummyVarHandle* dummy_var_handle =
         static_cast<DummyVarHandle*>(vars_.back().get());
-    dummy_var_handle->generated_op_ = nullptr;
+    dummy_var_handle->ClearGeneratedOp();
     op_handle_->AddInput(dummy_var_handle);

     for (size_t j = 0; j < gpu_list_.size(); ++j) {
       if (!use_gpu_) {
         op_handle_->SetDeviceContext(gpu_list_[j], ctxs_[j].get());
       }
-      VarHandle* out_var_handle = new VarHandle(2, j, "out", gpu_list_[j]);
+      std::unique_ptr<ir::Node> v3(
+          new ir::Node("node3", ir::Node::Type::kVariable));
+      VarHandle* out_var_handle =
+          new VarHandle(v3.get(), 2, j, "out", gpu_list_[j]);
       vars_.emplace_back(out_var_handle);
       op_handle_->AddOutput(out_var_handle);
     }

     // add dummy var
-    vars_.emplace_back(new DummyVarHandle());
+    std::unique_ptr<ir::Node> v4(
+        new ir::Node("node4", ir::Node::Type::kVariable));
+    vars_.emplace_back(new DummyVarHandle(v4.get()));
     DummyVarHandle* out_dummy_var_handle =
         static_cast<DummyVarHandle*>(vars_.back().get());
-    out_dummy_var_handle->generated_op_ = nullptr;
+    out_dummy_var_handle->ClearGeneratedOp();
     op_handle_->AddOutput(out_dummy_var_handle);
   }
diff --git a/paddle/fluid/framework/details/build_strategy.h b/paddle/fluid/framework/details/build_strategy.h
new file mode 100644
index 0000000000..8714a42162
--- /dev/null
+++ b/paddle/fluid/framework/details/build_strategy.h
@@ -0,0 +1,62 @@
+// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#pragma once
+
+#include <string>
+
+namespace paddle {
+namespace framework {
+namespace details {
+
+struct BuildStrategy {
+  // ParallelExecutor supports two ReduceStrategy modes, kAllReduce and
+  // kReduce, on both CPU and GPU. With kAllReduce, every thread optimizes
+  // all of the parameters separately. With kReduce, the optimization of the
+  // parameters is partitioned across the threads.
+  // For example, if a model has 100 parameters and runs with four threads,
+  // then under kAllReduce each thread optimizes all 100 parameters
+  // separately, while under kReduce each thread optimizes only 25 of them.
+  // Note in particular that with kReduce in CPU training, all the
+  // parameters are shared between the threads, which saves memory.
+  // FIXME(zcd): The results of the two modes (kAllReduce and kReduce) may
+  // not be equal on GPU, because summing in a different order can give a
+  // different result; for example, the result of `a+b+c+d` may differ from
+  // the result of `c+a+b+d`.
+  // On GPU, both kAllReduce and kReduce are implemented with NCCL, so the
+  // results of kAllReduce and kReduce may not be equal. On CPU, if you want
+  // to fix the summation order so that kAllReduce and kReduce produce
+  // identical results, you can set `FLAGS_cpu_deterministic=true` in the
+  // environment.
+  enum class ReduceStrategy { kAllReduce = 0, kReduce = 1 };
+
+  enum class GradientScaleStrategy {
+    kCoeffNumDevice = 0,
+    kOne = 1,
+    kCustomized = 2,
+  };
+
+  ReduceStrategy reduce_{ReduceStrategy::kAllReduce};
+  GradientScaleStrategy gradient_scale_{GradientScaleStrategy::kCoeffNumDevice};
+
+  std::string debug_graphviz_path_{""};
+
+  bool enable_data_balance_{false};
+};
+
+}  // namespace details
+}  // namespace framework
+}  // namespace paddle
diff --git a/paddle/fluid/framework/details/computation_op_handle.cc b/paddle/fluid/framework/details/computation_op_handle.cc
index 7ff0efe093..b6282debdb 100644
--- a/paddle/fluid/framework/details/computation_op_handle.cc
+++ b/paddle/fluid/framework/details/computation_op_handle.cc
@@ -19,27 +19,28 @@ namespace paddle {
 namespace framework {
 namespace details {
-ComputationOpHandle::ComputationOpHandle(const OpDesc &op_desc, Scope *scope,
+ComputationOpHandle::ComputationOpHandle(ir::Node *node, Scope *scope,
                                          platform::Place place)
-    : op_(framework::OpRegistry::CreateOp(op_desc)),
+    : OpHandleBase(node),
+      op_(framework::OpRegistry::CreateOp(*node->Op())),
       scope_(scope),
       place_(place) {}

 void ComputationOpHandle::RunImpl() {
-  auto *cur_ctx = dev_ctxes_[place_];
-  for (auto *in : inputs_) {
-    bool need_wait = in->generated_op_ &&
-                     in->generated_op_->DeviceContext(place_) != cur_ctx;
-    if (need_wait) {
-      in->generated_op_->Wait(cur_ctx);
-    }
-  }
+  WaitInputVarGenerated(place_);

   this->RunAndRecordEvent([this] {
     op_->Run(*scope_->FindVar(kLocalExecScopeName)->Get<Scope *>(), place_);
   });
 }

+bool ComputationOpHandle::NeedWait(VarHandleBase *in_var) {
+  bool need_wait =
+      in_var && in_var->GeneratedOp() &&
+      in_var->GeneratedOp()->DeviceContext(place_) != dev_ctxes_[place_];
+  return need_wait;
+}
+
 std::string ComputationOpHandle::Name() const { return op_->Type(); }

 }  // namespace details
 }  // namespace framework
diff --git a/paddle/fluid/framework/details/computation_op_handle.h b/paddle/fluid/framework/details/computation_op_handle.h
index c363b973d9..d9fcd92427 100644
--- a/paddle/fluid/framework/details/computation_op_handle.h
+++ b/paddle/fluid/framework/details/computation_op_handle.h
@@ -28,14 +28,15 @@ namespace framework {
 namespace details {
 struct ComputationOpHandle : public OpHandleBase {
  public:
-  ComputationOpHandle(const OpDesc &op_desc, Scope *scope,
-                      platform::Place place);
+  ComputationOpHandle(ir::Node *node, Scope *scope, platform::Place place);

   std::string Name() const override;

  protected:
   void RunImpl() override;

+  bool NeedWait(VarHandleBase *in_var) override;
+
  private:
   std::unique_ptr<OperatorBase> op_;
   Scope *scope_;
diff --git a/paddle/fluid/framework/details/data_balance_op_handle.cc b/paddle/fluid/framework/details/data_balance_op_handle.cc
new file mode 100644
index 0000000000..525d243224
--- /dev/null
+++ b/paddle/fluid/framework/details/data_balance_op_handle.cc
@@ -0,0 +1,154 @@
+// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
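For orientation, this is roughly how a caller would fill in the BuildStrategy defined above; the construction site is not part of this patch, so treat the snippet as an assumed usage sketch rather than Paddle's actual wiring:

#include "paddle/fluid/framework/details/build_strategy.h"

void ConfigureBuildStrategy() {
  using paddle::framework::details::BuildStrategy;
  BuildStrategy build;
  // Partition parameter optimization across threads instead of replicating it.
  build.reduce_ = BuildStrategy::ReduceStrategy::kReduce;
  build.gradient_scale_ = BuildStrategy::GradientScaleStrategy::kOne;
  build.enable_data_balance_ = true;  // opt into DataBalanceOpHandle below
}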
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "paddle/fluid/framework/details/data_balance_op_handle.h" +#include +#include "paddle/fluid/framework/details/container_cast.h" + +namespace paddle { +namespace framework { +namespace details { + +#ifdef PADDLE_WITH_CUDA +DataBalanceOpHandle::DataBalanceOpHandle( + ir::Node *node, const std::vector &local_scopes, + const std::vector &places, + const platform::NCCLContextMap *ctxs) + : OpHandleBase(node), local_scopes_(local_scopes), places_(places) { + if (ctxs) { + for (auto &p : places_) { + this->dev_ctxes_[p] = ctxs->DevCtx(p); + } + } +} +#else +DataBalanceOpHandle::DataBalanceOpHandle( + ir::Node *node, const std::vector &local_scopes, + const std::vector &places) + : OpHandleBase(node), local_scopes_(local_scopes), places_(places) {} +#endif + +std::string DataBalanceOpHandle::Name() const { return "data balance"; } + +std::vector> DataBalanceOpHandle::GetBalancePlan( + const std::vector &device_sizes) { + int device_num = device_sizes.size(); + int total_size = 0; + int empty_num = 0; + std::vector> size_device_vec; + size_device_vec.reserve(device_num); + for (int i = 0; i < device_num; ++i) { + if (device_sizes[i] == 0) { + ++empty_num; + } + total_size += device_sizes[i]; + size_device_vec.push_back({{device_sizes[i], i}}); + } + std::vector> res; + if (empty_num == 0) { + // No need to do data balance. + return res; + } + if (total_size < device_num) { + // No enough data. 
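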
+    PADDLE_THROW_EOF();
+  }
+  std::sort(size_device_vec.begin(), size_device_vec.end(),
+            [](const std::array<int, 2> &a, const std::array<int, 2> &b) {
+              return a[0] > b[0];
+            });
+  int expected_device_size = total_size / device_num;
+  int src_idx = 0;
+  for (int dst_idx = device_num - empty_num; dst_idx < device_num; ++dst_idx) {
+    if (size_device_vec[src_idx][0] <= expected_device_size) {
+      ++src_idx;
+      PADDLE_ENFORCE_LT(
+          src_idx, device_num - empty_num,
+          "In the current strategy an empty tensor should not be a copy "
+          "source.");
+    }
+    size_device_vec[src_idx][0] -= expected_device_size;
+    size_device_vec[dst_idx][0] += expected_device_size;
+    res.push_back({{size_device_vec[src_idx][1], size_device_vec[dst_idx][1],
+                    expected_device_size}});
+  }
+  return res;
+}
+
+void DataBalanceOpHandle::RunImpl() {
+  PADDLE_ENFORCE_GT(places_.size(), 1,
+                    "Data balance can only be enabled when the number of "
+                    "places to run is larger than 1.");
+  auto in_var_handles = DynamicCast<VarHandle>(inputs_);
+  auto out_var_handles = DynamicCast<VarHandle>(outputs_);
+  PADDLE_ENFORCE(in_var_handles.size() % places_.size() == 0);
+  PADDLE_ENFORCE_EQ(
+      in_var_handles.size(), out_var_handles.size(),
+      "The NoDummyInputSize and NoDummyOutputSize should be equal.");
+  int data_num = in_var_handles.size() / places_.size();
+  WaitInputVarGenerated();
+  std::vector<std::vector<LoDTensor *>> lod_tensors(data_num);
+  std::vector<int> device_sizes;
+  for (int i = 0; i < static_cast<int>(in_var_handles.size()); ++i) {
+    PADDLE_ENFORCE_EQ(in_var_handles[i]->name_, out_var_handles[i]->name_,
+                      "The name of input and output should be equal.");
+    int place_idx = i / data_num;
+    int data_idx = i % data_num;
+    auto *local_scope =
+        local_scopes_[place_idx]->FindVar(kLocalExecScopeName)->Get<Scope *>();
+    auto *tensor_var = local_scope->FindVar(in_var_handles[i]->name_);
+    PADDLE_ENFORCE(tensor_var->IsType<LoDTensor>());
+    auto *tensor = tensor_var->GetMutable<LoDTensor>();
+    lod_tensors[data_idx].push_back(tensor);
+    int ins_size =
+        tensor->lod().empty() ? tensor->dims()[0] : tensor->NumElements();
+    if (data_idx == 0) {
+      device_sizes.emplace_back(ins_size);
+    } else {
+      PADDLE_ENFORCE_EQ(
+          ins_size, device_sizes.at(place_idx),
+          "All data on the same device shall have the same batch size.");
+    }
+  }
+  const auto &balance_plan = GetBalancePlan(device_sizes);
+
+  for (const auto &trans : balance_plan) {
+    for (int data_idx = 0; data_idx < data_num; ++data_idx) {
+      LoDTensor *src_tensor = lod_tensors[data_idx][trans[0]];
+      LoDTensor *dst_tensor = lod_tensors[data_idx][trans[1]];
+      int trans_ins_size = trans[2];
+      LoD src_lod = src_tensor->lod();
+      int src_ins_size =
+          src_lod.empty() ?
+          src_lod.empty() ? src_tensor->dims()[0] : src_tensor->NumElements();
+      int cut_point = src_ins_size - trans_ins_size;
+      if (!src_lod.empty()) {
+        for (auto &level : src_lod) {
+          cut_point = level[cut_point];
+        }
+      }
+      TensorCopySync(src_tensor->Slice(cut_point, src_tensor->dims()[0]),
+                     dst_tensor->place(), dst_tensor);
+      src_tensor->ShareDataWith(src_tensor->Slice(0, cut_point));
+      if (!src_lod.empty()) {
+        dst_tensor->set_lod(SliceInLevel(
+            src_lod, 0, src_ins_size - trans_ins_size, src_ins_size));
+        src_tensor->set_lod(
+            SliceInLevel(src_lod, 0, 0, src_ins_size - trans_ins_size));
+      }
+    }
+  }
+}
+
+}  // namespace details
+}  // namespace framework
+}  // namespace paddle
diff --git a/paddle/fluid/framework/details/data_balance_op_handle.h b/paddle/fluid/framework/details/data_balance_op_handle.h
new file mode 100644
index 0000000000..0462fb6ec7
--- /dev/null
+++ b/paddle/fluid/framework/details/data_balance_op_handle.h
@@ -0,0 +1,59 @@
+// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#pragma once
+
+#include <string>
+#include <vector>
+#include "paddle/fluid/framework/details/op_handle_base.h"
+#include "paddle/fluid/framework/lod_tensor.h"
+#include "paddle/fluid/framework/scope.h"
+#ifdef PADDLE_WITH_CUDA
+#include "paddle/fluid/platform/nccl_helper.h"
+#endif
+
+namespace paddle {
+namespace framework {
+namespace details {
+
+struct DataBalanceOpHandle : public OpHandleBase {
+ public:
+#ifdef PADDLE_WITH_CUDA
+  DataBalanceOpHandle(ir::Node *node, const std::vector<Scope *> &local_scopes,
+                      const std::vector<platform::Place> &places,
+                      const platform::NCCLContextMap *ctxs);
+#else
+  DataBalanceOpHandle(ir::Node *node, const std::vector<Scope *> &local_scopes,
+                      const std::vector<platform::Place> &places);
+#endif
+
+  std::string Name() const override;
+
+  bool IsMultiDeviceTransfer() override { return false; };
+
+ protected:
+  void RunImpl() override;
+
+ private:
+  // std::vector<(src_dev_id, dst_dev_id, trans_size)>
+  std::vector<std::array<int, 3>> GetBalancePlan(
+      const std::vector<int> &batch_size_per_device);
+
+  const std::vector<Scope *> local_scopes_;
+  const std::vector<platform::Place> places_;
+};
+
+}  // namespace details
+}  // namespace framework
+}  // namespace paddle
diff --git a/paddle/fluid/framework/details/exception_holder.h b/paddle/fluid/framework/details/exception_holder.h
new file mode 100644
index 0000000000..6e302a2923
--- /dev/null
+++ b/paddle/fluid/framework/details/exception_holder.h
@@ -0,0 +1,83 @@
+// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#pragma once
+
+#include <memory>
+#include <mutex>
+
+#include "paddle/fluid/platform/enforce.h"
+
+namespace paddle {
+namespace framework {
+namespace details {
+
+class ExceptionHolder {
+ public:
+  void Catch(const platform::EnforceNotMet& exp) {
+    std::lock_guard<std::mutex> lock(mu_);
+    exception_.reset(new platform::EnforceNotMet(exp));
+    type_ = kEnforceNotMet;
+  }
+
+  void Catch(const platform::EOFException& exp) {
+    std::lock_guard<std::mutex> lock(mu_);
+    // EOFException will not cover up an existing EnforceNotMet.
+    if (exception_.get() == nullptr) {
+      exception_.reset(new platform::EOFException(exp));
+      type_ = kEOF;
+    }
+  }
+
+  bool ExceptionCatched() const {
+    std::lock_guard<std::mutex> lock(mu_);
+    return exception_.get() != nullptr;
+  }
+
+  void Throw() {
+    std::lock_guard<std::mutex> lock(mu_);
+    switch (type_) {
+      case kNone:
+        break;
+      case kEnforceNotMet: {
+        auto e = *static_cast<platform::EnforceNotMet*>(exception_.get());
+        throw e;
+        break;
+      }
+      case kEOF: {
+        auto e = *static_cast<platform::EOFException*>(exception_.get());
+        throw e;
+        break;
+      }
+      default:
+        LOG(FATAL) << "Unknown exception.";
+    }
+    exception_.reset();
+    type_ = kNone;
+  }
+
+  void Clear() {
+    std::lock_guard<std::mutex> lock(mu_);
+    exception_.reset();
+    type_ = kNone;
+  }
+
+ private:
+  enum ExceptionType { kNone, kEnforceNotMet, kEOF };
+  ExceptionType type_{kNone};
+
+  std::unique_ptr<std::exception> exception_;
+  mutable std::mutex mu_;
+};
+
+}  // namespace details
+}  // namespace framework
+}  // namespace paddle
diff --git a/paddle/fluid/framework/details/execution_strategy.h b/paddle/fluid/framework/details/execution_strategy.h
new file mode 100644
index 0000000000..716d674fa2
--- /dev/null
+++ b/paddle/fluid/framework/details/execution_strategy.h
@@ -0,0 +1,30 @@
+// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
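+
+// A minimal usage sketch (illustrative only, not part of this patch) for the
+// ExceptionHolder added above; the worker thread and RunOp() are hypothetical:
+//
+//   ExceptionHolder holder;
+//   std::thread worker([&] {
+//     try {
+//       RunOp();  // may raise EnforceNotMet or EOFException
+//     } catch (platform::EOFException& e) {
+//       holder.Catch(e);  // EOF never overwrites a stored EnforceNotMet
+//     } catch (platform::EnforceNotMet& e) {
+//       holder.Catch(e);
+//     }
+//   });
+//   worker.join();
+//   if (holder.ExceptionCatched()) holder.Throw();  // rethrow on the caller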
+
+#pragma once
+
+namespace paddle {
+namespace framework {
+namespace details {
+
+struct ExecutionStrategy {
+  size_t num_threads_{0};
+  bool use_cuda_{true};
+  bool allow_op_delay_{false};
+  size_t num_iteration_per_drop_scope_{100};
+};
+
+}  // namespace details
+}  // namespace framework
+}  // namespace paddle
diff --git a/paddle/fluid/framework/details/fetch_op_handle.cc b/paddle/fluid/framework/details/fetch_op_handle.cc
index a3cae8c64c..fe18b2060c 100644
--- a/paddle/fluid/framework/details/fetch_op_handle.cc
+++ b/paddle/fluid/framework/details/fetch_op_handle.cc
@@ -21,17 +21,20 @@ namespace paddle {
 namespace framework {
 namespace details {
 
-FetchOpHandle::FetchOpHandle(FeedFetchList *data, size_t offset,
+FetchOpHandle::FetchOpHandle(ir::Node *node, FeedFetchList *data, size_t offset,
                              std::vector<Scope *> *local_scopes)
-    : data_(data), offset_(offset), local_scopes_(local_scopes) {}
+    : OpHandleBase(node),
+      data_(data),
+      offset_(offset),
+      local_scopes_(local_scopes) {}
 
 FetchOpHandle::~FetchOpHandle() {
   for (auto *input_var : inputs_) {
-    input_var->pending_ops_.erase(this);
+    input_var->RemoveOutput(this, this->Node());
   }
 }
 
-void FetchOpHandle::Wait(platform::DeviceContext *waited_dev) {
+void FetchOpHandle::RecordWaitEventOnCtx(platform::DeviceContext *waited_ctx) {
   PADDLE_THROW("Nobody should wait FetchOp. Unexpected Error");
 }
 
@@ -45,26 +48,21 @@ void FetchOpHandle::WaitAndMergeCPUTensors() const {
 }
 
 void FetchOpHandle::RunImpl() {
-  auto cpu_ctx =
-      platform::DeviceContextPool::Instance().Get(platform::CPUPlace());
-  for (auto *input : inputs_) {
-    auto *var = static_cast<VarHandle *>(input);
-    if (var->generated_op_) {
-      var->generated_op_->Wait(cpu_ctx);
-    }
-  }
+  WaitInputVarGenerated(platform::CPUPlace());
+
   tensors_.resize(inputs_.size());
-  auto *var_handle = static_cast<VarHandle *>(inputs_[0]);
-  auto &var_name = var_handle->name_;
   platform::CPUPlace cpu;
   auto &scopes = *local_scopes_;
-  for (size_t i = 0; i < scopes.size(); ++i) {
-    auto &scope = scopes[i];
-    auto *var =
-        scope->FindVar(kLocalExecScopeName)->Get<Scope *>()->FindVar(var_name);
+  for (size_t i = 0; i < inputs_.size(); ++i) {
+    auto *var_handle = static_cast<VarHandle *>(inputs_[i]);
+    auto &scope = scopes.at(var_handle->scope_idx_);
+    auto *var = scope->FindVar(kLocalExecScopeName)
+                    ->Get<Scope *>()
+                    ->FindVar(var_handle->name_);
     PADDLE_ENFORCE_NOT_NULL(var, "Cannot find variable %s in execution scope",
-                            var_name);
+                            var_handle->name_);
+
     auto &t = var->Get<framework::LoDTensor>();
     if (platform::is_gpu_place(t.place())) {
 #ifdef PADDLE_WITH_CUDA
@@ -72,13 +70,22 @@ void FetchOpHandle::RunImpl() {
 #endif
     } else {
       tensors_[i].ShareDataWith(t);
-      tensors_[i].set_lod(t.lod());
     }
+    tensors_[i].set_lod(t.lod());
   }
 
   this->WaitAndMergeCPUTensors();
 }
 
+void FetchOpHandle::WaitInputVarGenerated(const platform::Place &place) {
+  auto cpu_ctx = platform::DeviceContextPool::Instance().Get(place);
+  for (auto *input : inputs_) {
+    if (input->GeneratedOp()) {
+      input->GeneratedOp()->RecordWaitEventOnCtx(cpu_ctx);
+    }
+  }
+}
+
 std::string FetchOpHandle::Name() const { return "Fetch"; }
 
 }  // namespace details
diff --git a/paddle/fluid/framework/details/fetch_op_handle.h b/paddle/fluid/framework/details/fetch_op_handle.h
index b49f3df338..6ce42f92d7 100644
--- a/paddle/fluid/framework/details/fetch_op_handle.h
+++ b/paddle/fluid/framework/details/fetch_op_handle.h
@@ -28,12 +28,12 @@ namespace details {
 
 struct FetchOpHandle : public OpHandleBase {
  public:
-  FetchOpHandle(FeedFetchList *data, size_t offset,
+  FetchOpHandle(ir::Node *node, FeedFetchList *data, size_t offset,
                 std::vector<Scope *>
 *local_scopes);
 
   ~FetchOpHandle();
 
-  void Wait(platform::DeviceContext *waited_dev) override;
+  void RecordWaitEventOnCtx(platform::DeviceContext *waited_ctx) override;
 
   void WaitAndMergeCPUTensors() const;
 
@@ -42,6 +42,8 @@ struct FetchOpHandle : public OpHandleBase {
  protected:
   void RunImpl() override;
 
+  void WaitInputVarGenerated(const platform::Place &place) override;
+
  private:
   FeedFetchList *data_;
   size_t offset_;
diff --git a/paddle/fluid/framework/details/fuse_vars_op_handle.cc b/paddle/fluid/framework/details/fuse_vars_op_handle.cc
new file mode 100644
index 0000000000..018c9bff71
--- /dev/null
+++ b/paddle/fluid/framework/details/fuse_vars_op_handle.cc
@@ -0,0 +1,51 @@
+// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "paddle/fluid/framework/details/fuse_vars_op_handle.h"
+
+namespace paddle {
+namespace framework {
+namespace details {
+
+void FuseVarsOpHandle::RunImpl() {
+  WaitInputVarGenerated(place_);
+
+  auto in_var_handles = DynamicCast<VarHandle>(this->Inputs());
+  auto out_var_handles = DynamicCast<VarHandle>(this->Outputs());
+  PADDLE_ENFORCE_EQ(in_var_handles.size(), 0);
+  PADDLE_ENFORCE_EQ(out_var_handles.size() - 1, inputs_numel_.size(), "");
+
+  auto scope = local_scope_->FindVar(kLocalExecScopeName)->Get<Scope *>();
+
+  auto out_var_handle = out_var_handles[0];
+  auto out_var = scope->Var(out_var_handle->name_);
+
+  auto out_tensor = out_var->GetMutable<LoDTensor>();
+  out_tensor->Resize({total_numel_}).mutable_data(this->place_, type_);
+
+  int64_t s = 0;
+  for (size_t i = 1; i < out_var_handles.size(); ++i) {
+    auto out_name = out_var_handles[i]->name_;
+    auto out_t = scope->Var(out_name)->GetMutable<LoDTensor>();
+    auto numel = this->inputs_numel_.at(out_name);
+    out_t->ShareDataWith(out_tensor->Slice(s, s + numel));
+    s += numel;
+  }
+  this->RunAndRecordEvent([] {});
+}
+
+std::string FuseVarsOpHandle::Name() const { return "fuse vars"; }
+}  // namespace details
+}  // namespace framework
+}  // namespace paddle
diff --git a/paddle/fluid/framework/details/fuse_vars_op_handle.h b/paddle/fluid/framework/details/fuse_vars_op_handle.h
new file mode 100644
index 0000000000..3f360c510a
--- /dev/null
+++ b/paddle/fluid/framework/details/fuse_vars_op_handle.h
@@ -0,0 +1,65 @@
+// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
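+
+// Layout sketch for the fuse-vars logic above (hypothetical numels 4, 6, 2):
+//
+//   fused buffer: [ 0 .. 3 | 4 ....... 9 | 10 .. 11 ]
+//                   var_a      var_b        var_c
+//
+// Each small tensor ShareDataWith()s a slice of the single fused allocation,
+// so writes through the per-var tensors land contiguously in one buffer.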
+
+#pragma once
+
+#include <string>
+#include <typeindex>
+#include <unordered_map>
+
+#include "paddle/fluid/framework/details/container_cast.h"
+#include "paddle/fluid/framework/details/op_handle_base.h"
+#include "paddle/fluid/framework/lod_tensor.h"
+#include "paddle/fluid/framework/scope.h"
+#include "paddle/fluid/platform/device_context.h"
+
+namespace paddle {
+namespace framework {
+namespace details {
+
+struct FuseVarsOpHandle : public OpHandleBase {
+ public:
+  FuseVarsOpHandle(ir::Node *node, Scope *local_scope,
+                   const platform::Place &place,
+                   const std::unordered_map<std::string, int64_t> &inputs_numel,
+                   const std::type_index &var_type)
+      : OpHandleBase(node),
+        local_scope_(local_scope),
+        place_(place),
+        inputs_numel_(inputs_numel),
+        type_(var_type) {
+    total_numel_ = 0;
+    for (auto in_numel : inputs_numel) {
+      PADDLE_ENFORCE_GT(in_numel.second, 0);
+      total_numel_ += in_numel.second;
+    }
+  }
+
+  std::string Name() const override;
+
+  bool IsMultiDeviceTransfer() override { return false; };
+
+ protected:
+  void RunImpl() override;
+
+ private:
+  Scope *local_scope_;
+  const platform::Place place_;
+  const std::unordered_map<std::string, int64_t> inputs_numel_;
+  const std::type_index type_;
+  int64_t total_numel_;
+};
+}  // namespace details
+}  // namespace framework
+}  // namespace paddle
diff --git a/paddle/fluid/framework/details/gather_op_handle.cc b/paddle/fluid/framework/details/gather_op_handle.cc
index 3dfc972a44..9aae19fc73 100644
--- a/paddle/fluid/framework/details/gather_op_handle.cc
+++ b/paddle/fluid/framework/details/gather_op_handle.cc
@@ -20,9 +20,10 @@ namespace paddle {
 namespace framework {
 namespace details {
 
-GatherOpHandle::GatherOpHandle(const std::vector<Scope *> &local_scopes,
+GatherOpHandle::GatherOpHandle(ir::Node *node,
+                               const std::vector<Scope *> &local_scopes,
                                const std::vector<platform::Place> &places)
-    : local_scopes_(local_scopes), places_(places) {}
+    : OpHandleBase(node), local_scopes_(local_scopes), places_(places) {}
 
 void GatherOpHandle::RunImpl() {
   if (places_.size() == 1) return;
@@ -55,7 +56,7 @@ void GatherOpHandle::RunImpl() {
                  "Currently, gather_op only can gather SelectedRows.");
 
   // Wait input done, this Wait is an asynchronous operation
-  WaitInputVarGenerated(in_var_handles);
+  WaitInputVarGenerated();
 
   auto &pre_in_value = pre_in_var->Get<framework::SelectedRows>();
   std::vector<int64_t> out_rows;
@@ -111,17 +112,6 @@ void GatherOpHandle::RunImpl() {
   });
 }
 
-void GatherOpHandle::WaitInputVarGenerated(
-    const std::vector<VarHandle *> &in_var_handles) {
-  for (auto *in : in_var_handles) {
-    if (in->generated_op_) {
-      for (auto pair : dev_ctxes_) {
-        in->generated_op_->Wait(pair.second);
-      }
-    }
-  }
-}
-
 std::string GatherOpHandle::Name() const { return "gather"; }
 }  // namespace details
 }  // namespace framework
diff --git a/paddle/fluid/framework/details/gather_op_handle.h b/paddle/fluid/framework/details/gather_op_handle.h
index c394dd7a14..d9afbc6547 100644
--- a/paddle/fluid/framework/details/gather_op_handle.h
+++ b/paddle/fluid/framework/details/gather_op_handle.h
@@ -30,7 +30,7 @@ namespace details {
 
 struct GatherOpHandle : public OpHandleBase {
  public:
-  GatherOpHandle(const std::vector<Scope *> &local_scopes,
+  GatherOpHandle(ir::Node *node, const std::vector<Scope *> &local_scopes,
                  const std::vector<platform::Place> &places);
 
   std::string Name() const override;
@@ -39,7 +39,6 @@ struct GatherOpHandle : public OpHandleBase {
 
  protected:
   void RunImpl() override;
-  void WaitInputVarGenerated(const std::vector<VarHandle *> &in_var_handles);
 
  private:
   const std::vector<Scope *> &local_scopes_;
diff --git a/paddle/fluid/framework/details/gather_op_handle_test.cc b/paddle/fluid/framework/details/gather_op_handle_test.cc
index
 3cce2cc164..c9b94d1e10 100644
--- a/paddle/fluid/framework/details/gather_op_handle_test.cc
+++ b/paddle/fluid/framework/details/gather_op_handle_test.cc
@@ -70,6 +70,7 @@ struct TestGatherOpHandle {
   }
 
   void InitGatherOp(size_t input_scope_idx) {
+    std::vector<std::unique_ptr<ir::Node>> nodes;
     for (size_t j = 0; j < gpu_list_.size(); ++j) {
       local_scopes_.push_back(&(g_scope_.NewScope()));
       Scope& local_scope = local_scopes_.back()->NewScope();
@@ -81,30 +82,37 @@ struct TestGatherOpHandle {
     }
     param_scopes_[input_scope_idx]->Var("out");
 
-    op_handle_.reset(new GatherOpHandle(local_scopes_, gpu_list_));
+    nodes.emplace_back(new ir::Node("node", ir::Node::Type::kOperation));
+    op_handle_.reset(
+        new GatherOpHandle(nodes.back().get(), local_scopes_, gpu_list_));
     // add input
     for (size_t j = 0; j < gpu_list_.size(); ++j) {
       op_handle_->SetDeviceContext(gpu_list_[j], ctxs_[j].get());
-      auto* in_var_handle = new VarHandle(1, j, "input", gpu_list_[j]);
+      nodes.emplace_back(new ir::Node("node1", ir::Node::Type::kVariable));
+      auto* in_var_handle =
+          new VarHandle(nodes.back().get(), 1, j, "input", gpu_list_[j]);
       vars_.emplace_back(in_var_handle);
       op_handle_->AddInput(in_var_handle);
     }
     // add dummy var
-    vars_.emplace_back(new DummyVarHandle());
+    nodes.emplace_back(new ir::Node("node2", ir::Node::Type::kVariable));
+    vars_.emplace_back(new DummyVarHandle(nodes.back().get()));
     DummyVarHandle* in_dummy_var_handle =
         static_cast<DummyVarHandle*>(vars_.back().get());
-    in_dummy_var_handle->generated_op_ = nullptr;
+    in_dummy_var_handle->ClearGeneratedOp();
     op_handle_->AddInput(in_dummy_var_handle);
 
     // add output
-    auto* out_var_handle =
-        new VarHandle(2, input_scope_idx, "out", gpu_list_[input_scope_idx]);
+    nodes.emplace_back(new ir::Node("node3", ir::Node::Type::kVariable));
+    auto* out_var_handle = new VarHandle(nodes.back().get(), 2, input_scope_idx,
+                                         "out", gpu_list_[input_scope_idx]);
     vars_.emplace_back(out_var_handle);
     op_handle_->AddOutput(out_var_handle);
 
     // add dummy var
-    vars_.emplace_back(new DummyVarHandle());
+    nodes.emplace_back(new ir::Node("node4", ir::Node::Type::kVariable));
+    vars_.emplace_back(new DummyVarHandle(nodes.back().get()));
     DummyVarHandle* dummy_var_handle =
         static_cast<DummyVarHandle*>(vars_.back().get());
     op_handle_->AddOutput(dummy_var_handle);
diff --git a/paddle/fluid/framework/details/multi_devices_graph_builder.cc b/paddle/fluid/framework/details/multi_devices_graph_builder.cc
deleted file mode 100644
index 21197d587b..0000000000
--- a/paddle/fluid/framework/details/multi_devices_graph_builder.cc
+++ /dev/null
@@ -1,354 +0,0 @@
-// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-#include "paddle/fluid/framework/details/multi_devices_graph_builder.h" -#include -#include "paddle/fluid/framework/details/broadcast_op_handle.h" -#include "paddle/fluid/framework/details/computation_op_handle.h" -#include "paddle/fluid/framework/details/reduce_op_handle.h" -#include "paddle/fluid/framework/details/scale_loss_grad_op_handle.h" -#include "paddle/fluid/framework/details/send_op_handle.h" -#include "paddle/fluid/framework/scope.h" - -#ifdef PADDLE_WITH_CUDA -#include "paddle/fluid/framework/details/nccl_all_reduce_op_handle.h" -#endif - -#include -#include - -namespace paddle { -namespace framework { -namespace details { - -#ifdef PADDLE_WITH_CUDA -MultiDevSSAGraphBuilder::MultiDevSSAGraphBuilder( - const std::vector &places, - const std::string &loss_var_name, - const std::unordered_set ¶ms, - const std::vector &local_scopes, - platform::NCCLContextMap *nccl_ctxs, bool use_default_grad_scale) - : loss_var_name_(loss_var_name), - places_(places), - local_scopes_(local_scopes), - nccl_ctxs_(nccl_ctxs) { -#else -MultiDevSSAGraphBuilder::MultiDevSSAGraphBuilder( - const std::vector &places, - const std::string &loss_var_name, - const std::unordered_set ¶ms, - const std::vector &local_scopes, bool use_default_grad_scale) - : loss_var_name_(loss_var_name), - places_(places), - local_scopes_(local_scopes) { -#endif - for (auto &p : params) { - grad_names_.insert(GradVarName(p)); - } - use_default_grad_scale_ = use_default_grad_scale; -} - -void MultiDevSSAGraphBuilder::CreateOpHandleIOs(SSAGraph *result, - const OpDesc &op, - size_t place_id) const { - auto p = places_[place_id]; - auto *op_handle = result->ops_.back().get(); - op_handle->SetDeviceContext(p, - platform::DeviceContextPool::Instance().Get(p)); - - for (auto &each_var_name : op.InputArgumentNames()) { - VarHandle *var = - CreateOrGetLatestVarHandle(result, each_var_name, p, place_id); - op_handle->AddInput(var); - } - - for (auto &each_var_name : op.OutputArgumentNames()) { - CreateOpOutput(result, op_handle, each_var_name, p, place_id); - } -} - -bool MultiDevSSAGraphBuilder::IsDistTrainOp(const OpDesc &op, - OpDesc *send_op) const { - if (send_op == nullptr) { - return false; - } - - /** - * Check any of opvars contains `.block` and in sendvars - */ - auto checker = [](const std::vector &opvars, - const std::vector &sendvars) -> bool { - for (auto &var : opvars) { - if (var.find(".block") != std::string::npos && - std::find(sendvars.begin(), sendvars.end(), var) != sendvars.end()) { - return true; - } - } - return false; - }; - - if (op.Type() == "split") { - return checker(op.OutputArgumentNames(), send_op->InputArgumentNames()); - } else if (op.Type() == "concat") { - return checker(op.InputArgumentNames(), send_op->OutputArgumentNames()); - } - return false; -} - -std::unique_ptr MultiDevSSAGraphBuilder::Build( - const ProgramDesc &program) const { - std::unordered_map var_types; - for (auto *var : program.Block(0).AllVars()) { - var_types[var->Name()] = var->GetType(); - } - - auto graph = new SSAGraph(); - SSAGraph &result = *graph; - std::unordered_set og_has_been_broadcast; - - // We cannot invoke resize. It is a bug of GCC 4.8 - result.vars_ = std::vector< - std::unordered_map>>>( - places_.size()); - - // Find "send" op first for split is in front of send. - OpDesc *send_op = GetSendOpDesc(program); - - bool is_forwarding = true; - for (auto *op : program.Block(0).AllOps()) { - if (op->Type() == "send") { - // append send op if program is distributed trainer main program. 
- // always use the first device - CreateSendOp(&result, *op); - } else if (IsDistTrainOp(*op, send_op)) { - CreateComputationalOps(&result, *op, 1); - } else if (IsScaleLossOp(*op)) { - // user can customize loss@grad if not use_default_grad_scale_ - if (use_default_grad_scale_) { - CreateScaleLossGradOp(&result); - } - is_forwarding = false; - } else { - CreateComputationalOps(&result, *op, places_.size()); - if (!is_forwarding && places_.size() > 1) { - // Currently, we assume that once gradient is generated, it can be - // broadcast, and each gradient is only broadcast once. - for (auto &og : op->OutputArgumentNames()) { - if (IsParameterGradientOnce(og, &og_has_been_broadcast)) { - if (IsSparseGradient(var_types, og)) { - CreateReduceOp(&result, og, 0); - CreateBroadcastOp(&result, og, 0); - } else { - InsertNCCLAllReduceOp(&result, og); - } - } - } - } - } - } - - /* - Dependency graph has been constructed. However, there are still data - harzaeds need to be handled. - */ - PolishGraphToSupportDataHazards(&result); - - /* - * Only variables should be the leaves of graph. - */ - AddOutputToLeafOps(&result); - - if (VLOG_IS_ON(10)) { - std::ostringstream sout; - PrintGraphviz(*graph, sout); - VLOG(10) << sout.str(); - } - - return std::unique_ptr(graph); -} - -bool MultiDevSSAGraphBuilder::IsSparseGradient( - const std::unordered_map &var_types, - const std::string &og) const { - PADDLE_ENFORCE(var_types.count(og) != 0); - if (var_types.at(og) == proto::VarType::SELECTED_ROWS) { - return true; - } - return false; -} - -void MultiDevSSAGraphBuilder::CreateBroadcastOp(SSAGraph *result, - const std::string &p_name, - size_t src_dev_id) const { -#ifdef PADDLE_WITH_CUDA - auto *op_handle = new BroadcastOpHandle(local_scopes_, places_, nccl_ctxs_); -#else - auto *op_handle = new BroadcastOpHandle(local_scopes_, places_); -#endif - - result->ops_.emplace_back(op_handle); - auto *in = result->vars_.at(src_dev_id).at(p_name).back().get(); - op_handle->AddInput(in); - - for (size_t i = 0; i < places_.size(); ++i) { - auto &vars = result->vars_.at(i).at(p_name); - auto &p = places_[i]; - auto *out_var = new VarHandle(vars.size(), i, p_name, p); - vars.emplace_back(out_var); - op_handle->AddOutput(out_var); -#ifndef ADDLE_WITH_CUDA - op_handle->SetDeviceContext(p, - platform::DeviceContextPool::Instance().Get(p)); -#endif - } -} - -void MultiDevSSAGraphBuilder::CreateComputationalOp(SSAGraph *result, - const OpDesc &op, - int dev_id) const { - result->ops_.emplace_back( - new ComputationOpHandle(op, local_scopes_[dev_id], places_[dev_id])); - CreateOpHandleIOs(result, op, dev_id); -} - -OpDesc *MultiDevSSAGraphBuilder::GetSendOpDesc( - const ProgramDesc &program) const { - for (auto *op : program.Block(0).AllOps()) { - if (op->Type() == "send") { - return op; - } - } - return nullptr; -} -void MultiDevSSAGraphBuilder::InsertNCCLAllReduceOp( - SSAGraph *result, const std::string &og) const { -#ifdef PADDLE_WITH_CUDA - result->ops_.emplace_back( - new NCCLAllReduceOpHandle(local_scopes_, places_, *nccl_ctxs_)); - auto *op_handle = result->ops_.back().get(); - - for (size_t i = 0; i < places_.size(); ++i) { - auto &p = places_[i]; - auto &vars = result->vars_[i][og]; - PADDLE_ENFORCE(!vars.empty()); - auto &prev_grad = vars.back(); - op_handle->AddInput(prev_grad.get()); - - auto var = new VarHandle(vars.size() - 1, i, og, p); - vars.emplace_back(var); - op_handle->AddOutput(var); - } -#else - PADDLE_ENFORCE("Not implemented"); -#endif -} - -bool MultiDevSSAGraphBuilder::IsParameterGradientOnce( - 
const std::string &og, - std::unordered_set *og_has_been_broadcast) const { - bool is_pg_once = - grad_names_.count(og) != 0 && og_has_been_broadcast->count(og) == 0; - if (is_pg_once) { - // Insert NCCL AllReduce Op - og_has_been_broadcast->insert(og); - } - return is_pg_once; -} - -void MultiDevSSAGraphBuilder::CreateScaleLossGradOp(SSAGraph *result) const { - for (size_t i = 0; i < places_.size(); ++i) { -// Insert ScaleCost OpHandle -#ifdef PADDLE_WITH_CUDA - auto *communication_dev_ctx = nccl_ctxs_->DevCtx(places_[i]); -#else - auto *communication_dev_ctx = - platform::DeviceContextPool::Instance().Get(platform::CPUPlace()); -#endif - - auto *op_handle = - new ScaleLossGradOpHandle(local_scopes_.size(), local_scopes_[i], - places_[i], communication_dev_ctx); - result->ops_.emplace_back(op_handle); - - // FIXME: Currently ScaleLossGradOp only use device_count as scale - // factor. So it does not depend on any other operators. - // VarHandle *loss = GetVarHandle(loss_var_name, place); - // loss->pending_ops_.emplace_back(op_handle); - // op_handle->inputs_.emplace_back(loss); - - CreateOpOutput(result, op_handle, GradVarName(loss_var_name_), places_[i], - i); - } -} - -void MultiDevSSAGraphBuilder::CreateComputationalOps(SSAGraph *result, - const OpDesc &op, - size_t num_places) const { - for (size_t scope_idx = 0; scope_idx < num_places; ++scope_idx) { - auto p = places_[scope_idx]; - auto s = local_scopes_[scope_idx]; - result->ops_.emplace_back(new ComputationOpHandle(op, s, p)); - CreateOpHandleIOs(result, op, scope_idx); - } -} - -VarHandle *MultiDevSSAGraphBuilder::CreateReduceOp(SSAGraph *result, - const std::string &og, - int dst_dev_id) const { -#ifdef PADDLE_WITH_CUDA - result->ops_.emplace_back( - new ReduceOpHandle(local_scopes_, places_, nccl_ctxs_)); -#else - result->ops_.emplace_back(new ReduceOpHandle(local_scopes_, places_)); -#endif - auto *op_handle = result->ops_.back().get(); - - for (size_t i = 0; i < places_.size(); ++i) { - auto &vars = result->vars_[i][og]; -#ifndef PADDLE_WITH_CUDA - auto &p = places_[i]; - op_handle->SetDeviceContext(p, - platform::DeviceContextPool::Instance().Get(p)); -#endif - PADDLE_ENFORCE(!vars.empty()); - auto &prev_grad = vars.back(); - op_handle->AddInput(prev_grad.get()); - } - auto &vars = result->vars_[dst_dev_id][og]; - auto var = - new VarHandle(vars.size() - 1, dst_dev_id, og, places_[dst_dev_id]); - vars.emplace_back(var); - op_handle->AddOutput(var); - return var; -} - -void MultiDevSSAGraphBuilder::CreateSendOp(SSAGraph *result, - const OpDesc &op) const { - auto &p = places_[0]; - auto *s = local_scopes_[0]; - // FIXME(wuyi): send op always copy from GPU 0 - result->ops_.emplace_back(new SendOpHandle(op, s, p)); - // Create inputs for output on original place and no ssa output - // is created for send op. - CreateOpHandleIOs(result, op, 0); -} - -bool MultiDevSSAGraphBuilder::IsScaleLossOp(const OpDesc &op) const { - // FIXME(yy): Do not hard code like this - return op.OutputArgumentNames().size() == 1 && - op.OutputArgumentNames()[0] == GradVarName(loss_var_name_); -} - -} // namespace details -} // namespace framework -} // namespace paddle diff --git a/paddle/fluid/framework/details/multi_devices_graph_builder.h b/paddle/fluid/framework/details/multi_devices_graph_builder.h deleted file mode 100644 index 674e2779a1..0000000000 --- a/paddle/fluid/framework/details/multi_devices_graph_builder.h +++ /dev/null @@ -1,104 +0,0 @@ -// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. 
-// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -#pragma once -#include -#include -#include - -#include "paddle/fluid/framework/details/ssa_graph_builder.h" - -namespace paddle { -namespace platform { -class NCCLContextMap; -} - -namespace framework { -class Scope; -namespace details { - -class MultiDevSSAGraphBuilder : public SSAGraphBuilder { - public: -#ifdef PADDLE_WITH_CUDA - MultiDevSSAGraphBuilder(const std::vector &places, - const std::string &loss_var_name, - const std::unordered_set ¶ms, - const std::vector &local_scopes, - platform::NCCLContextMap *nccl_ctxs, - bool use_default_grad_scale); -#else - MultiDevSSAGraphBuilder(const std::vector &places, - const std::string &loss_var_name, - const std::unordered_set ¶ms, - const std::vector &local_scopes, - bool use_default_grad_scale); -#endif - - std::unique_ptr Build(const ProgramDesc &program) const override; - - private: - void CreateOpHandleIOs(SSAGraph *result, const OpDesc &op, - size_t place_id) const; - - private: - std::string loss_var_name_; - const std::vector &places_; - const std::vector &local_scopes_; - std::unordered_set grad_names_; - -#ifdef PADDLE_WITH_CUDA - platform::NCCLContextMap *nccl_ctxs_; -#endif - bool use_default_grad_scale_; - - bool IsScaleLossOp(const OpDesc &op) const; - - void CreateSendOp(SSAGraph *result, const OpDesc &op) const; - - /** - * Is this operator as the end-point operator before/after send operator. - */ - bool IsDistTrainOp(const OpDesc &op, OpDesc *send_op) const; - - void CreateComputationalOps(SSAGraph *result, const OpDesc &op, - size_t num_places) const; - - void CreateScaleLossGradOp(SSAGraph *result) const; - VarHandle *CreateReduceOp(SSAGraph *result, const std::string &og, - int dst_dev_id) const; - void CreateComputationalOp(SSAGraph *result, const OpDesc &op, - int dev_id) const; - - bool IsParameterGradientOnce( - const std::string &og, - std::unordered_set *og_has_been_broadcast) const; - - void InsertNCCLAllReduceOp(SSAGraph *result, const std::string &og) const; - - void CreateBroadcastOp(SSAGraph *result, const std::string &p_name, - size_t src_dev_id) const; - - /** - * Get send op in the global block of program. - * nullptr if not found. - */ - OpDesc *GetSendOpDesc(const ProgramDesc &program) const; - - bool IsSparseGradient( - const std::unordered_map &var_types, - const std::string &og) const; -}; -} // namespace details -} // namespace framework -} // namespace paddle diff --git a/paddle/fluid/framework/details/multi_devices_graph_check_pass.cc b/paddle/fluid/framework/details/multi_devices_graph_check_pass.cc new file mode 100644 index 0000000000..c9c255864a --- /dev/null +++ b/paddle/fluid/framework/details/multi_devices_graph_check_pass.cc @@ -0,0 +1,94 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "paddle/fluid/framework/details/multi_devices_graph_check_pass.h"
+#include <string>
+#include "paddle/fluid/framework/ir/graph.h"
+
+namespace paddle {
+namespace framework {
+namespace details {
+
+bool SSAGraghBuilderWithChecker::IsValidGraph(const ir::Graph *graph) const {
+  std::unordered_map<OpHandleBase *, size_t> pending_ops;
+  std::unordered_set<VarHandleBase *> pending_vars;
+  std::unordered_set<VarHandleBase *> ready_vars;
+  std::unordered_set<OpHandleBase *> ready_ops;
+
+  auto insert_pending_var = [&](VarHandleBase *var) {
+    pending_vars.insert(var);
+    if (var->GeneratedOp() == nullptr) {
+      ready_vars.emplace(var);
+    }
+  };
+
+  for (auto &var_map : graph->Get<GraphVars>(kGraphVars)) {
+    for (auto &name_pair : var_map) {
+      for (auto &version_pair : name_pair.second) {
+        insert_pending_var(version_pair.get());
+      }
+    }
+  }
+
+  for (auto &var : graph->Get<GraphDepVars>(kGraphDepVars)) {
+    insert_pending_var(var.get());
+  }
+
+  for (auto &op : graph->Get<GraphOps>(kGraphOps)) {
+    if (op->Inputs().empty()) {
+      ready_ops.insert(op.get());
+    } else {
+      pending_ops.insert({op.get(), op.get()->NoDupInputSize()});
+    }
+  }
+
+  auto run_all_ops = [&](std::unordered_set<OpHandleBase *> &set) {
+    for (auto *op : set) {
+      for (auto out : op->Outputs()) {
+        ready_vars.emplace(out);
+      }
+    }
+    set.clear();
+  };
+
+  while (!pending_vars.empty()) {
+    run_all_ops(ready_ops);
+
+    if (ready_vars.empty()) {
+      return false;
+    }
+
+    for (auto ready_var : ready_vars) {
+      pending_vars.erase(ready_var);
+      for (auto *op : ready_var->PendingOps()) {
+        auto &deps = --pending_ops[op];
+        if (deps == 0) {
+          ready_ops.insert(op);
+        }
+      }
+    }
+    ready_vars.clear();
+  }
+  return true;
+}
+}  // namespace details
+}  // namespace framework
+}  // namespace paddle
+
+REGISTER_PASS(multi_devices_check_pass,
+              paddle::framework::details::SSAGraghBuilderWithChecker)
+    .RequireGraphAttr(paddle::framework::details::kGraphVars)
+    .RequireGraphAttr(paddle::framework::details::kGraphDepVars)
+    .RequireGraphAttr(paddle::framework::details::kGraphOps)
+    .RequireGraphAttr(paddle::framework::details::kShardedVarDevice);
diff --git a/paddle/fluid/framework/details/multi_devices_graph_check_pass.h b/paddle/fluid/framework/details/multi_devices_graph_check_pass.h
new file mode 100644
index 0000000000..1e2b1867c3
--- /dev/null
+++ b/paddle/fluid/framework/details/multi_devices_graph_check_pass.h
@@ -0,0 +1,38 @@
+// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
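+
+// IsValidGraph above is a Kahn-style readiness sweep: repeatedly "run" every
+// ready op, mark its outputs ready, and decrement the pending-input count of
+// each consumer. A rough trace on a two-op chain (illustrative names):
+//
+//   read -> x -> fc -> y     pending_ops = {fc: 1}, ready_ops = {read}
+//   run read                 x becomes ready; fc's count drops to 0
+//   run fc                   y becomes ready; pending_vars drains -> valid
+//
+// If pending_vars is non-empty while nothing is ready, there is a cycle or a
+// dangling dependency and the check fails.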
+
+#pragma once
+
+#include "paddle/fluid/framework/details/multi_devices_helper.h"
+
+#include <string>
+
+namespace paddle {
+namespace framework {
+namespace details {
+
+class SSAGraghBuilderWithChecker : public ir::Pass {
+ protected:
+  std::unique_ptr<ir::Graph> ApplyImpl(
+      std::unique_ptr<ir::Graph> graph) const override {
+    PADDLE_ENFORCE(IsValidGraph(graph.get()));
+    return graph;
+  }
+
+  bool IsValidGraph(const ir::Graph* graph) const;
+};
+
+}  // namespace details
+}  // namespace framework
+}  // namespace paddle
diff --git a/paddle/fluid/framework/details/multi_devices_graph_pass.cc b/paddle/fluid/framework/details/multi_devices_graph_pass.cc
new file mode 100644
index 0000000000..c5a13e7e1f
--- /dev/null
+++ b/paddle/fluid/framework/details/multi_devices_graph_pass.cc
@@ -0,0 +1,846 @@
+// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+#include <algorithm>
+#include <fstream>
+#include <string>
+#include <utility>
+#include <vector>
+
+#include "paddle/fluid/framework/details/all_reduce_op_handle.h"
+#include "paddle/fluid/framework/details/broadcast_op_handle.h"
+#include "paddle/fluid/framework/details/computation_op_handle.h"
+#include "paddle/fluid/framework/details/data_balance_op_handle.h"
+#include "paddle/fluid/framework/details/multi_devices_graph_pass.h"
+#include "paddle/fluid/framework/details/reduce_op_handle.h"
+#include "paddle/fluid/framework/details/rpc_op_handle.h"
+#include "paddle/fluid/framework/details/scale_loss_grad_op_handle.h"
+#include "paddle/fluid/framework/ir/graph_helper.h"
+#include "paddle/fluid/framework/ir/node.h"
+#include "paddle/fluid/framework/op_info.h"
+#include "paddle/fluid/framework/scope.h"
+
+namespace paddle {
+namespace framework {
+namespace details {
+namespace {
+void PolishGraphToSupportDataHazards(ir::Graph *graph) {
+  for (auto &var_map : graph->Get<GraphVars>(kGraphVars)) {
+    for (auto &name_pair : var_map) {
+      if (name_pair.second.size() <= 1) {
+        continue;
+      }
+      auto it_new = name_pair.second.rbegin();
+      auto it_old = name_pair.second.rbegin();
+      ++it_old;
+      for (; it_old != name_pair.second.rend(); it_new = it_old, ++it_old) {
+        OpHandleBase *write_op = (*it_new)->GeneratedOp();
+        const auto &read_ops = (*it_old)->PendingOps();
+
+        for (auto *read_op : read_ops) {
+          // Manually add a dependency var from read_op to write_op;
+          if (read_op == write_op) {
+            // Read and write are the same op.
+            continue;
+          }
+          bool has_dep = false;
+          for (auto *r_out : read_op->Outputs()) {
+            for (auto *w_in : write_op->Inputs()) {
+              if (r_out->Node() == w_in->Node()) {
+                has_dep = true;
+                break;
+              }
+            }
+          }
+          if (has_dep) continue;
+
+          auto *dep_var = new DummyVarHandle(graph->CreateControlDepVar());
+          read_op->AddOutput(dep_var);
+          write_op->AddInput(dep_var);
+          graph->Get<GraphDepVars>(kGraphDepVars).emplace(dep_var);
+        }
+      }
+    }
+  }
+}
+
+VarHandle *CreateOrGetLatestVarHandle(ir::Graph *graph, ir::Node *node,
+                                      const platform::Place &place,
+                                      size_t place_offset) {
+  auto &var_holders = graph->Get<GraphVars>(kGraphVars)[place_offset];
+  auto &var_holder = var_holders[node->Name()];
+  VarHandle *var = nullptr;
+  if (var_holder.empty()) {
+    if (node->Var()) {
+      var = new VarHandle(graph->CreateVarNode(node->Var()), 0, place_offset,
+                          node->Name(), place);
+    } else {
+      var = new VarHandle(
+          graph->CreateEmptyNode(node->Name(), ir::Node::Type::kVariable), 0,
+          place_offset, node->Name(), place);
+    }
+    var_holder.emplace_back(var);
+  } else {
+    var = var_holder.rbegin()->get();
+  }
+  return var;
+}
+
+void CreateOpOutput(ir::Graph *graph, OpHandleBase *op_handle,
+                    ir::Node *new_node, const platform::Place &place,
+                    size_t place_offset) {
+  auto &vars =
+      graph->Get<GraphVars>(kGraphVars)[place_offset][new_node->Name()];
+  size_t version = vars.size();
+  auto var =
+      new VarHandle(new_node, version, place_offset, new_node->Name(), place);
+  vars.emplace_back(var);
+  op_handle->AddOutput(var);
+}
+
+void AddOutputToLeafOps(ir::Graph *graph) {
+  for (auto &op : graph->Get<GraphOps>(kGraphOps)) {
+    if (!op->Outputs().empty()) {
+      continue;
+    }
+    auto *dummy_leaf = new DummyVarHandle(graph->CreateControlDepVar());
+    graph->Get<GraphDepVars>(kGraphDepVars).emplace(dummy_leaf);
+    op->AddOutput(dummy_leaf);
+  }
+}
+}  // namespace
+
+static const char kLossVarName[] = "loss_var_name";
+static const char kPlaces[] = "places";
+static const char kParams[] = "params";
+static const char kLocalScopes[] = "local_scopes";
+static const char kStrategy[] = "strategy";
+
+void MultiDevSSAGraphBuilder::Init() const {
+  loss_var_name_ = Get<const std::string>(kLossVarName);
+  places_ = Get<const std::vector<platform::Place>>(kPlaces);
+  local_scopes_ = Get<const std::vector<Scope *>>(kLocalScopes);
+  strategy_ = Get<const BuildStrategy>(kStrategy);
+#ifdef PADDLE_WITH_CUDA
+  nccl_ctxs_ = &Get<platform::NCCLContextMap>("nccl_ctxs");
+#endif
+
+  for (auto &p : Get<const std::unordered_set<std::string>>(kParams)) {
+    grad_names_.insert(GradVarName(p));
+  }
+  balance_vars_.resize(places_.size(), 0);
+  if (strategy_.enable_data_balance_ && places_.size() == 1) {
+    LOG(WARNING) << "There is no need to enable data balance when there is "
+                    "only one place. enable_data_balance is set to False.";
+    strategy_.enable_data_balance_ = false;
+  }
+}
+
+void MultiDevSSAGraphBuilder::CreateOpHandleIOs(ir::Graph *result,
+                                                ir::Node *node,
+                                                size_t place_id) const {
+  auto p = places_[place_id];
+  auto *op_handle = result->Get<GraphOps>(kGraphOps).back().get();
+  op_handle->SetDeviceContext(p,
+                              platform::DeviceContextPool::Instance().Get(p));
+
+  for (ir::Node *input : node->inputs) {
+    VarHandle *var = CreateOrGetLatestVarHandle(result, input, p, place_id);
+    op_handle->AddInput(var);
+  }
+
+  for (ir::Node *output : node->outputs) {
+    ir::Node *new_node = nullptr;
+    if (output->Var()) {
+      new_node = result->CreateVarNode(output->Var());
+    } else {
+      new_node =
+          result->CreateEmptyNode(output->Name(), ir::Node::Type::kVariable);
+    }
+    CreateOpOutput(result, op_handle, new_node, p, place_id);
+  }
+}
+
+std::vector<std::string> MultiDevSSAGraphBuilder::FindDistTrainSendVars(
+    const std::vector<ir::Node *> &nodes) const {
+  std::vector<std::string> send_vars;
+  // since parameters are all in block 0,
+  // it's enough to only scan send ops in block 0
+  for (auto &node : nodes) {
+    OpDesc *op = node->Op();
+    // TODO(Yancey1989): use a graceful method to find the send op,
+    // instead of the hard-coded string
+    if (op->Type() == "send") {
+      auto op_vars = op->InputArgumentNames();
+      send_vars.reserve(send_vars.size() +
+                        std::distance(op_vars.begin(), op_vars.end()));
+      send_vars.insert(send_vars.end(), op_vars.begin(), op_vars.end());
+    }
+  }
+  return send_vars;
+}
+
+std::vector<std::string> MultiDevSSAGraphBuilder::FindDistTrainRecvVars(
+    const std::vector<ir::Node *> &nodes) const {
+  std::vector<std::string> recv_vars;
+  for (auto &node : nodes) {
+    OpDesc *op = node->Op();
+    // TODO(Yancey1989): use a graceful method to find the recv op,
+    // instead of the hard-coded string
+    if (op->Type() == "recv") {
+      auto op_vars = op->OutputArgumentNames();
+      recv_vars.reserve(recv_vars.size() +
+                        std::distance(op_vars.begin(), op_vars.end()));
+      recv_vars.insert(recv_vars.end(), op_vars.begin(), op_vars.end());
+    }
+  }
+  return recv_vars;
+}
+
+bool MultiDevSSAGraphBuilder::IsDistTrainOp(
+    ir::Node *node, const std::vector<std::string> &send_vars,
+    const std::vector<std::string> &recv_vars) const {
+  if (send_vars.size() == 0 || recv_vars.size() == 0) {
+    return false;
+  }
+
+  /**
+   * Check whether any of opvars contains `.block` and is in sendvars
+   */
+  auto checker = [](const std::vector<std::string> &opvars,
+                    const std::vector<std::string> &rpc_vars) -> bool {
+    for (auto &var : opvars) {
+      // a variable name with the suffix `.block` means it's a variable split
+      // by the DistributeTranspiler
+      // [python/paddle/fluid/transpiler/distribute_transpiler.py]
+      if (var.find(".block") != std::string::npos &&
+          std::find(rpc_vars.begin(), rpc_vars.end(), var) != rpc_vars.end()) {
+        return true;
+      }
+    }
+    return false;
+  };
+
+  std::vector<std::string> input_var_names;
+  std::vector<std::string> output_var_names;
+  for (ir::Node *input : node->inputs) {
+    input_var_names.push_back(input->Name());
+  }
+  for (ir::Node *output : node->outputs) {
+    output_var_names.push_back(output->Name());
+  }
+
+  return checker(output_var_names, send_vars) ||
+         checker(input_var_names, recv_vars);
+}
+
+size_t MultiDevSSAGraphBuilder::GetAppropriateDeviceID(
+    const std::vector<std::string> &var_names) const {
+  int64_t numel_sum = 0;
+  for (auto var_name : var_names) {
+    if (all_vars_.find(var_name) == all_vars_.end()) continue;
+    auto var_desc = all_vars_.at(var_name);
+    PADDLE_ENFORCE_NOT_NULL(var_desc);
+    auto dim = framework::make_ddim(var_desc->GetShape());
+    int64_t numel = framework::product(dim);
+    PADDLE_ENFORCE_GT(numel, 0);
+    numel_sum += numel;
+  }
+
+  auto smallest =
+      std::min_element(std::begin(balance_vars_), std::end(balance_vars_));
+  size_t dev_id =
+      static_cast<size_t>(std::distance(std::begin(balance_vars_), smallest));
+  balance_vars_[dev_id] += numel_sum;
+  return dev_id;
+}
+
+// Topology sort the graph nodes from inputs to outputs.
+// Since SSAGraphBuilder depends on forward/backward nodes to assign devices
+// to parameters/gradients before optimizer ops, a plain topo sort is
+// insufficient (some optimizer ops might not depend on any node), so we
+// manually move all optimizer nodes after the last backward node.
+// However, this assumption of SSAGraphBuilder should be relaxed in the future.
+std::vector<ir::Node *> SortOpsAndDelayOptimizeOp(const ir::Graph &graph) {
+  std::vector<ir::Node *> ret = ir::TopologySortOperations(graph);
+  size_t last_backward = 0;
+  for (size_t i = 0; i < ret.size(); ++i) {
+    if (boost::get<int>(
+            ret[i]->Op()->GetAttr(OpProtoAndCheckerMaker::OpRoleAttrName())) ==
+        static_cast<int>(OpRole::kBackward)) {
+      last_backward = i;
+    }
+  }
+
+  std::vector<ir::Node *> optimize_ops;
+  std::vector<ir::Node *> sorted_ret;
+  for (size_t i = 0; i < ret.size(); ++i) {
+    if (i < last_backward) {
+      if (boost::get<int>(ret[i]->Op()->GetAttr(
+              OpProtoAndCheckerMaker::OpRoleAttrName())) ==
+          static_cast<int>(OpRole::kOptimize)) {
+        optimize_ops.push_back(ret[i]);
+      } else {
+        sorted_ret.push_back(ret[i]);
+      }
+    } else if (i == last_backward) {
+      sorted_ret.push_back(ret[i]);
+      // Verify that no operation before the optimize ops depends on them.
+      std::unordered_set<ir::Node *> optimize_set(optimize_ops.begin(),
+                                                  optimize_ops.end());
+      for (ir::Node *n : sorted_ret) {
+        for (ir::Node *in : n->inputs) {
+          for (ir::Node *pre_n : in->inputs) {
+            PADDLE_ENFORCE(optimize_set.find(pre_n) == optimize_set.end(),
+                           "optimize operations cannot be depended on by "
+                           "forward or backward node %s -> %s",
+                           pre_n->Name(), n->Name());
+          }
+        }
+      }
+      sorted_ret.insert(sorted_ret.end(), optimize_ops.begin(),
+                        optimize_ops.end());
+    } else {
+      sorted_ret.push_back(ret[i]);
+    }
+  }
+  return sorted_ret;
+}
+
+std::unique_ptr<ir::Graph> MultiDevSSAGraphBuilder::ApplyImpl(
+    std::unique_ptr<ir::Graph> graph) const {
+  Init();
+  // Give the topology sort order and rebuild the graph structure.
+  std::vector<ir::Node *> sorted_ops = SortOpsAndDelayOptimizeOp(*graph);
+  auto nodes = graph->ReleaseNodes();
+  ir::Graph &result = *graph;
+
+  for (auto &node : nodes) {
+    if (node->NodeType() == ir::Node::Type::kVariable && node->Var()) {
+      all_vars_.emplace(node->Name(), node->Var());
+    }
+  }
+  std::unordered_set<std::string> og_has_been_broadcast;
+
+  // We cannot invoke resize. It is a bug of GCC 4.8.
+  result.Set(kGraphVars, new GraphVars(places_.size()));
+  result.Set(kGraphDepVars, new GraphDepVars);
+  result.Set(kGraphOps, new GraphOps);
+  result.Set(kShardedVarDevice, new ShardedVarDevice);
+
+  // find send/recv vars so that we can place the distributed-training
+  // related ops in place 0
+  auto send_vars = FindDistTrainSendVars(sorted_ops);
+  auto recv_vars = FindDistTrainRecvVars(sorted_ops);
+
+  std::vector<std::unordered_set<std::string>> bcast_var_name_set;
+  bcast_var_name_set.resize(places_.size());
+
+  size_t cur_device_id = 0;
+  bool is_forwarding = true;
+
+  for (ir::Node *node : sorted_ops) {
+    if (boost::get<int>(
+            node->Op()->GetAttr(OpProtoAndCheckerMaker::OpRoleAttrName())) ==
+        static_cast<int>(OpRole::kRPC)) {
+      CreateRPCOp(&result, node);
+    } else if (IsDistTrainOp(node, send_vars, recv_vars)) {
+      CreateDistTrainOp(&result, node);
+    } else if (IsScaleLossOp(node)) {
+      // user can customize loss@grad if not use_default_grad_scale_
+      if (strategy_.gradient_scale_ !=
+          BuildStrategy::GradientScaleStrategy::kCustomized) {
+        // TODO(paddle-dev): Why is there no input for this op_handle?
+        auto loss_grad_name = node->Op()->OutputArgumentNames()[0];
+        CreateScaleLossGradOp(&result, loss_grad_name);
+      }
+      // This assumes the backward generating code will ensure IsScaleLossOp
+      // is true only for the op that scales the final scalar loss.
+      // It also assumes a backward op will always follow the forward op in
+      // the block.
+      is_forwarding = false;
+    } else {
+      int op_dev_id = GetOpDeviceID(result, node);
+      if (op_dev_id != -1) {  // This op only runs on one specific device.
+        CreateComputationalOp(&result, node, op_dev_id);
+        for (ir::Node *n : node->outputs) {
+          graph->Get<ShardedVarDevice>(kShardedVarDevice)
+              .emplace(n->Name(), op_dev_id);
+        }
+      } else {
+        // This op runs on all devices, and its output may have parameter's
+        // gradients.
+        // TODO(paddle-dev): Why is the "read" op so special?
+        if (node->Op()->Type() == "read" && strategy_.enable_data_balance_) {
+          node->Op()->SetAttr("throw_eof_exp", false);
+          CreateComputationalOps(&result, node, places_.size());
+          const auto &data_var_names = node->Op()->Output("Out");
+          InsertDataBalanceOp(&result, data_var_names);
+        } else {
+          CreateComputationalOps(&result, node, places_.size());
+        }
+
+        if (!is_forwarding && places_.size() > 1) {
+          // Currently, we assume that once a gradient is generated, it can be
+          // broadcast, and each gradient is only broadcast once.
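+          // The OpRoleVar attribute stores flattened (param, grad) pairs,
+          // e.g. {"w", "w@GRAD", "b", "b@GRAD"}, which is why backward_vars
+          // below is read two entries at a time (and its size must be even).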
+          if (static_cast<bool>(boost::get<int>(node->Op()->GetAttr(
+                                    OpProtoAndCheckerMaker::OpRoleAttrName())) &
+                                static_cast<int>(OpRole::kBackward))) {
+            try {
+              auto backward_vars = boost::get<std::vector<std::string>>(
+                  node->Op()->GetNullableAttr(
+                      OpProtoAndCheckerMaker::OpRoleVarAttrName()));
+
+              PADDLE_ENFORCE_EQ(backward_vars.size() % 2, 0);
+
+              for (size_t i = 0; i < backward_vars.size(); i += 2) {
+                auto &p_name = backward_vars[i];
+                auto &g_name = backward_vars[i + 1];
+                VLOG(10) << "Bcast " << g_name << " for parameter " << p_name;
+
+                switch (strategy_.reduce_) {
+                  case BuildStrategy::ReduceStrategy::kReduce:
+                    cur_device_id = GetAppropriateDeviceID({g_name});
+                    CreateReduceOp(&result, g_name, cur_device_id);
+                    graph->Get<ShardedVarDevice>(kShardedVarDevice)
+                        .emplace(g_name, cur_device_id);
+                    bcast_var_name_set[cur_device_id].emplace(p_name);
+                    break;
+                  case BuildStrategy::ReduceStrategy::kAllReduce:
+                    if (IsSparseGradient(g_name)) {
+                      CreateReduceOp(&result, g_name, 0);
+                      CreateBroadcastOp(&result, g_name, 0);
+                    } else {
+                      InsertAllReduceOp(&result, g_name);
+                    }
+                    break;
+                  default:
+                    LOG(FATAL) << "Unknown reduce strategy.";
+                    break;
+                }
+              }
+            } catch (boost::bad_get e) {
+            }
+          }
+        }
+      }
+    }
+  }
+
+  bool use_gpu = false;
+#ifdef PADDLE_WITH_CUDA
+  use_gpu = nccl_ctxs_ != nullptr;
+#endif
+
+  if (use_gpu ||
+      strategy_.reduce_ == BuildStrategy::ReduceStrategy::kAllReduce) {
+    // Insert BCast Ops
+    for (size_t dev_id = 0; dev_id < bcast_var_name_set.size(); ++dev_id) {
+      auto &to_bcast_set = bcast_var_name_set[dev_id];
+      for (auto &bcast_name : to_bcast_set) {
+        CreateBroadcastOp(&result, bcast_name, dev_id);
+      }
+    }
+  }
+  /*
+  Dependency graph has been constructed. However, there are still data
+  hazards that need to be handled.
+  */
+  PolishGraphToSupportDataHazards(&result);
+
+  /*
+   * Only variables should be the leaves of the graph.
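+   * (AddOutputToLeafOps attaches a dummy control-dep variable to any op
+   * without outputs, so every op's completion is observable via a variable.)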
+   */
+  AddOutputToLeafOps(&result);
+  PADDLE_ENFORCE(!ir::HasCircle(result));
+  return graph;
+}
+
+bool MultiDevSSAGraphBuilder::IsSparseGradient(const std::string &og) const {
+  PADDLE_ENFORCE(all_vars_.count(og) != 0);
+  if (all_vars_.at(og)->GetType() == proto::VarType::SELECTED_ROWS) {
+    return true;
+  }
+  return false;
+}
+
+void MultiDevSSAGraphBuilder::SetCommunicationContext(
+    OpHandleBase *op_handle, const platform::Place &p) const {
+#ifdef PADDLE_WITH_CUDA
+  if (nccl_ctxs_ == nullptr) {
+    op_handle->SetDeviceContext(p,
+                                platform::DeviceContextPool::Instance().Get(p));
+  }
+#else
+  op_handle->SetDeviceContext(p,
+                              platform::DeviceContextPool::Instance().Get(p));
+#endif
+}
+
+void MultiDevSSAGraphBuilder::CreateBroadcastOp(ir::Graph *result,
+                                                const std::string &p_name,
+                                                size_t src_dev_id) const {
+#ifdef PADDLE_WITH_CUDA
+  auto *op_handle = new BroadcastOpHandle(
+      result->CreateEmptyNode("broadcast", ir::Node::Type::kOperation),
+      local_scopes_, places_, nccl_ctxs_);
+#else
+  auto *op_handle = new BroadcastOpHandle(
+      result->CreateEmptyNode("broadcast", ir::Node::Type::kOperation),
+      local_scopes_, places_);
+#endif
+  result->Get<GraphOps>(kGraphOps).emplace_back(op_handle);
+
+  auto *in =
+      result->Get<GraphVars>(kGraphVars).at(src_dev_id).at(p_name).back().get();
+  op_handle->AddInput(in);
+
+  for (size_t i = 0; i < places_.size(); ++i) {
+    auto &p = places_[i];
+    SetCommunicationContext(op_handle, p);
+    auto &vars = result->Get<GraphVars>(kGraphVars).at(i).at(p_name);
+    auto *out_var = new VarHandle(
+        result->CreateEmptyNode(p_name, ir::Node::Type::kVariable), vars.size(),
+        i, p_name, p);
+    vars.emplace_back(out_var);
+    op_handle->AddOutput(out_var);
+  }
+}
+
+void MultiDevSSAGraphBuilder::CreateComputationalOp(ir::Graph *result,
+                                                    ir::Node *node,
+                                                    int dev_id) const {
+  result->Get<GraphOps>(kGraphOps).emplace_back(
+      new ComputationOpHandle(result->CreateOpNode(node->Op()),
+                              local_scopes_[dev_id], places_[dev_id]));
+  CreateOpHandleIOs(result, node, dev_id);
+}
+
+void MultiDevSSAGraphBuilder::InsertAllReduceOp(ir::Graph *result,
+                                                const std::string &og) const {
+#ifdef PADDLE_WITH_CUDA
+  result->Get<GraphOps>(kGraphOps).emplace_back(new AllReduceOpHandle(
+      result->CreateEmptyNode("allreduce", ir::Node::Type::kOperation),
+      local_scopes_, places_, nccl_ctxs_));
+#else
+  result->Get<GraphOps>(kGraphOps).emplace_back(new AllReduceOpHandle(
+      result->CreateEmptyNode("allreduce", ir::Node::Type::kOperation),
+      local_scopes_, places_));
+#endif
+  auto *op_handle = result->Get<GraphOps>(kGraphOps).back().get();
+
+  for (size_t i = 0; i < places_.size(); ++i) {
+    auto &p = places_[i];
+    SetCommunicationContext(op_handle, p);
+    auto &vars = result->Get<GraphVars>(kGraphVars)[i][og];
+    PADDLE_ENFORCE(!vars.empty());
+    auto &prev_grad = vars.back();
+    op_handle->AddInput(prev_grad.get());
+
+    auto var =
+        new VarHandle(result->CreateEmptyNode(og, ir::Node::Type::kVariable),
+                      vars.size(), i, og, p);
+    vars.emplace_back(var);
+    op_handle->AddOutput(var);
+  }
+}
+
+void MultiDevSSAGraphBuilder::InsertDataBalanceOp(
+    ir::Graph *result, const std::vector<std::string> &datas) const {
+#ifdef PADDLE_WITH_CUDA
+  result->Get<GraphOps>(kGraphOps).emplace_back(new DataBalanceOpHandle(
+      result->CreateEmptyNode("data_balance", ir::Node::Type::kOperation),
+      local_scopes_, places_, nccl_ctxs_));
+#else
+  result->Get<GraphOps>(kGraphOps).emplace_back(new DataBalanceOpHandle(
+      result->CreateEmptyNode("data_balance", ir::Node::Type::kOperation),
+      local_scopes_, places_));
+#endif
+  auto *op_handle = result->Get<GraphOps>(kGraphOps).back().get();
+  for (size_t i = 0; i < places_.size(); ++i) {
+    auto &p = places_[i];
+    SetCommunicationContext(op_handle, p);
+    for (const std::string &d_name : datas) {
+      auto &vars = result->Get<GraphVars>(kGraphVars)[i][d_name];
+      PADDLE_ENFORCE(!vars.empty());
+      op_handle->AddInput(vars.back().get());
+      auto var = new VarHandle(
+          result->CreateEmptyNode(d_name, ir::Node::Type::kVariable),
+          vars.size(), i, d_name, p);
+      vars.emplace_back(var);
+      op_handle->AddOutput(var);
+    }
+  }
+}
+
+bool MultiDevSSAGraphBuilder::IsParameterGradientOnce(
+    const std::string &og,
+    std::unordered_set<std::string> *og_has_been_broadcast) const {
+  bool is_pg_once =
+      grad_names_.count(og) != 0 && og_has_been_broadcast->count(og) == 0;
+  if (is_pg_once) {
+    // Insert NCCL AllReduce Op
+    og_has_been_broadcast->insert(og);
+  }
+  return is_pg_once;
+}
+
+int MultiDevSSAGraphBuilder::GetOpDeviceID(const ir::Graph &graph,
+                                           ir::Node *node) const {
+  if (strategy_.reduce_ != BuildStrategy::ReduceStrategy::kReduce) {
+    return -1;
+  }
+  int op_role = boost::get<int>(
+      node->Op()->GetAttr(framework::OpProtoAndCheckerMaker::OpRoleAttrName()));
+  if (op_role != static_cast<int>(framework::OpRole::kOptimize)) {
+    return -1;
+  }
+  auto param_grad = boost::get<std::vector<std::string>>(
+      node->Op()->GetAttr(OpProtoAndCheckerMaker::OpRoleVarAttrName()));
+
+  PADDLE_ENFORCE_EQ(param_grad.size(), 2U);
+  int dev_id = GetVarDeviceID(graph, param_grad[1]);
+  PADDLE_ENFORCE_NE(dev_id, -1, "dev_id should not be -1.[%s, %s, %s]",
+                    node->Op()->Type(), param_grad[0], param_grad[1]);
+  return dev_id;
+}
+
+int MultiDevSSAGraphBuilder::GetVarDeviceID(const ir::Graph &graph,
+                                            const std::string &varname) const {
+  auto &sharded_var_device = graph.Get<ShardedVarDevice>(kShardedVarDevice);
+  auto got = sharded_var_device.find(varname);
+  return got == sharded_var_device.end() ? -1 : got->second;
+}
+
+void MultiDevSSAGraphBuilder::CreateScaleLossGradOp(
+    ir::Graph *result, const std::string &loss_grad_name) const {
+  for (size_t i = 0; i < places_.size(); ++i) {
+    // Insert ScaleCost OpHandle
+#ifdef PADDLE_WITH_CUDA
+    auto *communication_dev_ctx =
+        nccl_ctxs_ ? nccl_ctxs_->DevCtx(places_[i])
+                   : platform::DeviceContextPool::Instance().Get(places_[i]);
+#else
+    auto *communication_dev_ctx =
+        platform::DeviceContextPool::Instance().Get(platform::CPUPlace());
+#endif
+    auto *op_handle = new ScaleLossGradOpHandle(
+        result->CreateEmptyNode("scale_loss_grad", ir::Node::Type::kOperation),
+        local_scopes_.size(), local_scopes_[i], places_[i],
+        communication_dev_ctx);
+    result->Get<GraphOps>(kGraphOps).emplace_back(op_handle);
+
+    // FIXME: Currently ScaleLossGradOp only uses device_count as the scale
+    // factor, so it does not depend on any other operators.
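+    // In effect every place starts backward from a constant loss gradient of
+    // 1 / device_count (e.g. 0.25 with 4 places), so summing the per-place
+    // gradients averages them.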
+    //    VarHandle *loss = GetVarHandle(loss_var_name, place); + //    loss->pending_ops_.emplace_back(op_handle); + //    op_handle->inputs_.emplace_back(loss); + + CreateOpOutput( + result, op_handle, + result->CreateEmptyNode(loss_grad_name, ir::Node::Type::kVariable), + places_[i], i); + } +} + +void MultiDevSSAGraphBuilder::CreateComputationalOps(ir::Graph *result, + ir::Node *node, + size_t num_places) const { + for (size_t scope_idx = 0; scope_idx < num_places; ++scope_idx) { + auto p = places_[scope_idx]; + auto s = local_scopes_[scope_idx]; + result->Get<GraphOps>(kGraphOps).emplace_back( + new ComputationOpHandle(result->CreateOpNode(node->Op()), s, p)); + CreateOpHandleIOs(result, node, scope_idx); + } +} + +VarHandle *MultiDevSSAGraphBuilder::CreateReduceOp(ir::Graph *result, + const std::string &og, + int dst_dev_id) const { +#ifdef PADDLE_WITH_CUDA + result->Get<GraphOps>(kGraphOps).emplace_back(new ReduceOpHandle( + result->CreateEmptyNode("reduce", ir::Node::Type::kOperation), + local_scopes_, places_, nccl_ctxs_)); +#else + result->Get<GraphOps>(kGraphOps).emplace_back(new ReduceOpHandle( + result->CreateEmptyNode("reduce", ir::Node::Type::kOperation), + local_scopes_, places_)); +#endif + auto *op_handle = result->Get<GraphOps>(kGraphOps).back().get(); + + for (size_t i = 0; i < places_.size(); ++i) { + auto &p = places_[i]; + SetCommunicationContext(op_handle, p); + auto &vars = result->Get<GraphVars>(kGraphVars)[i][og]; + PADDLE_ENFORCE(!vars.empty()); + auto &prev_grad = vars.back(); + op_handle->AddInput(prev_grad.get()); + } + auto &vars = result->Get<GraphVars>(kGraphVars)[dst_dev_id][og]; + auto var = + new VarHandle(result->CreateEmptyNode(og, ir::Node::Type::kVariable), + vars.size(), dst_dev_id, og, places_[dst_dev_id]); + vars.emplace_back(var); + op_handle->AddOutput(var); + return var; +} + +// Find the first occurrence of `prev_op_name` and make the current `op` depend +// on it. +void MultiDevSSAGraphBuilder::ConnectOp(ir::Graph *result, OpHandleBase *op, + const std::string &prev_op_name) const { + for (auto &prev_op : result->Get<GraphOps>(kGraphOps)) { + if (prev_op->Name() == prev_op_name) { + auto *dep_var = new DummyVarHandle(result->CreateControlDepVar()); + prev_op->AddOutput(dep_var); + result->Get<GraphDepVars>(kGraphDepVars).emplace(dep_var); + op->AddInput(dep_var); + } + } +} + +void MultiDevSSAGraphBuilder::CreateDistTrainOp(ir::Graph *result, + ir::Node *node) const { + int op_dev_id = -1; + std::vector<std::string> input_var_names; + std::vector<std::string> output_var_names; + for (ir::Node *input : node->inputs) { + input_var_names.push_back(input->Name()); + } + for (ir::Node *output : node->outputs) { + output_var_names.push_back(output->Name()); + } + + if (node->Op()->Type() == "split_byref" || + node->Op()->Type() == "split_selected_rows") { + // TODO(paddle-dev): getting the first var is not safe. 
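+    // Rough sketch of the placement policy used below. GetAppropriateDeviceID
+    // (its body is not shown in this hunk) is, in intent, a least-loaded
+    // choice over balance_vars_, e.g.:
+    //   // balance_vars_[i] ~ amount of data already placed on place i
+    //   // {4096, 1024} -> returns 1, then adds this op's data size to slot 1
+    //   auto it = std::min_element(balance_vars_.begin(), balance_vars_.end());
+    //   size_t dev_id = it - balance_vars_.begin();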
+    op_dev_id = GetVarDeviceID(*result, input_var_names[0]); + if (strategy_.reduce_ == BuildStrategy::ReduceStrategy::kAllReduce) { + op_dev_id = GetAppropriateDeviceID(input_var_names); + for (auto &varname : input_var_names) { + result->Get<ShardedVarDevice>(kShardedVarDevice) + .emplace(varname, op_dev_id); + } + } + for (auto &varname : output_var_names) { + result->Get<ShardedVarDevice>(kShardedVarDevice) + .emplace(varname, op_dev_id); + } + } else if (node->Op()->Type() == "concat") { + op_dev_id = GetVarDeviceID(*result, input_var_names[0]); + for (auto &varname : output_var_names) { + result->Get<ShardedVarDevice>(kShardedVarDevice) + .emplace(varname, op_dev_id); + } + } else { + PADDLE_THROW( + "the distributed training related op should be in [split_byref, " + "concat]."); + } + + PADDLE_ENFORCE(op_dev_id != -1, + "cannot find the right place for distributed op: %s", + node->Op()->Type()); + + CreateComputationalOp(result, node, op_dev_id); + if (node->Op()->Type() == "concat") { + ConnectOp(result, result->Get<GraphOps>(kGraphOps).back().get(), + "fetch_barrier"); + } +} + +// Create RPC related op handles that connect its in ops and out ops. +void MultiDevSSAGraphBuilder::CreateRPCOp(ir::Graph *result, + ir::Node *node) const { + int op_dev_id = -1; + if (node->Op()->Type() == "send") { + // TODO(paddle-dev): getting the first var is not safe. + op_dev_id = GetVarDeviceID(*result, node->inputs[0]->Name()); + PADDLE_ENFORCE(!ir::IsControlDepVar(*node->inputs[0]), + "This hack no longer holds, please fix."); + // A variable name that contains ".block" means it was split by the + // split_byref op, + // so that we can balance the variable blocks across all the pserver + // instances. + if (strategy_.reduce_ == BuildStrategy::ReduceStrategy::kAllReduce && + node->inputs[0]->Name().find(".block") == std::string::npos) { + std::vector<std::string> input_var_names; + for (ir::Node *n : node->inputs) { + input_var_names.push_back(n->Name()); + } + op_dev_id = GetAppropriateDeviceID(input_var_names); + for (auto &varname : input_var_names) { + result->Get<ShardedVarDevice>(kShardedVarDevice) + .emplace(varname, op_dev_id); + } + } + } else if (node->Op()->Type() == "recv") { + std::vector<std::string> output_var_names; + for (ir::Node *n : node->outputs) { + output_var_names.push_back(n->Name()); + } + op_dev_id = GetAppropriateDeviceID(output_var_names); + for (auto &varname : output_var_names) { + result->Get<ShardedVarDevice>(kShardedVarDevice) + .emplace(varname, op_dev_id); + } + } else { + // send_barrier and fetch_barrier op can be scheduled on device 0 + op_dev_id = 0; + } + + PADDLE_ENFORCE(op_dev_id != -1, "cannot find the right place for rpc op: %s", + node->Op()->Type()); + + result->Get<GraphOps>(kGraphOps).emplace_back(new RPCOpHandle( + result->CreateOpNode(node->Op()), *node->Op(), local_scopes_[op_dev_id], + node->Op()->Type(), places_[op_dev_id])); + + // TODO(panyx0718): This might not be needed anymore. + if (node->Op()->Type() == "send_barrier") { + ConnectOp(result, result->Get<GraphOps>(kGraphOps).back().get(), "send"); + } else if (node->Op()->Type() == "recv") { + ConnectOp(result, result->Get<GraphOps>(kGraphOps).back().get(), + "send_barrier"); + } else if (node->Op()->Type() == "fetch_barrier") { + ConnectOp(result, result->Get<GraphOps>(kGraphOps).back().get(), "recv"); + } else if (node->Op()->Type() == "send") { + // do nothing + } else { + PADDLE_THROW( + "rpc op should be in [" + "send, send_barrier, 
recv, fetch_barrier]"); + } + + CreateOpHandleIOs(result, node, op_dev_id); +} + +bool MultiDevSSAGraphBuilder::IsScaleLossOp(ir::Node *node) const { + return boost::get( + node->Op()->GetAttr(OpProtoAndCheckerMaker::OpRoleAttrName())) == + (static_cast(OpRole::kBackward) | + static_cast(OpRole::kLoss)) && + !loss_var_name_.empty(); // If loss_var is empty. This is test mode +} +} // namespace details +} // namespace framework +} // namespace paddle + +REGISTER_PASS(multi_devices_pass, + paddle::framework::details::MultiDevSSAGraphBuilder) + .RequirePassAttr(paddle::framework::details::kLossVarName) + .RequirePassAttr(paddle::framework::details::kPlaces) + .RequirePassAttr(paddle::framework::details::kParams) + .RequirePassAttr(paddle::framework::details::kLocalScopes) + .RequirePassAttr(paddle::framework::details::kStrategy); diff --git a/paddle/fluid/framework/details/multi_devices_graph_pass.h b/paddle/fluid/framework/details/multi_devices_graph_pass.h new file mode 100644 index 0000000000..7a6f238f9c --- /dev/null +++ b/paddle/fluid/framework/details/multi_devices_graph_pass.h @@ -0,0 +1,115 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once +#include +#include +#include + +#include "paddle/fluid/framework/details/build_strategy.h" +#include "paddle/fluid/framework/details/multi_devices_helper.h" +#include "paddle/fluid/framework/ir/graph.h" + +namespace paddle { +namespace platform { +class NCCLContextMap; +} + +namespace framework { +class Scope; +namespace details { + +class MultiDevSSAGraphBuilder : public ir::Pass { + protected: + std::unique_ptr ApplyImpl( + std::unique_ptr graph) const override; + + private: + void CreateOpHandleIOs(ir::Graph *result, ir::Node *node, + size_t device_id) const; + void Init() const; + + private: + mutable std::string loss_var_name_; + mutable std::vector places_; + mutable std::vector local_scopes_; + mutable std::unordered_set grad_names_; + +#ifdef PADDLE_WITH_CUDA + mutable platform::NCCLContextMap *nccl_ctxs_; +#endif + + int GetVarDeviceID(const ir::Graph &graph, const std::string &varname) const; + + bool IsScaleLossOp(ir::Node *node) const; + + void CreateRPCOp(ir::Graph *result, ir::Node *node) const; + void CreateDistTrainOp(ir::Graph *result, ir::Node *node) const; + + /** + * Is this operator as the end-point operator before/after send operator. 
+ */ + bool IsDistTrainOp(ir::Node *node, const std::vector &send_vars, + const std::vector &recv_vars) const; + + std::vector FindDistTrainSendVars( + const std::vector &nodes) const; + + std::vector FindDistTrainRecvVars( + const std::vector &nodes) const; + + void ConnectOp(ir::Graph *result, OpHandleBase *op, + const std::string &prev_op_name) const; + + void CreateComputationalOps(ir::Graph *result, ir::Node *node, + size_t num_places) const; + + void CreateScaleLossGradOp(ir::Graph *result, + const std::string &loss_grad_name) const; + + VarHandle *CreateReduceOp(ir::Graph *result, const std::string &og, + int dst_dev_id) const; + void CreateComputationalOp(ir::Graph *result, ir::Node *node, + int dev_id) const; + + bool IsParameterGradientOnce( + const std::string &og, + std::unordered_set *og_has_been_broadcast) const; + + int GetOpDeviceID(const ir::Graph &graph, ir::Node *node) const; + + void InsertAllReduceOp(ir::Graph *result, const std::string &og) const; + + void InsertDataBalanceOp(ir::Graph *result, + const std::vector &datas) const; + + void CreateBroadcastOp(ir::Graph *result, const std::string &p_name, + size_t src_dev_id) const; + + bool IsSparseGradient(const std::string &og) const; + + size_t GetAppropriateDeviceID( + const std::vector &var_names) const; + + private: + mutable BuildStrategy strategy_; + mutable std::unordered_map all_vars_; + mutable std::vector balance_vars_; + + void SetCommunicationContext(OpHandleBase *op_handle, + const platform::Place &p) const; +}; +} // namespace details +} // namespace framework +} // namespace paddle diff --git a/paddle/fluid/framework/details/multi_devices_graph_print_pass.cc b/paddle/fluid/framework/details/multi_devices_graph_print_pass.cc new file mode 100644 index 0000000000..69944a42b6 --- /dev/null +++ b/paddle/fluid/framework/details/multi_devices_graph_print_pass.cc @@ -0,0 +1,86 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
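+// For orientation: Print() below writes plain Graphviz DOT. A tiny, made-up
+// graph would come out roughly as
+//   digraph G {
+//     var_0 [label="w@GRAD\nCUDAPlace(0)\n0"]
+//     op_0 [label="scale_loss_grad", shape=rect]
+//     op_0 -> var_0
+//   }
+// which renders with e.g. `dot -Tpng ssa.dot -o ssa.png`.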
+ +#include "paddle/fluid/framework/details/multi_devices_graph_print_pass.h" +#include +#include "paddle/fluid/framework/ir/graph.h" + +namespace paddle { +namespace framework { +namespace details { + +template +static inline void IterAllVar(const ir::Graph &graph, Callback callback) { + for (auto &each : graph.Get(kGraphVars)) { + for (auto &pair1 : each) { + for (auto &pair2 : pair1.second) { + callback(*pair2); + } + } + } + + for (auto &var : graph.Get(kGraphDepVars)) { + callback(*var); + } +} + +void GraphvizSSAGraphPrinter::Print(const ir::Graph &graph, + std::ostream &sout) const { + size_t var_id = 0; + std::unordered_map vars; + + sout << "digraph G {\n"; + + IterAllVar(graph, [&](const VarHandleBase &var) { + auto *var_ptr = &var; + auto *var_handle_ptr = dynamic_cast(var_ptr); + auto *dummy_ptr = dynamic_cast(var_ptr); + + size_t cur_var_id = var_id++; + vars[var_ptr] = cur_var_id; + + if (var_handle_ptr) { + sout << "var_" << cur_var_id << " [label=\"" << var_handle_ptr->name_ + << "\\n" + << var_handle_ptr->place_ << "\\n" + << var_handle_ptr->version_ << "\"]" << std::endl; + } else if (dummy_ptr) { + sout << "var_" << cur_var_id << " [label=\"dummy\"]" << std::endl; + } + }); + + size_t op_id = 0; + for (auto &op : graph.Get(kGraphOps)) { + std::string op_name = "op_" + std::to_string(op_id++); + sout << op_name << " [label=\"" << op->Name() << "\", shape=rect]" + << std::endl; + for (auto in : op->Inputs()) { + std::string var_name = "var_" + std::to_string(vars[in]); + sout << var_name << " -> " << op_name << std::endl; + } + + for (auto out : op->Outputs()) { + std::string var_name = "var_" + std::to_string(vars[out]); + sout << op_name << " -> " << var_name << std::endl; + } + } + + sout << "}\n"; +} +} // namespace details +} // namespace framework +} // namespace paddle + +REGISTER_PASS(multi_devices_print_pass, + paddle::framework::details::SSAGraghBuilderWithPrinter); diff --git a/paddle/fluid/framework/details/multi_devices_graph_print_pass.h b/paddle/fluid/framework/details/multi_devices_graph_print_pass.h new file mode 100644 index 0000000000..c00685fa16 --- /dev/null +++ b/paddle/fluid/framework/details/multi_devices_graph_print_pass.h @@ -0,0 +1,52 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
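+// A minimal usage sketch for the pass declared below, assuming the generic
+// ir::Pass attribute API (Set/Apply) used elsewhere in this patch; the path
+// value is made up:
+//   auto pass = ir::PassRegistry::Instance().Get("multi_devices_print_pass");
+//   pass->Set<std::string>("debug_graphviz_path",
+//                          new std::string("/tmp/ssa_graph.dot"));
+//   pass->Set<GraphvizSSAGraphPrinter>("graph_printer",
+//                                      new GraphvizSSAGraphPrinter);
+//   graph = pass->Apply(std::move(graph));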
+ +#pragma once + +#include <fstream> +#include <iosfwd> +#include <ostream> +#include <string> +#include "paddle/fluid/framework/details/multi_devices_helper.h" + +namespace paddle { +namespace framework { +namespace details { + +class SSAGraphPrinter { + public: + virtual ~SSAGraphPrinter() {} + virtual void Print(const ir::Graph& graph, std::ostream& sout) const = 0; +}; + +class GraphvizSSAGraphPrinter : public SSAGraphPrinter { + public: + void Print(const ir::Graph& graph, std::ostream& sout) const override; +}; + +class SSAGraghBuilderWithPrinter : public ir::Pass { + protected: + std::unique_ptr<ir::Graph> ApplyImpl( + std::unique_ptr<ir::Graph> graph) const override { + std::unique_ptr<std::ofstream> fout( + new std::ofstream(Get<std::string>("debug_graphviz_path"))); + PADDLE_ENFORCE(fout->good()); + Get<GraphvizSSAGraphPrinter>("graph_printer").Print(*graph, *fout); + return graph; + } +}; + +} // namespace details +} // namespace framework +} // namespace paddle diff --git a/paddle/fluid/framework/details/multi_devices_helper.cc b/paddle/fluid/framework/details/multi_devices_helper.cc new file mode 100644 index 0000000000..0242274a16 --- /dev/null +++ b/paddle/fluid/framework/details/multi_devices_helper.cc @@ -0,0 +1,20 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +#include "paddle/fluid/framework/details/multi_devices_helper.h" + +namespace paddle { +namespace framework { +namespace details {} // namespace details +} // namespace framework +} // namespace paddle diff --git a/paddle/fluid/framework/details/multi_devices_helper.h b/paddle/fluid/framework/details/multi_devices_helper.h new file mode 100644 index 0000000000..175c5a9950 --- /dev/null +++ b/paddle/fluid/framework/details/multi_devices_helper.h @@ -0,0 +1,57 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once + +#include <string> +#include <unordered_map> +#include <vector> + +#include "paddle/fluid/framework/details/op_handle_base.h" +#include "paddle/fluid/framework/details/var_handle.h" + +#include "paddle/fluid/framework/program_desc.h" +#include "paddle/fluid/platform/place.h" + +#include "paddle/fluid/framework/ir/graph.h" +#include "paddle/fluid/framework/ir/pass.h" + +namespace paddle { +namespace framework { +namespace details { + +// All variables in each device. +// The outside vector is the device vector. Each element of this vector is a +// map from variable name to variables. Variables with the same name +// will have a different version. 
The offset in the +// `std::vector<std::unique_ptr<VarHandle>>` is the version of the variables. +typedef std::vector< + std::unordered_map<std::string, std::vector<std::unique_ptr<VarHandle>>>> + GraphVars; +const char kGraphVars[] = "vars"; + +// aux variables to represent dependency. Useful to resolve data hazards. +typedef std::unordered_set<std::unique_ptr<VarHandleBase>> GraphDepVars; +const char kGraphDepVars[] = "dep_vars"; + +// All operators. NOTE that even though we use a vector here, the operators are +// unordered. +typedef std::vector<std::unique_ptr<OpHandleBase>> GraphOps; +const char kGraphOps[] = "ops"; + +typedef std::unordered_map<std::string, int> ShardedVarDevice; +const char kShardedVarDevice[] = "sharded_var_device"; +} // namespace details +} // namespace framework +} // namespace paddle diff --git a/paddle/fluid/framework/details/op_handle_base.cc b/paddle/fluid/framework/details/op_handle_base.cc index 534d77860f..ee9f9184da 100644 --- a/paddle/fluid/framework/details/op_handle_base.cc +++ b/paddle/fluid/framework/details/op_handle_base.cc @@ -11,8 +11,8 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. - #include "paddle/fluid/framework/details/op_handle_base.h" +#include <map> namespace paddle { namespace framework { @@ -39,9 +39,9 @@ OpHandleBase::~OpHandleBase() { #endif } -void OpHandleBase::Run(bool use_event) { +void OpHandleBase::Run(bool use_cuda) { #ifdef PADDLE_WITH_CUDA - if (events_.empty() && use_event) { + if (events_.empty() && use_cuda) { for (auto &p : dev_ctxes_) { int dev_id = boost::get<platform::CUDAPlace>(p.first).device; PADDLE_ENFORCE(cudaSetDevice(dev_id)); @@ -50,21 +50,23 @@ void OpHandleBase::Run(bool use_event) { } } #else - PADDLE_ENFORCE(!use_event); + PADDLE_ENFORCE(!use_cuda); #endif RunImpl(); } -void OpHandleBase::Wait(platform::DeviceContext *waited_dev) { +void OpHandleBase::RecordWaitEventOnCtx(platform::DeviceContext *waited_ctx) { #ifdef PADDLE_WITH_CUDA - if (platform::is_cpu_place(waited_dev->GetPlace()) || events_.empty()) { + PADDLE_ENFORCE_NOT_NULL(waited_ctx); + if (platform::is_cpu_place(waited_ctx->GetPlace()) || events_.empty()) { for (auto &dev_ctx : dev_ctxes_) { + PADDLE_ENFORCE_NOT_NULL(dev_ctx.second); dev_ctx.second->Wait(); } } else { auto stream = - static_cast<platform::CUDADeviceContext *>(waited_dev)->stream(); + static_cast<platform::CUDADeviceContext *>(waited_ctx)->stream(); for (auto &ev : events_) { PADDLE_ENFORCE(cudaStreamWaitEvent(stream, ev.second, 0)); } @@ -78,19 +80,52 @@ void OpHandleBase::AddInput(VarHandleBase *in) { this->inputs_.emplace_back(in); - in->pending_ops_.insert(this); + node_->inputs.push_back(in->Node()); + in->AddOutput(this, this->Node()); } void OpHandleBase::AddOutput(VarHandleBase *out) { outputs_.emplace_back(out); - out->generated_op_ = this; + node_->outputs.push_back(out->Node()); + out->AddInput(this, this->Node()); +} + +void OpHandleBase::WaitInputVarGenerated() { + for (auto in_var : inputs_) { + if (NeedWait(in_var)) { + for (auto &pair : dev_ctxes_) { + in_var->GeneratedOp()->RecordWaitEventOnCtx(pair.second); + } + } + } +} + +void OpHandleBase::WaitInputVarGenerated(const platform::Place &place) { + for (auto *in : inputs_) { + if (NeedWait(in)) { + in->GeneratedOp()->RecordWaitEventOnCtx(dev_ctxes_[place]); + } + } +} + +size_t OpHandleBase::NoDummyInputSize() const { + size_t cnt = 0; + for (auto *in : inputs_) { + if (dynamic_cast<DummyVarHandle *>(in) == nullptr) { + ++cnt; + } + } + return cnt; +} + +bool OpHandleBase::NeedWait(VarHandleBase *in_var) { + return in_var && in_var->GeneratedOp(); } void 
OpHandleBase::RunAndRecordEvent(const std::function<void()> &callback) { #ifdef PADDLE_WITH_CUDA if (!events_.empty()) { // Use event std::function<void()> method = callback; - for (auto &p : dev_ctxes_) { method = [method, p, this]() { static_cast<platform::CUDADeviceContext *>(p.second)->RecordEvent( diff --git a/paddle/fluid/framework/details/op_handle_base.h b/paddle/fluid/framework/details/op_handle_base.h index 00f213f3ed..2d7f189428 100644 --- a/paddle/fluid/framework/details/op_handle_base.h +++ b/paddle/fluid/framework/details/op_handle_base.h @@ -13,10 +13,11 @@ // limitations under the License. #pragma once +#include <map> #include <string> #include <vector> - #include "paddle/fluid/framework/details/var_handle.h" +#include "paddle/fluid/framework/ir/node.h" #include "paddle/fluid/platform/device_context.h" #include "paddle/fluid/platform/macros.h" @@ -26,9 +27,11 @@ namespace details { constexpr char kLocalExecScopeName[] = "@LCOAL_SCOPE@"; +// Wraps ir::Node and provides helper utilities. +// It's responsible for populating necessary fields of ir::Node. class OpHandleBase { public: - OpHandleBase() {} + explicit OpHandleBase(ir::Node *node) : node_(node) {} virtual ~OpHandleBase(); @@ -36,14 +39,26 @@ class OpHandleBase { virtual std::string Name() const = 0; - void Run(bool use_event); + void Run(bool use_cuda); - virtual void Wait(platform::DeviceContext *waited_dev); + virtual void RecordWaitEventOnCtx(platform::DeviceContext *waited_ctx); void AddInput(VarHandleBase *in); void AddOutput(VarHandleBase *out); + // This method adds the wait events of all the inputs on all the device + // contexts. + // NOTE: This Wait is an asynchronous operation. + virtual void WaitInputVarGenerated(); + + // This method adds the wait events of all the inputs on the specified device + // context. + // NOTE: This Wait is an asynchronous operation. + virtual void WaitInputVarGenerated(const platform::Place &place); + + virtual bool NeedWait(VarHandleBase *in_var); + + // If the Op involves data transfer across multiple devices, it + // will likely block other computations. 
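+  // For example, the threaded executor postpones such ops when
+  // strategy_.allow_op_delay_ is set, so cheap compute ops are not starved
+  // while a cross-device transfer is in flight (see delayed_ops in
+  // threaded_ssa_graph_executor.cc).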
virtual bool IsMultiDeviceTransfer() { return false; } @@ -58,8 +73,20 @@ class OpHandleBase { const std::vector &Inputs() const { return inputs_; } + size_t NoDupInputSize() const { + std::unordered_set res; + for (auto *var : inputs_) { + res.emplace(var); + } + return res.size(); + } + const std::vector &Outputs() const { return outputs_; } + size_t NoDummyInputSize() const; + + ir::Node *Node() { return node_; } + protected: void RunAndRecordEvent(const std::function &callback); @@ -68,11 +95,10 @@ class OpHandleBase { virtual void RunImpl() = 0; + ir::Node *node_; std::vector inputs_; std::vector outputs_; - std::unordered_map - dev_ctxes_; + std::map dev_ctxes_; #ifdef PADDLE_WITH_CUDA std::unordered_map events_; diff --git a/paddle/fluid/framework/details/op_registry.h b/paddle/fluid/framework/details/op_registry.h index 06603db31e..eea7e712f8 100644 --- a/paddle/fluid/framework/details/op_registry.h +++ b/paddle/fluid/framework/details/op_registry.h @@ -95,8 +95,8 @@ struct OpInfoFiller { void operator()(const char* op_type, OpInfo* info) const { info->proto_ = new proto::OpProto; info->checker_ = new OpAttrChecker(); - auto maker = T(info->proto_, info->checker_); - maker.Validate(); + T maker; + maker(info->proto_, info->checker_); info->proto_->set_type(op_type); PADDLE_ENFORCE( info->proto_->IsInitialized(), diff --git a/paddle/fluid/framework/details/reduce_and_gather.h b/paddle/fluid/framework/details/reduce_and_gather.h index 2b95a28499..e28264eb32 100644 --- a/paddle/fluid/framework/details/reduce_and_gather.h +++ b/paddle/fluid/framework/details/reduce_and_gather.h @@ -35,12 +35,16 @@ struct ReduceLoDTensor { PADDLE_ENFORCE(!src_tensors_.empty()); auto &t0 = *src_tensors_[0]; PADDLE_ENFORCE_NE(t0.numel(), 0); + dst_tensor_.Resize(t0.dims()); T *dst = dst_tensor_.mutable_data(platform::CPUPlace()); - std::copy(t0.data(), t0.data() + t0.numel(), dst); - for (size_t i = 1; i < src_tensors_.size(); ++i) { + for (size_t i = 0; i < src_tensors_.size(); ++i) { auto &t = *src_tensors_[i]; + if (dst == t.data()) { + continue; + } + PADDLE_ENFORCE_EQ(t.dims(), t0.dims()); PADDLE_ENFORCE_EQ(t.type(), t0.type()); std::transform(t.data(), t.data() + t.numel(), dst, dst, @@ -52,8 +56,7 @@ struct ReduceLoDTensor { inline void GatherSelectedRows( const std::vector &src_selecte_rows_, const std::vector &in_places, - const std::unordered_map &dev_ctxes, + const std::map &dev_ctxes, const platform::Place &out_place, SelectedRows *dst_selecte_rows) { PADDLE_ENFORCE(!src_selecte_rows_.empty()); diff --git a/paddle/fluid/framework/details/reduce_op_handle.cc b/paddle/fluid/framework/details/reduce_op_handle.cc index 1bb04c1dfc..6c7e5c1fb0 100644 --- a/paddle/fluid/framework/details/reduce_op_handle.cc +++ b/paddle/fluid/framework/details/reduce_op_handle.cc @@ -16,12 +16,18 @@ #include "paddle/fluid/framework/details/container_cast.h" #include "paddle/fluid/framework/details/reduce_and_gather.h" #include "paddle/fluid/framework/details/variable_visitor.h" +#include "paddle/fluid/platform/profiler.h" + +DEFINE_bool( + cpu_deterministic, false, + "Whether to make the result of computation deterministic in CPU side."); namespace paddle { namespace framework { namespace details { void ReduceOpHandle::RunImpl() { + platform::RecordEvent r("reduce", nullptr); if (places_.size() == 1) return; // the input and output may have dummy var. 
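+  // Background for FLAGS_cpu_deterministic (defined above): float addition
+  // is not associative, so the reduced value can drift with the summing
+  // order. A minimal illustration:
+  //   float a = 1e8f, b = -1e8f, c = 0.5f;
+  //   (a + b) + c == 0.5f;  // exact
+  //   (a + c) + b == 0.0f;  // 0.5f is absorbed: 1e8f + 0.5f rounds to 1e8f
+  // The deterministic path below always accumulates into the same target in
+  // local scope 0, which fixes the order across runs.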
auto in_var_handles = DynamicCast(inputs_); @@ -51,7 +57,7 @@ void ReduceOpHandle::RunImpl() { PADDLE_ENFORCE_NOT_NULL(pre_in_var); // Wait input done, this Wait is asynchronous operation - WaitInputVarGenerated(in_var_handles); + WaitInputVarGenerated(); // NOTE: The Places of all input tensor must be all on CPU or all on GPU. std::vector in_places; // used to get dev_ctx @@ -80,19 +86,43 @@ void ReduceOpHandle::RunImpl() { } if (pre_in_var->IsType()) { - std::vector in_selected_rows = - GetInputValues(in_var_handles, var_scopes); - - GatherSelectedRows(in_selected_rows, in_places, dev_ctxes_, t_out_p, - out_var->GetMutable()); + this->RunAndRecordEvent([&] { + std::vector in_selected_rows = + GetInputValues(in_var_handles, var_scopes); + GatherSelectedRows(in_selected_rows, in_places, dev_ctxes_, t_out_p, + out_var->GetMutable()); + }); } else { std::vector lod_tensors = GetInputValues(in_var_handles, var_scopes); if (paddle::platform::is_cpu_place(lod_tensors[0]->place())) { - ReduceLoDTensor func(lod_tensors, - out_var->GetMutable()); - VisitDataType(ToDataType(lod_tensors[0]->type()), func); + this->RunAndRecordEvent([&] { + // FIXME(zcd): The order of summing is important, + // especially when the type of data is float or double. + // For example, the result of `a+b+c+d` may be different + // with the result of `c+a+b+d`, so the summing order should be fixed. + if (!FLAGS_cpu_deterministic) { + ReduceLoDTensor func(lod_tensors, + out_var->GetMutable()); + VisitDataType(ToDataType(lod_tensors[0]->type()), func); + } else { + // We sum lod_tensors to reduce_sum_trg which is in local_scopes_0 + // here, but it doesn't mean reduce_sum_trg must be in local_scopes_0. + auto &reduce_sum_trg = *this->local_scopes_[0] + ->FindVar(kLocalExecScopeName) + ->Get() + ->FindVar(out_var_handle->name_) + ->GetMutable(); + ReduceLoDTensor func(lod_tensors, &reduce_sum_trg); + VisitDataType(ToDataType(lod_tensors[0]->type()), func); + + auto trg = out_var->GetMutable(); + if (reduce_sum_trg.data() != trg->data()) { + TensorCopy(reduce_sum_trg, platform::CPUPlace(), trg); + } + } + }); } else if (paddle::platform::is_gpu_place(lod_tensors[0]->place())) { #ifdef PADDLE_WITH_CUDA auto pre_in = pre_in_var->Get(); @@ -157,17 +187,6 @@ std::vector ReduceOpHandle::GetInputValues( return in_selected_rows; } -void ReduceOpHandle::WaitInputVarGenerated( - const std::vector &in_var_handles) { - for (auto *in : in_var_handles) { - if (in->generated_op_) { - for (auto pair : dev_ctxes_) { - in->generated_op_->Wait(pair.second); - } - } - } -} - std::string ReduceOpHandle::Name() const { return "reduce"; } } // namespace details } // namespace framework diff --git a/paddle/fluid/framework/details/reduce_op_handle.h b/paddle/fluid/framework/details/reduce_op_handle.h index 59731d348d..a6289b055f 100644 --- a/paddle/fluid/framework/details/reduce_op_handle.h +++ b/paddle/fluid/framework/details/reduce_op_handle.h @@ -32,15 +32,18 @@ namespace framework { namespace details { struct ReduceOpHandle : public OpHandleBase { - const std::vector &local_scopes_; - const std::vector &places_; + std::vector local_scopes_; + std::vector places_; #ifdef PADDLE_WITH_CUDA const platform::NCCLContextMap *nccl_ctxs_; - ReduceOpHandle(const std::vector &local_scopes, + ReduceOpHandle(ir::Node *node, const std::vector &local_scopes, const std::vector &places, const platform::NCCLContextMap *nccl_ctxs) - : local_scopes_(local_scopes), places_(places), nccl_ctxs_(nccl_ctxs) { + : OpHandleBase(node), + local_scopes_(local_scopes), + 
places_(places), + nccl_ctxs_(nccl_ctxs) { if (nccl_ctxs_) { for (auto &p_ctx : nccl_ctxs_->contexts_) { dev_ctxes_[platform::CUDAPlace(p_ctx.first)] = p_ctx.second.ctx_.get(); @@ -48,9 +51,9 @@ struct ReduceOpHandle : public OpHandleBase { } } #else - ReduceOpHandle(const std::vector &local_scopes, + ReduceOpHandle(ir::Node *node, const std::vector &local_scopes, const std::vector &places) - : local_scopes_(local_scopes), places_(places) {} + : OpHandleBase(node), local_scopes_(local_scopes), places_(places) {} #endif std::string Name() const override; @@ -60,8 +63,6 @@ struct ReduceOpHandle : public OpHandleBase { protected: void RunImpl() override; - void WaitInputVarGenerated(const std::vector &in_var_handles); - template std::vector GetInputValues( const std::vector &in_var_handles, diff --git a/paddle/fluid/framework/details/reduce_op_handle_test.cc b/paddle/fluid/framework/details/reduce_op_handle_test.cc index ffdd7c14eb..3a9a584123 100644 --- a/paddle/fluid/framework/details/reduce_op_handle_test.cc +++ b/paddle/fluid/framework/details/reduce_op_handle_test.cc @@ -84,6 +84,7 @@ struct TestReduceOpHandle { } void InitReduceOp(size_t out_scope_idx) { + std::vector> nodes; // init scope for (size_t j = 0; j < gpu_list_.size(); ++j) { local_scopes_.push_back(&(g_scope_.NewScope())); @@ -96,19 +97,21 @@ struct TestReduceOpHandle { } param_scopes_[out_scope_idx]->Var("out"); + nodes.emplace_back(new ir::Node("node")); if (use_gpu_) { #ifdef PADDLE_WITH_CUDA - op_handle_.reset( - new ReduceOpHandle(local_scopes_, gpu_list_, nccl_ctxs_.get())); + op_handle_.reset(new ReduceOpHandle(nodes.back().get(), local_scopes_, + gpu_list_, nccl_ctxs_.get())); #else PADDLE_THROW("CUDA is not support."); #endif } else { #ifdef PADDLE_WITH_CUDA - op_handle_.reset( - new ReduceOpHandle(local_scopes_, gpu_list_, nccl_ctxs_.get())); + op_handle_.reset(new ReduceOpHandle(nodes.back().get(), local_scopes_, + gpu_list_, nccl_ctxs_.get())); #else - op_handle_.reset(new ReduceOpHandle(local_scopes_, gpu_list_)); + op_handle_.reset( + new ReduceOpHandle(nodes.back().get(), local_scopes_, gpu_list_)); #endif } @@ -118,8 +121,10 @@ struct TestReduceOpHandle { if (!use_gpu_) { op_handle_->SetDeviceContext(gpu_list_[j], ctxs_[j].get()); } - auto *in_var_handle = new VarHandle(1, j, "input", gpu_list_[j]); - in_var_handle->generated_op_ = nullptr; + nodes.emplace_back(new ir::Node("node1")); + auto *in_var_handle = + new VarHandle(nodes.back().get(), 1, j, "input", gpu_list_[j]); + in_var_handle->ClearGeneratedOp(); vars_.emplace_back(in_var_handle); op_handle_->AddInput(in_var_handle); } @@ -128,12 +133,13 @@ struct TestReduceOpHandle { vars_.emplace_back(new DummyVarHandle()); DummyVarHandle *in_dummy_var_handle = static_cast(vars_.back().get()); - in_dummy_var_handle->generated_op_ = nullptr; + in_dummy_var_handle->ClearGeneratedOp(); op_handle_->AddInput(in_dummy_var_handle); // add output - auto *out_var_handle = - new VarHandle(2, out_scope_idx, "out", gpu_list_[out_scope_idx]); + nodes.emplace_back(new ir::Node("node2")); + auto *out_var_handle = new VarHandle(nodes.back().get(), 2, out_scope_idx, + "out", gpu_list_[out_scope_idx]); vars_.emplace_back(out_var_handle); op_handle_->AddOutput(out_var_handle); diff --git a/paddle/fluid/framework/details/send_op_handle.cc b/paddle/fluid/framework/details/rpc_op_handle.cc similarity index 59% rename from paddle/fluid/framework/details/send_op_handle.cc rename to paddle/fluid/framework/details/rpc_op_handle.cc index bd97c5260d..f44b374edb 100644 --- 
a/paddle/fluid/framework/details/send_op_handle.cc +++ b/paddle/fluid/framework/details/rpc_op_handle.cc @@ -12,28 +12,33 @@ // See the License for the specific language governing permissions and // limitations under the License. -#include "paddle/fluid/framework/details/send_op_handle.h" +#include "paddle/fluid/framework/details/rpc_op_handle.h" +#include "paddle/fluid/framework/ir/graph.h" namespace paddle { namespace framework { namespace details { -SendOpHandle::SendOpHandle(const framework::OpDesc &op_desc, - const Scope *local_scope, - const platform::Place &place) - : op_(framework::OpRegistry::CreateOp(op_desc)), +RPCOpHandle::RPCOpHandle(ir::Node *node, const framework::OpDesc &op_desc, + const Scope *local_scope, const std::string &name, + const platform::Place &place) + : OpHandleBase(node), + op_(framework::OpRegistry::CreateOp(op_desc)), local_scope_(local_scope), + name_(name), place_(place) {} -void SendOpHandle::RunImpl() { +void RPCOpHandle::RunImpl() { + // TODO(wuyi): need further analysis whether wait VarDummyHandle. // Wait input done for (auto *in : inputs_) { auto &p = static_cast(in)->place_; - if (in->DebugString() == "dummy") { // HACK + // FIXME(Yancey1989): need a better solution instead of use DebugString() + if (ir::IsControlDepVar(*in->Node())) { // HACK continue; } - if (in->generated_op_) { - in->generated_op_->Wait(dev_ctxes_[p]); + if (in->GeneratedOp()) { + in->GeneratedOp()->RecordWaitEventOnCtx(dev_ctxes_[p]); } } auto &tmp_scope = local_scope_->FindVar(kLocalExecScopeName)->Get(); @@ -42,7 +47,7 @@ void SendOpHandle::RunImpl() { op_->Run(*tmp_scope, place_); } -std::string SendOpHandle::Name() const { return "send"; } +std::string RPCOpHandle::Name() const { return name_; } } // namespace details } // namespace framework } // namespace paddle diff --git a/paddle/fluid/framework/details/send_op_handle.h b/paddle/fluid/framework/details/rpc_op_handle.h similarity index 83% rename from paddle/fluid/framework/details/send_op_handle.h rename to paddle/fluid/framework/details/rpc_op_handle.h index 2f78811fad..7f99cdeacf 100644 --- a/paddle/fluid/framework/details/send_op_handle.h +++ b/paddle/fluid/framework/details/rpc_op_handle.h @@ -27,9 +27,10 @@ namespace paddle { namespace framework { namespace details { -struct SendOpHandle : public OpHandleBase { - SendOpHandle(const framework::OpDesc& op_desc, const Scope* local_scope, - const platform::Place& place); +struct RPCOpHandle : public OpHandleBase { + RPCOpHandle(ir::Node* node, const framework::OpDesc& op_desc, + const Scope* local_scope, const std::string& name, + const platform::Place& place); std::string Name() const override; @@ -43,7 +44,8 @@ struct SendOpHandle : public OpHandleBase { private: std::unique_ptr op_; const Scope* local_scope_; - const platform::Place& place_; + const std::string name_; + platform::Place place_; }; } // namespace details diff --git a/paddle/fluid/framework/details/scale_loss_grad_op_handle.cc b/paddle/fluid/framework/details/scale_loss_grad_op_handle.cc index 1cd3113030..609e185819 100644 --- a/paddle/fluid/framework/details/scale_loss_grad_op_handle.cc +++ b/paddle/fluid/framework/details/scale_loss_grad_op_handle.cc @@ -19,16 +19,21 @@ namespace paddle { namespace framework { namespace details { -ScaleLossGradOpHandle::ScaleLossGradOpHandle(size_t num_dev, Scope *scope, +ScaleLossGradOpHandle::ScaleLossGradOpHandle(ir::Node *node, size_t num_dev, + Scope *scope, platform::Place place, platform::DeviceContext *dev_ctx) - : coeff_(static_cast(1.0 / num_dev)), 
scope_(scope), place_(place) { + : OpHandleBase(node), + coeff_(static_cast(1.0 / num_dev)), + scope_(scope), + place_(place) { dev_ctxes_[place_] = dev_ctx; } ScaleLossGradOpHandle::~ScaleLossGradOpHandle() {} void ScaleLossGradOpHandle::RunImpl() { + // Doesn't wait any event std::string var_name = static_cast(this->outputs_[0])->name_; auto &local_scope = *scope_->FindVar(kLocalExecScopeName)->Get(); diff --git a/paddle/fluid/framework/details/scale_loss_grad_op_handle.h b/paddle/fluid/framework/details/scale_loss_grad_op_handle.h index d93d599d46..523b55724c 100644 --- a/paddle/fluid/framework/details/scale_loss_grad_op_handle.h +++ b/paddle/fluid/framework/details/scale_loss_grad_op_handle.h @@ -25,7 +25,8 @@ namespace framework { namespace details { struct ScaleLossGradOpHandle : public OpHandleBase { - ScaleLossGradOpHandle(size_t num_dev, Scope *scope, platform::Place place, + ScaleLossGradOpHandle(ir::Node *node, size_t num_dev, Scope *scope, + platform::Place place, platform::DeviceContext *context); ~ScaleLossGradOpHandle() final; diff --git a/paddle/fluid/framework/details/scope_buffered_ssa_graph_executor.cc b/paddle/fluid/framework/details/scope_buffered_ssa_graph_executor.cc new file mode 100644 index 0000000000..5bd974d6b7 --- /dev/null +++ b/paddle/fluid/framework/details/scope_buffered_ssa_graph_executor.cc @@ -0,0 +1,89 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "paddle/fluid/framework/details/scope_buffered_ssa_graph_executor.h" +#include +#include +#include +#include "paddle/fluid/framework/executor.h" +#include "paddle/fluid/platform/profiler.h" + +namespace paddle { +namespace framework { +namespace details { +ScopeBufferedSSAGraphExecutor::ScopeBufferedSSAGraphExecutor( + ExecutionStrategy strategy, std::vector local_scopes, + std::vector var_infos, std::vector places, + std::unique_ptr &&underlying_executor) + : strategy_(std::move(strategy)), + underlying_executor_(std::move(underlying_executor)), + local_scopes_(std::move(local_scopes)), + var_infos_(std::move(var_infos)), + places_(std::move(places)) {} + +FeedFetchList ScopeBufferedSSAGraphExecutor::Run( + const std::vector &fetch_tensors) { + if (drop_scope_counter_ == 0) { + // Create local scopes. + for (auto it = local_scopes_.rbegin(); it != local_scopes_.rend(); ++it) { + auto &scope = *it; + Scope &local_scope = scope->NewScope(); + *scope->Var(details::kLocalExecScopeName)->GetMutable() = + &local_scope; + + for (auto &info : var_infos_) { + if (scope->FindVar(info.name_) != nullptr) { + continue; + } + + if (info.persistable_) { // Persistable + InitializeVariable(scope->Var(info.name_), info.type_); + } else { + InitializeVariable(local_scope.Var(info.name_), info.type_); + } + } + } + } + std::vector fetch_data; + std::exception_ptr eptr; + try { + fetch_data = underlying_executor_->Run(fetch_tensors); + } catch (...) 
{ + eptr = std::current_exception(); + } + + platform::RecordEvent e("ScopeBufferedSSAGraphExecutorAfterRun", nullptr); + drop_scope_counter_ += 1; + if (!fetch_tensors.empty() || + drop_scope_counter_ == strategy_.num_iteration_per_drop_scope_) { + drop_scope_counter_ = 0; + // Wait All computational streams + for (auto p : places_) { + platform::DeviceContextPool::Instance().Get(p)->Wait(); + } + for (auto &scope : local_scopes_) { + auto &local_scope = + *scope->Var(details::kLocalExecScopeName)->GetMutable(); + scope->DeleteScope(local_scope); + } + } + if (eptr) { + std::rethrow_exception(eptr); + } else { + return fetch_data; + } +} +} // namespace details +} // namespace framework +} // namespace paddle diff --git a/paddle/fluid/framework/details/scope_buffered_ssa_graph_executor.h b/paddle/fluid/framework/details/scope_buffered_ssa_graph_executor.h new file mode 100644 index 0000000000..5e87e0bf50 --- /dev/null +++ b/paddle/fluid/framework/details/scope_buffered_ssa_graph_executor.h @@ -0,0 +1,61 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once + +#include +#include +#include +#include "paddle/fluid/framework/details/op_handle_base.h" +#include "paddle/fluid/framework/details/var_handle.h" + +#include "paddle/fluid/framework/details/execution_strategy.h" +#include "paddle/fluid/framework/details/ssa_graph_executor.h" +#include "paddle/fluid/framework/scope.h" +#include "paddle/fluid/platform/place.h" +namespace paddle { +namespace framework { +namespace details { + +struct VariableInfo { + std::string name_; + proto::VarType::Type type_; + bool persistable_; +}; + +class ScopeBufferedSSAGraphExecutor : public SSAGraphExecutor { + public: + ScopeBufferedSSAGraphExecutor( + ExecutionStrategy strategy, std::vector local_scopes, + std::vector var_infos, std::vector places, + std::unique_ptr&& underlying_executor); + + const ir::Graph& Graph() const override { + return underlying_executor_->Graph(); + } + + FeedFetchList Run(const std::vector& fetch_tensors) override; + + private: + size_t drop_scope_counter_{0}; + + ExecutionStrategy strategy_; + std::unique_ptr underlying_executor_; + std::vector local_scopes_; + std::vector var_infos_; + std::vector places_; +}; +} // namespace details +} // namespace framework +} // namespace paddle diff --git a/paddle/fluid/framework/details/ssa_graph.h b/paddle/fluid/framework/details/ssa_graph.h deleted file mode 100644 index e996a00c16..0000000000 --- a/paddle/fluid/framework/details/ssa_graph.h +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -#pragma once - -#include -#include -#include - -#include "paddle/fluid/framework/details/op_handle_base.h" -#include "paddle/fluid/framework/details/var_handle.h" - -namespace paddle { -namespace framework { -namespace details { - -// A SSA graph used by parallel executor. -struct SSAGraph { - // all variable in each devices. - // The outside vector is the device vector. Each element of this vector is a - // map from variable name to variables. The variables, who have the same name, - // will have a different version. The offset in the - // `std::vector>` is the version of varaibles. - std::vector< - std::unordered_map>>> - vars_; - - // aux variables to represent dependency. Useful to resolve data hazard. - std::unordered_set> dep_vars_; - - // all operators. NOTE that even we use a vector here, the operators is - // unordered. - std::vector> ops_; -}; - -} // namespace details -} // namespace framework -} // namespace paddle diff --git a/paddle/fluid/framework/details/ssa_graph_builder.cc b/paddle/fluid/framework/details/ssa_graph_builder.cc deleted file mode 100644 index 6a56752755..0000000000 --- a/paddle/fluid/framework/details/ssa_graph_builder.cc +++ /dev/null @@ -1,146 +0,0 @@ -// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -#include "paddle/fluid/framework/details/ssa_graph_builder.h" - -namespace paddle { -namespace framework { -namespace details { -void SSAGraphBuilder::PolishGraphToSupportDataHazards(SSAGraph *graph) { - for (auto &var_map : graph->vars_) { - for (auto &name_pair : var_map) { - if (name_pair.second.size() <= 1) { - continue; - } - auto it_new = name_pair.second.rbegin(); - auto it_old = name_pair.second.rbegin(); - ++it_old; - for (; it_old != name_pair.second.rend(); it_new = it_old, ++it_old) { - auto *write_op = (*it_new)->generated_op_; - auto &read_ops = (*it_old)->pending_ops_; - - for (auto *read_op : read_ops) { - // Manually add a dependency var from read_op to write_op; - if (read_op == write_op) { - // Read Write is the same op. 
- continue; - } - - auto *dep_var = new DummyVarHandle(); - read_op->AddOutput(dep_var); - write_op->AddInput(dep_var); - graph->dep_vars_.emplace(dep_var); - } - } - } - } -} - -VarHandle *SSAGraphBuilder::CreateOrGetLatestVarHandle( - SSAGraph *graph, const std::string &each_var_name, - const platform::Place &place, size_t place_offset) { - auto &var_holders = graph->vars_[place_offset]; - auto &var_holder = var_holders[each_var_name]; - VarHandle *var = nullptr; - if (var_holder.empty()) { - var = new VarHandle(0, place_offset, each_var_name, place); - var_holder.emplace_back(var); - } else { - var = var_holder.rbegin()->get(); - } - return var; -} - -void SSAGraphBuilder::CreateOpOutput(SSAGraph *graph, OpHandleBase *op_handle, - const std::string &each_var_name, - const platform::Place &place, - size_t place_offset) { - auto &vars = graph->vars_[place_offset][each_var_name]; - size_t version = vars.size(); - auto var = new VarHandle(version, place_offset, each_var_name, place); - vars.emplace_back(var); - op_handle->AddOutput(var); -} - -template -void IterAllVar(const SSAGraph &graph, Callback callback) { - for (auto &each : graph.vars_) { - for (auto &pair1 : each) { - for (auto &pair2 : pair1.second) { - callback(*pair2); - } - } - } - - for (auto &var : graph.dep_vars_) { - callback(*var); - } -} - -void SSAGraphBuilder::PrintGraphviz(const SSAGraph &graph, std::ostream &sout) { - size_t var_id = 0; - std::unordered_map vars; - - sout << "digraph G {\n"; - - IterAllVar(graph, [&](const VarHandleBase &var) { - auto *var_ptr = &var; - auto *var_handle_ptr = dynamic_cast(var_ptr); - auto *dummy_ptr = dynamic_cast(var_ptr); - - size_t cur_var_id = var_id++; - vars[var_ptr] = cur_var_id; - - if (var_handle_ptr) { - sout << "var_" << cur_var_id << " [label=\"" << var_handle_ptr->name_ - << "\\n" - << var_handle_ptr->place_ << "\\n" - << var_handle_ptr->version_ << "\"]" << std::endl; - } else if (dummy_ptr) { - sout << "var_" << cur_var_id << " [label=\"dummy\"]" << std::endl; - } - }); - - size_t op_id = 0; - for (auto &op : graph.ops_) { - std::string op_name = "op_" + std::to_string(op_id++); - sout << op_name << " [label=\"" << op->Name() << "\", shape=rect]" - << std::endl; - for (auto in : op->Inputs()) { - std::string var_name = "var_" + std::to_string(vars[in]); - sout << var_name << " -> " << op_name << std::endl; - } - - for (auto out : op->Outputs()) { - std::string var_name = "var_" + std::to_string(vars[out]); - sout << op_name << " -> " << var_name << std::endl; - } - } - - sout << "}\n"; -} - -void SSAGraphBuilder::AddOutputToLeafOps(SSAGraph *graph) { - for (auto &op : graph->ops_) { - if (!op->Outputs().empty()) { - continue; - } - auto *dummy_leaf = new DummyVarHandle(); - graph->dep_vars_.emplace(dummy_leaf); - op->AddOutput(dummy_leaf); - } -} -} // namespace details -} // namespace framework -} // namespace paddle diff --git a/paddle/fluid/framework/details/ssa_graph_builder.h b/paddle/fluid/framework/details/ssa_graph_builder.h deleted file mode 100644 index 64e5d93081..0000000000 --- a/paddle/fluid/framework/details/ssa_graph_builder.h +++ /dev/null @@ -1,63 +0,0 @@ -// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -#pragma once - -#include -#include - -#include "paddle/fluid/framework/details/ssa_graph.h" -#include "paddle/fluid/framework/program_desc.h" -#include "paddle/fluid/platform/place.h" - -namespace paddle { -namespace framework { -namespace details { - -class SSAGraphBuilder { - public: - SSAGraphBuilder() {} - virtual ~SSAGraphBuilder() {} - virtual std::unique_ptr Build(const ProgramDesc &program) const = 0; - - DISABLE_COPY_AND_ASSIGN(SSAGraphBuilder); - - protected: - /** - * We only handle write after read(WAR), since it should not have a write - * after write in program. If there are write after write operators, we need - * prune them. - * - * https://en.wikipedia.org/wiki/Hazard_(computer_architecture)#Write_after_read_(WAR) - */ - static void PolishGraphToSupportDataHazards(SSAGraph *graph); - - static VarHandle *CreateOrGetLatestVarHandle(SSAGraph *graph, - const std::string &each_var_name, - const platform::Place &place, - size_t place_offset); - - // Add an output variable (each_var_name, place, place_offset) to op_handle, - // which belongs to graph - static void CreateOpOutput(SSAGraph *graph, OpHandleBase *op_handle, - const std::string &each_var_name, - const platform::Place &place, size_t place_offset); - - static void AddOutputToLeafOps(SSAGraph *graph); - - static void PrintGraphviz(const SSAGraph &graph, std::ostream &sout); -}; -} // namespace details -} // namespace framework -} // namespace paddle diff --git a/paddle/fluid/framework/details/ssa_graph_executor.cc b/paddle/fluid/framework/details/ssa_graph_executor.cc index 8da6ca889b..09b97bd0d9 100644 --- a/paddle/fluid/framework/details/ssa_graph_executor.cc +++ b/paddle/fluid/framework/details/ssa_graph_executor.cc @@ -17,10 +17,6 @@ namespace paddle { namespace framework { namespace details { - -SSAGraphExecutor::SSAGraphExecutor(std::unique_ptr &&graph) - : graph_(std::move(graph)) {} - SSAGraphExecutor::~SSAGraphExecutor() {} } // namespace details diff --git a/paddle/fluid/framework/details/ssa_graph_executor.h b/paddle/fluid/framework/details/ssa_graph_executor.h index a8833b7388..96fffb7d94 100644 --- a/paddle/fluid/framework/details/ssa_graph_executor.h +++ b/paddle/fluid/framework/details/ssa_graph_executor.h @@ -18,8 +18,8 @@ #include #include -#include "paddle/fluid/framework/details/ssa_graph.h" #include "paddle/fluid/framework/feed_fetch_type.h" +#include "paddle/fluid/framework/ir/graph.h" namespace paddle { namespace framework { @@ -28,15 +28,13 @@ class SSAGraphExecutor { DISABLE_COPY_AND_ASSIGN(SSAGraphExecutor); public: - // Steal graph inside - explicit SSAGraphExecutor(std::unique_ptr &&graph); + SSAGraphExecutor() {} virtual ~SSAGraphExecutor(); - virtual FeedFetchList Run(const std::vector &fetch_tensors) = 0; + virtual const ir::Graph& Graph() const = 0; - protected: - std::unique_ptr graph_; + virtual FeedFetchList Run(const std::vector& fetch_tensors) = 0; }; } // namespace details } // namespace framework diff --git a/paddle/fluid/framework/details/threaded_ssa_graph_executor.cc b/paddle/fluid/framework/details/threaded_ssa_graph_executor.cc index 5e6ed5cb7c..994bb6492f 
100644 --- a/paddle/fluid/framework/details/threaded_ssa_graph_executor.cc +++ b/paddle/fluid/framework/details/threaded_ssa_graph_executor.cc @@ -14,27 +14,29 @@ #include "paddle/fluid/framework/details/threaded_ssa_graph_executor.h" -#include "paddle/fluid/framework/details/fetch_op_handle.h" +#include "paddle/fluid/framework/details/multi_devices_helper.h" +#include "paddle/fluid/platform/profiler.h" namespace paddle { namespace framework { namespace details { ThreadedSSAGraphExecutor::ThreadedSSAGraphExecutor( - size_t num_threads, bool use_event, - const std::vector &local_scopes, + const ExecutionStrategy &strategy, const std::vector &local_scopes, const std::vector &places, - std::unique_ptr &&graph, bool allow_op_delay) - : SSAGraphExecutor(std::move(graph)), - pool_(num_threads >= 2 ? new ::ThreadPool(num_threads) : nullptr), + std::unique_ptr &&graph) + : graph_(std::move(graph)), + pool_(strategy.num_threads_ >= 2 ? new ::ThreadPool(strategy.num_threads_) + : nullptr), local_scopes_(local_scopes), places_(places), fetch_ctxs_(places), - use_event_(use_event), running_ops_(0), - allow_op_delay_(allow_op_delay) {} + strategy_(strategy) {} FeedFetchList ThreadedSSAGraphExecutor::Run( const std::vector &fetch_tensors) { + std::unique_ptr event( + new platform::RecordEvent("ThreadedSSAGraphExecutorPrepare", nullptr)); std::unordered_map pending_ops; std::unordered_set pending_vars; BlockingQueue ready_vars; @@ -45,73 +47,34 @@ FeedFetchList ThreadedSSAGraphExecutor::Run( // Should revisit it if overlapping is available. std::unordered_set delayed_ops; - auto InsertPendingVar = [&pending_vars, &ready_vars](VarHandleBase &var) { - pending_vars.insert(&var); - if (var.generated_op_ == nullptr) { - ready_vars.Push(&var); - } - }; - - auto InsertPendingOp = [&pending_ops](OpHandleBase &op_instance) { - pending_ops.insert({&op_instance, op_instance.Inputs().size()}); - }; - // Transform SSAGraph to pending_ops & pending_vars - for (auto &var_map : graph_->vars_) { + for (auto &var_map : graph_->Get(details::kGraphVars)) { for (auto &name_pair : var_map) { for (auto &version_pair : name_pair.second) { - InsertPendingVar(*version_pair); + InsertPendingVar(&pending_vars, &ready_vars, version_pair.get()); } } } - for (auto &var : graph_->dep_vars_) { - InsertPendingVar(*var); + for (auto &var : graph_->Get(details::kGraphDepVars)) { + InsertPendingVar(&pending_vars, &ready_vars, var.get()); } - for (auto &op : graph_->ops_) { + for (auto &op : graph_->Get(details::kGraphOps)) { if (op->Inputs().empty()) { // Special case, Op has no input. ready_ops.insert(op.get()); } else { - InsertPendingOp(*op); + InsertPendingOp(&pending_ops, op.get()); } } // Step 2. 
Insert FetchOps std::vector> fetch_ops; - FeedFetchList fetch_data(fetch_tensors.size()); - - std::unordered_map> fetched_vars; - - for (auto &fetch_var_name : fetch_tensors) { - for (auto &var_map : graph_->vars_) { - auto it = var_map.find(fetch_var_name); - if (it != var_map.end()) { - fetched_vars[fetch_var_name].push_back(it->second.rbegin()->get()); - } - } - } - + std::vector> tmp_nodes; std::unordered_set> fetch_dependencies; - for (size_t i = 0; i < fetch_tensors.size(); ++i) { - auto &var_name = fetch_tensors[i]; - auto &vars = fetched_vars.at(var_name); - auto *op = new FetchOpHandle(&fetch_data, i, &local_scopes_); - fetch_ops.emplace_back(op); - - for (auto &p : places_) { - op->SetDeviceContext(p, fetch_ctxs_.Get(p)); - } - - for (auto *var : vars) { - op->AddInput(var); - } + FeedFetchList fetch_data(fetch_tensors.size()); - auto *fetch_dummy = new DummyVarHandle(); - op->AddOutput(fetch_dummy); - fetch_dependencies.emplace(fetch_dummy); - InsertPendingVar(*fetch_dummy); - InsertPendingOp(*op); - } + InsertFetchOps(fetch_tensors, &fetch_ops, &tmp_nodes, &fetch_dependencies, + &pending_ops, &pending_vars, &ready_vars, &fetch_data); auto run_all_ops = [&](std::unordered_set &set) { for (auto *op : set) { @@ -121,6 +84,11 @@ FeedFetchList ThreadedSSAGraphExecutor::Run( set.clear(); }; + // Clean run context + run_op_futures_.clear(); + exception_holder_.Clear(); + event.reset(nullptr); + // Step 3. Execution while (!pending_vars.empty()) { // 1. Run All Ready ops @@ -128,7 +96,7 @@ FeedFetchList ThreadedSSAGraphExecutor::Run( // // NOTE: DelayedOps have a lower priority. It will be scheduled after all // ready_ops have been performed. - if (ready_ops.empty() && allow_op_delay_ && running_ops_ == 0) { + if (ready_ops.empty() && strategy_.allow_op_delay_ && running_ops_ == 0) { run_all_ops(delayed_ops); } else { run_all_ops(ready_ops); @@ -139,10 +107,11 @@ FeedFetchList ThreadedSSAGraphExecutor::Run( auto cur_ready_vars = ready_vars.PopAll(1, &timeout); if (timeout) { - if (exception_) { - auto exp = *exception_; - exception_.reset(); - throw exp; + if (exception_holder_.ExceptionCatched()) { + for (auto &run_op_future : run_op_futures_) { + run_op_future.wait(); + } + exception_holder_.Throw(); } else { continue; } @@ -151,11 +120,11 @@ FeedFetchList ThreadedSSAGraphExecutor::Run( // Find the ready_ops after the ready_var. 
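// Note: exception_holder.h is added by this change but its contents are not
// shown in this diff. Judging only from the calls above (Catch,
// ExceptionCatched, Throw, Clear), a minimal holder could look like the
// sketch below; the std::exception_ptr storage and the mutex are assumptions,
// not the actual implementation:
//
//   class ExceptionHolder {
//    public:
//     void Catch(const platform::EnforceNotMet& e) { Store(std::make_exception_ptr(e)); }
//     void Catch(const platform::EOFException& e) { Store(std::make_exception_ptr(e)); }
//     bool ExceptionCatched() const { std::lock_guard<std::mutex> l(mu_); return ep_ != nullptr; }
//     void Throw() { std::lock_guard<std::mutex> l(mu_); std::rethrow_exception(ep_); }
//     void Clear() { std::lock_guard<std::mutex> l(mu_); ep_ = nullptr; }
//    private:
//     void Store(std::exception_ptr ep) { std::lock_guard<std::mutex> l(mu_); ep_ = ep; }
//     mutable std::mutex mu_;  // RunOp() callbacks from the thread pool race on this holder
//     std::exception_ptr ep_{nullptr};
//   };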
for (auto ready_var : cur_ready_vars) { pending_vars.erase(ready_var); - for (auto *op : ready_var->pending_ops_) { + for (auto *op : ready_var->PendingOps()) { auto &deps = pending_ops[op]; --deps; if (deps == 0) { - if (op->IsMultiDeviceTransfer() && allow_op_delay_) { + if (op->IsMultiDeviceTransfer() && strategy_.allow_op_delay_) { delayed_ops.insert(op); } else { ready_ops.insert(op); @@ -174,24 +143,93 @@ FeedFetchList ThreadedSSAGraphExecutor::Run( return fetch_data; } +void ThreadedSSAGraphExecutor::InsertFetchOps( + const std::vector &fetch_tensors, + std::vector> *fetch_ops, + std::vector> *temp_nodes, + std::unordered_set> *fetch_dependencies, + std::unordered_map *pending_ops, + std::unordered_set *pending_vars, + BlockingQueue *ready_vars, FeedFetchList *fetch_data) { + std::unordered_map> fetched_vars; + + for (auto &fetch_var_name : fetch_tensors) { + for (auto &var_map : graph_->Get(details::kGraphVars)) { + auto it = var_map.find(fetch_var_name); + if (it != var_map.end()) { + fetched_vars[fetch_var_name].push_back(it->second.rbegin()->get()); + } + } + } + + for (size_t i = 0; i < fetch_tensors.size(); ++i) { + auto &var_name = fetch_tensors[i]; + auto fetched_var_it = fetched_vars.find(var_name); + PADDLE_ENFORCE(fetched_var_it != fetched_vars.end(), + "Cannot find fetched variable.(Perhaps the main_program " + "is not set to ParallelExecutor)"); + + auto &vars = fetched_var_it->second; + + temp_nodes->emplace_back(new ir::Node("fetch", ir::Node::Type::kOperation)); + auto *op = new FetchOpHandle(temp_nodes->back().get(), fetch_data, i, + &local_scopes_); + fetch_ops->emplace_back(op); + + for (auto &p : places_) { + op->SetDeviceContext(p, fetch_ctxs_.Get(p)); + } + + for (auto *var : vars) { + op->AddInput(var); + } + + temp_nodes->emplace_back(new ir::Node("fetch", ir::Node::Type::kOperation)); + auto *fetch_dummy = new DummyVarHandle(temp_nodes->back().get()); + op->AddOutput(fetch_dummy); + fetch_dependencies->emplace(fetch_dummy); + this->InsertPendingVar(pending_vars, ready_vars, fetch_dummy); + this->InsertPendingOp(pending_ops, op); + } +} + +void ThreadedSSAGraphExecutor::InsertPendingOp( + std::unordered_map *pending_ops, + OpHandleBase *op_instance) const { + pending_ops->insert({op_instance, op_instance->NoDupInputSize()}); +} + +void ThreadedSSAGraphExecutor::InsertPendingVar( + std::unordered_set *pending_vars, + BlockingQueue *ready_vars, VarHandleBase *var) const { + pending_vars->insert(var); + if (var->GeneratedOp() == nullptr) { + ready_vars->Push(var); + } +} + void ThreadedSSAGraphExecutor::RunOp( BlockingQueue *ready_var_q, details::OpHandleBase *op) { auto op_run = [ready_var_q, op, this] { try { - VLOG(10) << op << " " << op->Name() << " : " << op->DebugString(); - op->Run(use_event_); + if (VLOG_IS_ON(10)) { + VLOG(10) << op << " " << op->Name() << " : " << op->DebugString(); + } + op->Run(strategy_.use_cuda_); VLOG(10) << op << " " << op->Name() << " Done "; running_ops_--; ready_var_q->Extend(op->Outputs()); VLOG(10) << op << " " << op->Name() << "Signal posted"; + } catch (platform::EOFException ex) { + exception_holder_.Catch(ex); } catch (platform::EnforceNotMet ex) { - exception_.reset(new platform::EnforceNotMet(ex)); + exception_holder_.Catch(ex); } catch (...) 
{ LOG(FATAL) << "Unknown exception caught"; } }; if (pool_) { - pool_->enqueue(op_run); + run_op_futures_.emplace_back(pool_->enqueue(op_run)); } else { op_run(); } diff --git a/paddle/fluid/framework/details/threaded_ssa_graph_executor.h b/paddle/fluid/framework/details/threaded_ssa_graph_executor.h index d089b79d91..9135c1f5d4 100644 --- a/paddle/fluid/framework/details/threaded_ssa_graph_executor.h +++ b/paddle/fluid/framework/details/threaded_ssa_graph_executor.h @@ -15,6 +15,7 @@ #pragma once #include +#include <list> #include #include #include @@ -23,7 +24,11 @@ #include #include "ThreadPool.h" // ThreadPool in third party #include "paddle/fluid/framework/blocking_queue.h" +#include "paddle/fluid/framework/details/exception_holder.h" +#include "paddle/fluid/framework/details/execution_strategy.h" +#include "paddle/fluid/framework/details/fetch_op_handle.h" #include "paddle/fluid/framework/details/ssa_graph_executor.h" +#include "paddle/fluid/framework/ir/graph.h" namespace paddle { namespace framework { @@ -33,12 +38,12 @@ namespace details { class ThreadedSSAGraphExecutor : public SSAGraphExecutor { public: - ThreadedSSAGraphExecutor(size_t num_threads, bool use_event, + ThreadedSSAGraphExecutor(const ExecutionStrategy &strategy, const std::vector<Scope *> &local_scopes, const std::vector<platform::Place> &places, - std::unique_ptr<SSAGraph> &&graph, - bool allow_op_delay); + std::unique_ptr<ir::Graph> &&graph); + const ir::Graph &Graph() const override { return *graph_; } // Run a SSAGraph by a thread pool // Use topological sort algorithm FeedFetchList Run(const std::vector<std::string> &fetch_tensors) override; @@ -50,14 +55,34 @@ class ThreadedSSAGraphExecutor : public SSAGraphExecutor { details::OpHandleBase *op); private: + std::unique_ptr<ir::Graph> graph_; std::unique_ptr<::ThreadPool> pool_; std::vector<Scope *> local_scopes_; std::vector<platform::Place> places_; platform::DeviceContextPool fetch_ctxs_; - const bool use_event_; - std::unique_ptr<platform::EnforceNotMet> exception_; + ExceptionHolder exception_holder_; std::atomic<int> running_ops_; - bool allow_op_delay_; + + void InsertPendingOp(std::unordered_map<OpHandleBase *, size_t> *pending_ops, + OpHandleBase *op_instance) const; + + void InsertPendingVar(std::unordered_set<VarHandleBase *> *pending_vars, + BlockingQueue<VarHandleBase *> *ready_vars, + VarHandleBase *var) const; + + void InsertFetchOps( + const std::vector<std::string> &fetch_tensors, + std::vector<std::unique_ptr<FetchOpHandle>> *fetch_ops, + std::vector<std::unique_ptr<ir::Node>> *temp_nodes, + std::unordered_set<std::unique_ptr<VarHandleBase>> *fetch_dependencies, + std::unordered_map<OpHandleBase *, size_t> *pending_ops, + std::unordered_set<VarHandleBase *> *pending_vars, + BlockingQueue<VarHandleBase *> *ready_vars, FeedFetchList *fetch_data); + + private: + ExecutionStrategy strategy_; + // use std::list because clear(), push_back, and for_each are O(1) + std::list<std::future<void>> run_op_futures_; }; } // namespace details diff --git a/paddle/fluid/framework/details/var_handle.cc b/paddle/fluid/framework/details/var_handle.cc index 6f00abd947..5457870e9f 100644 --- a/paddle/fluid/framework/details/var_handle.cc +++ b/paddle/fluid/framework/details/var_handle.cc @@ -26,7 +26,7 @@ std::string VarHandle::DebugString() const { return ss.str(); } -std::string DummyVarHandle::DebugString() const { return "dummy"; } +std::string DummyVarHandle::DebugString() const { return node_->Name(); } } // namespace details } // namespace framework } // namespace paddle diff --git a/paddle/fluid/framework/details/var_handle.h b/paddle/fluid/framework/details/var_handle.h index cae9af7217..d8c2bc40b9 100644 --- a/paddle/fluid/framework/details/var_handle.h +++ b/paddle/fluid/framework/details/var_handle.h @@ -13,11 +13,14 @@ // limitations under the License.
#pragma once + +#include #include #include #include #include +#include "paddle/fluid/framework/ir/node.h" #include "paddle/fluid/platform/place.h" namespace paddle { @@ -25,19 +28,60 @@ namespace framework { namespace details { class OpHandleBase; +// Wraps ir::Node and provide helper utilities. +// It's responsible for populating necessary fields of ir::Node. +// // VarHandleBase is the var node in the dependency graph. // A variable can only be generated by a single operator. i.e. // This is a single assignment graph. struct VarHandleBase { + explicit VarHandleBase(ir::Node* node) : node_(node) {} + virtual ~VarHandleBase(); + virtual std::string DebugString() const = 0; + void AddInput(OpHandleBase* in, ir::Node* node) { + node_->inputs.clear(); + node_->inputs.push_back(node); + generated_op_ = in; + } + + void AddOutput(OpHandleBase* out, ir::Node* node) { + if (pending_ops_.find(out) == pending_ops_.end()) { + pending_ops_.insert(out); + node_->outputs.push_back(node); + } + } + + void RemoveOutput(OpHandleBase* out, ir::Node* node) { + pending_ops_.erase(out); + node_->outputs.erase( + std::remove(node_->outputs.begin(), node_->outputs.end(), node), + node_->outputs.end()); + } + + void ClearGeneratedOp() { + generated_op_ = nullptr; + node_->inputs.clear(); + } + + OpHandleBase* GeneratedOp() { return generated_op_; } + + const std::unordered_set& PendingOps() const { + return pending_ops_; + } + + ir::Node* Node() { return node_; } + + protected: // The operator who generate this variable. nullptr if the variable // is a root node. OpHandleBase* generated_op_{nullptr}; // Operators which depend on this variable ready. std::unordered_set pending_ops_; + ir::Node* node_; }; // VarHandle is actually a single version of Runtime Variable. @@ -46,11 +90,14 @@ struct VarHandleBase { // // NOTE: runtime variables have place. struct VarHandle : public VarHandleBase { + explicit VarHandle(ir::Node* node) : VarHandleBase(node) {} + std::string DebugString() const override; - VarHandle(size_t version, size_t scope_index, std::string name, - platform::Place place) - : version_(version), + VarHandle(ir::Node* node, size_t version, size_t scope_index, + std::string name, platform::Place place) + : VarHandleBase(node), + version_(version), scope_idx_(scope_index), name_(std::move(name)), place_(std::move(place)) {} @@ -70,6 +117,8 @@ struct VarHandle : public VarHandleBase { // Dummy Variable. It is used to represent dependencies between operators struct DummyVarHandle : public VarHandleBase { + explicit DummyVarHandle(ir::Node* node) : VarHandleBase(node) {} + std::string DebugString() const override; }; diff --git a/paddle/fluid/framework/executor.cc b/paddle/fluid/framework/executor.cc index ce91d7a826..dad170ed78 100644 --- a/paddle/fluid/framework/executor.cc +++ b/paddle/fluid/framework/executor.cc @@ -20,13 +20,12 @@ limitations under the License. */ #include "paddle/fluid/framework/lod_tensor_array.h" #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/framework/reader.h" +#include "paddle/fluid/operators/detail/macros.h" #include "paddle/fluid/platform/place.h" #include "paddle/fluid/platform/profiler.h" DECLARE_bool(benchmark); -DEFINE_bool(check_nan_inf, false, - "Checking whether operator produce NAN/INF or not. 
It will be " - "extremely slow so please use this flag wisely."); +DEFINE_bool(use_mkldnn, false, "Use MKLDNN to run"); namespace paddle { namespace framework { @@ -46,6 +45,14 @@ ExecutorPrepareContext::~ExecutorPrepareContext() { Executor::Executor(const platform::Place& place) : place_(place) {} +void Executor::Close() { +#ifdef PADDLE_WITH_DISTRIBUTE + ::paddle::operators::distributed::RPCClient::GetInstance< + ::paddle::operators::distributed::GRPCClient>() + ->SendComplete(); +#endif +} + void InitializeVariable(Variable* var, proto::VarType::Type var_type) { if (var_type == proto::VarType::LOD_TENSOR) { var->GetMutable(); @@ -78,21 +85,6 @@ void InitializeVariable(Variable* var, proto::VarType::Type var_type) { } } -static void CheckTensorNANOrInf(const std::string& name, - const framework::Tensor& tensor) { - if (tensor.memory_size() == 0) { - return; - } - if (tensor.type().hash_code() != typeid(float).hash_code() && // NOLINT - tensor.type().hash_code() != typeid(double).hash_code()) { // NOLINT - return; - } - PADDLE_ENFORCE(!framework::TensorContainsInf(tensor), - "Tensor %s contains Inf", name); - PADDLE_ENFORCE(!framework::TensorContainsNAN(tensor), - "Tensor %s contains NAN", name); -} - void Executor::CreateVariables(const ProgramDesc& pdesc, Scope* scope, int block_id) { auto& global_block = pdesc.Block(block_id); @@ -133,6 +125,7 @@ void Executor::CreateVariables(const ProgramDesc& pdesc, Scope* scope, void Executor::Run(const ProgramDesc& pdesc, Scope* scope, int block_id, bool create_local_scope, bool create_vars) { platform::RecordBlock b(block_id); + if (FLAGS_use_mkldnn) EnableMKLDNN(pdesc); auto ctx = Prepare(pdesc, block_id); RunPreparedContext(ctx.get(), scope, create_local_scope, create_vars); } @@ -228,19 +221,22 @@ static bool has_fetch_operators( void Executor::Run(const ProgramDesc& program, Scope* scope, std::map* feed_targets, std::map* fetch_targets, - bool create_vars, const std::string& feed_holder_name, + bool create_local_scope, bool create_vars, + const std::string& feed_holder_name, const std::string& fetch_holder_name) { platform::RecordBlock b(kProgramId); + if (FLAGS_use_mkldnn) EnableMKLDNN(program); bool has_feed_ops = has_feed_operators(program.Block(0), *feed_targets, feed_holder_name); bool has_fetch_ops = has_fetch_operators(program.Block(0), *fetch_targets, fetch_holder_name); ProgramDesc* copy_program = const_cast(&program); + std::unique_ptr unique_ptr_of_copy_program; if (!has_feed_ops || !has_fetch_ops) { - copy_program = std::unique_ptr(new ProgramDesc(program)).get(); + unique_ptr_of_copy_program.reset(new ProgramDesc(program)); + copy_program = unique_ptr_of_copy_program.get(); } - auto* global_block = copy_program->MutableBlock(0); if (!has_feed_ops) { @@ -290,19 +286,21 @@ void Executor::Run(const ProgramDesc& program, Scope* scope, } auto ctx = Prepare(*copy_program, 0); - RunPreparedContext(ctx.get(), scope, feed_targets, fetch_targets, create_vars, - feed_holder_name, fetch_holder_name); + RunPreparedContext(ctx.get(), scope, feed_targets, fetch_targets, + create_local_scope, create_vars, feed_holder_name, + fetch_holder_name); } std::unique_ptr Executor::Prepare( const ProgramDesc& program, int block_id) { - auto* ctx = new ExecutorPrepareContext(program, block_id); + std::unique_ptr ctx( + new ExecutorPrepareContext(program, block_id)); PADDLE_ENFORCE_LT(static_cast(block_id), program.Size()); auto& block = program.Block(block_id); for (auto& op_desc : block.AllOps()) { ctx->ops_.push_back(OpRegistry::CreateOp(*op_desc)); } - 
return std::unique_ptr<ExecutorPrepareContext>(ctx); + return ctx; } std::vector<std::shared_ptr<ExecutorPrepareContext>> Executor::Prepare( @@ -321,7 +319,8 @@ std::vector<std::shared_ptr<ExecutorPrepareContext>> Executor::Prepare( } void Executor::RunPreparedContext(ExecutorPrepareContext* ctx, Scope* scope, - bool create_local_scope, bool create_vars) { + bool create_local_scope, bool create_vars, + bool keep_kids) { Scope* local_scope = scope; if (create_vars) { if (create_local_scope) { @@ -331,30 +330,28 @@ void Executor::RunPreparedContext(ExecutorPrepareContext* ctx, Scope* scope, } for (auto& op : ctx->ops_) { - VLOG(3) << place_ << " " << op->DebugStringEx(local_scope); op->Run(*local_scope, place_); if (FLAGS_benchmark) { VLOG(2) << "Memory used after operator " + op->Type() + " running: " << memory::memory_usage(place_); } - if (FLAGS_check_nan_inf) { - for (auto& vname : op->OutputVars(true)) { - auto* var = local_scope->FindVar(vname); - if (var == nullptr) continue; - if (var->IsType<framework::LoDTensor>()) { - CheckTensorNANOrInf(vname, var->Get<framework::LoDTensor>()); - } - } - } } platform::DeviceContextPool::Instance().Get(place_)->Wait(); - if (create_vars && create_local_scope) { + if (local_scope != scope) { scope->DeleteScope(local_scope); } else { - // Delete the local scopes created in operators. - scope->DropKids(); + if (!keep_kids) { + // By default, we should delete all kid scopes after running the + // executor, because some operators may create local scopes when + // running, such as while_op. But when while_op also creates a local + // executor to run its sub-block, the sub scopes it creates should not + // be dropped immediately, because while_grad_op will use some variables + // created during the while_op run, so we need to keep the kids and wait + // for the outer executor to drop them. + scope->DropKids(); + } } + if (FLAGS_benchmark) { VLOG(2) << "-------------------------------------------------------"; VLOG(2) << "Memory used after deleting local scope: " @@ -366,8 +363,9 @@ void Executor::RunPreparedContext(ExecutorPrepareContext* ctx, Scope* scope, void Executor::RunPreparedContext( ExecutorPrepareContext* ctx, Scope* scope, std::map<std::string, const LoDTensor*>* feed_targets, - std::map<std::string, LoDTensor*>* fetch_targets, bool create_vars, - const std::string& feed_holder_name, const std::string& fetch_holder_name) { + std::map<std::string, LoDTensor*>* fetch_targets, bool create_local_scope, + bool create_vars, const std::string& feed_holder_name, + const std::string& fetch_holder_name) { auto& global_block = ctx->prog_.Block(ctx->block_id_); PADDLE_ENFORCE( @@ -387,7 +385,7 @@ void Executor::RunPreparedContext( } } - RunPreparedContext(ctx, scope, create_vars, create_vars); + RunPreparedContext(ctx, scope, create_local_scope, create_vars); // obtain the data of fetch_targets from fetch_holder for (auto* op : global_block.AllOps()) { @@ -400,5 +398,22 @@ void Executor::RunPreparedContext( } } +void Executor::EnableMKLDNN(const ProgramDesc& program) { +#ifdef PADDLE_WITH_MKLDNN + VLOG(3) << "use_mkldnn=True"; + for (size_t bid = 0; bid < program.Size(); ++bid) { + auto* block = const_cast<ProgramDesc&>(program).MutableBlock(bid); + for (auto* op : block->AllOps()) { + if (op->HasAttr("use_mkldnn")) { + op->SetAttr("use_mkldnn", true); + } + } + } +#else + LOG(WARNING) + << "'MKLDNN' is not supported. Please re-compile with the WITH_MKLDNN option"; +#endif +} + } // namespace framework } // namespace paddle diff --git a/paddle/fluid/framework/executor.h b/paddle/fluid/framework/executor.h index 4a3d637e2d..214ca3dc49 100644 --- a/paddle/fluid/framework/executor.h +++ b/paddle/fluid/framework/executor.h @@ -44,6 +44,12 @@ class Executor { explicit Executor(const platform::Place& place); + /* + * 
Close this Executor. + * Calling this method will send complete messages to all pserver instances. + */ + void Close(); + /* @Brief * Runtime evaluation of the given ProgramDesc under certain Scope * @@ -57,7 +63,7 @@ class Executor { void Run(const ProgramDesc& program, Scope* scope, std::map* feed_targets, std::map* fetch_targets, - bool create_vars = true, + bool create_local_scope = true, bool create_vars = true, const std::string& feed_holder_name = "feed", const std::string& fetch_holder_name = "fetch"); @@ -71,15 +77,18 @@ class Executor { void RunPreparedContext(ExecutorPrepareContext* ctx, Scope* scope, bool create_local_scope = true, - bool create_vars = true); + bool create_vars = true, bool keep_kids = false); void RunPreparedContext(ExecutorPrepareContext* ctx, Scope* scope, std::map* feed_targets, std::map* fetch_targets, + bool create_local_scope = true, bool create_vars = true, const std::string& feed_holder_name = "feed", const std::string& fetch_holder_name = "fetch"); + void EnableMKLDNN(const ProgramDesc& program); + private: const platform::Place place_; }; diff --git a/paddle/fluid/framework/framework.proto b/paddle/fluid/framework/framework.proto index 96f53dc1bc..2cf14bd371 100644 --- a/paddle/fluid/framework/framework.proto +++ b/paddle/fluid/framework/framework.proto @@ -27,6 +27,7 @@ enum AttrType { BOOLEANS = 7; BLOCK = 8; LONG = 9; + BLOCKS = 10; } // OpDesc describes an instance of a C++ framework::OperatorBase @@ -46,6 +47,7 @@ message OpDesc { repeated bool bools = 11; optional int32 block_idx = 12; optional int64 l = 13; + repeated int32 blocks_idx = 14; }; message Var { @@ -71,6 +73,7 @@ message OpProto { optional bool duplicable = 3 [ default = false ]; optional bool intermediate = 4 [ default = false ]; optional bool dispensable = 5 [ default = false ]; + optional string reuse = 6; } // AttrProto describes the C++ type Attribute. @@ -101,6 +104,9 @@ message VarType { FP16 = 4; FP32 = 5; FP64 = 6; + // Tensor is used in C++. + SIZE_T = 19; + UINT8 = 20; // Other types that may need additional descriptions LOD_TENSOR = 7; diff --git a/paddle/fluid/framework/ir/CMakeLists.txt b/paddle/fluid/framework/ir/CMakeLists.txt new file mode 100644 index 0000000000..bf7d76a8a6 --- /dev/null +++ b/paddle/fluid/framework/ir/CMakeLists.txt @@ -0,0 +1,9 @@ +cc_library(node SRCS node.cc DEPS proto_desc) +cc_library(graph SRCS graph.cc DEPS node) +cc_library(graph_helper SRCS graph_helper.cc DEPS graph) +cc_library(pass SRCS pass.cc DEPS graph node graph_helper) +cc_library(graph_viz_pass SRCS graph_viz_pass.cc DEPS graph pass graph_helper) + +cc_test(pass_test SRCS pass_test.cc DEPS graph pass graph_helper) +cc_test(graph_test SRCS graph_test.cc DEPS graph graph_helper op_registry) +cc_test(graph_helper_test SRCS graph_helper_test.cc DEPS graph graph_helper op_registry) diff --git a/paddle/fluid/framework/ir/graph.cc b/paddle/fluid/framework/ir/graph.cc new file mode 100644 index 0000000000..f87d5212c0 --- /dev/null +++ b/paddle/fluid/framework/ir/graph.cc @@ -0,0 +1,252 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. */ + +#include +#include + +#include "paddle/fluid/framework/ir/graph.h" +#include "paddle/fluid/framework/op_proto_maker.h" +#include "paddle/fluid/framework/program_desc.h" +#include "paddle/fluid/framework/var_desc.h" + +namespace paddle { +namespace framework { +namespace ir { + +std::vector<std::string> FindDistTrainSendVars( + const std::vector<ir::Node *> &nodes) { + std::vector<std::string> send_vars; + // Since parameters are all in block 0, + // it's enough to scan only the send ops in block 0. + for (auto &node : nodes) { + auto op_vars = node->Op()->InputArgumentNames(); + send_vars.reserve(send_vars.size() + + std::distance(op_vars.begin(), op_vars.end())); + send_vars.insert(send_vars.end(), op_vars.begin(), op_vars.end()); + } + return send_vars; +} + +std::vector<std::string> FindDistTrainRecvVars( + const std::vector<ir::Node *> &nodes) { + std::vector<std::string> recv_vars; + for (auto &node : nodes) { + auto op_vars = node->Op()->OutputArgumentNames(); + recv_vars.reserve(recv_vars.size() + + std::distance(op_vars.begin(), op_vars.end())); + recv_vars.insert(recv_vars.end(), op_vars.begin(), op_vars.end()); + } + return recv_vars; +} + +bool IsDistTrainOp(ir::Node *node, const std::vector<std::string> &send_vars, + const std::vector<std::string> &recv_vars) { + if (send_vars.size() == 0 || recv_vars.size() == 0) { + return false; + } + + /** + * Check whether any op var contains the `.block` suffix and appears in the + * given RPC var list. + */ + auto checker = [](const std::vector<std::string> &opvars, + const std::vector<std::string> &rpc_vars) -> bool { + for (auto &var : opvars) { + // a variable name with the suffix `.block` means it's a variable split + // by the DistributeTranspiler + // [python/paddle/fluid/transpiler/distribute_transpiler.py] + if (var.find(".block") != std::string::npos && + std::find(rpc_vars.begin(), rpc_vars.end(), var) != rpc_vars.end()) { + return true; + } + } + return false; + }; + + std::vector<std::string> input_var_names; + std::vector<std::string> output_var_names; + for (ir::Node *input : node->inputs) { + input_var_names.push_back(input->Name()); + } + for (ir::Node *output : node->outputs) { + output_var_names.push_back(output->Name()); + } + + return checker(output_var_names, send_vars) || + checker(input_var_names, recv_vars); +} + +Graph::Graph(const ProgramDesc &program) : program_(program) { + VLOG(3) << "blocks in program: " << program_.Size(); + std::unordered_map<std::string, VarDesc *> all_vars; + for (auto *var : program.Block(0).AllVars()) { + all_vars.emplace(var->Name(), var); + } + + std::map<std::string, std::vector<ir::Node *>> var_nodes; + for (auto *op : program.Block(0).AllOps()) { + ir::Node *node = CreateOpNode(op); + // For input args, reuse the same var name if it was created before. + // Otherwise, create a new one. + for (auto &each_var_name : op->InputArgumentNames()) { + ir::Node *var = nullptr; + if (var_nodes.find(each_var_name) != var_nodes.end()) { + var = var_nodes.at(each_var_name).back(); + } else if (all_vars.count(each_var_name) != 0) { + var = CreateVarNode(all_vars.at(each_var_name)); + var_nodes[each_var_name].push_back(var); + } else { + // An operation input var can be optional (dispensable), which means + // the operation doesn't really need the var at runtime. In this + // case, the non-existent var is treated as ready from the beginning. + var = CreateEmptyNode(each_var_name, ir::Node::Type::kVariable); + var_nodes[each_var_name].push_back(var); + } + node->inputs.push_back(var); + var->outputs.push_back(node); + } + // For output args, always create a new var.
+ for (auto &each_var_name : op->OutputArgumentNames()) { + ir::Node *var = CreateVarNode(all_vars.at(each_var_name)); + var_nodes[each_var_name].push_back(var); + node->outputs.push_back(var); + var->inputs.push_back(node); + } + } + + std::vector<ir::Node *> send_ops; + ir::Node *send_bar = nullptr; + std::vector<ir::Node *> recv_ops; + ir::Node *fetch_bar = nullptr; + for (ir::Node *node : Nodes()) { + if (node->Name() == "send") { + send_ops.push_back(node); + } else if (node->Name() == "send_barrier") { + PADDLE_ENFORCE(!send_bar, "there can be only one send_barrier"); + send_bar = node; + } else if (node->Name() == "recv") { + recv_ops.push_back(node); + } else if (node->Name() == "fetch_barrier") { + PADDLE_ENFORCE(!fetch_bar, "there can be only one fetch_barrier"); + fetch_bar = node; + } + } + if (send_bar) { + for (ir::Node *send : send_ops) { + ir::Node *dep_var = CreateControlDepVar(); + send->outputs.push_back(dep_var); + dep_var->inputs.push_back(send); + send_bar->inputs.push_back(dep_var); + dep_var->outputs.push_back(send_bar); + } + for (ir::Node *recv : recv_ops) { + ir::Node *dep_var = CreateControlDepVar(); + recv->inputs.push_back(dep_var); + dep_var->outputs.push_back(recv); + send_bar->outputs.push_back(dep_var); + dep_var->inputs.push_back(send_bar); + } + } + if (fetch_bar) { + for (ir::Node *recv : recv_ops) { + ir::Node *dep_var = CreateControlDepVar(); + recv->outputs.push_back(dep_var); + dep_var->inputs.push_back(recv); + fetch_bar->inputs.push_back(dep_var); + dep_var->outputs.push_back(fetch_bar); + } + } + + std::vector<std::string> send_vars = FindDistTrainSendVars(send_ops); + std::vector<std::string> recv_vars = FindDistTrainRecvVars(recv_ops); + for (ir::Node *node : Nodes()) { + if (IsDistTrainOp(node, send_vars, recv_vars)) { + if (fetch_bar && node->Name() == "concat") { + ir::Node *dep_var = CreateControlDepVar(); + fetch_bar->outputs.push_back(dep_var); + dep_var->inputs.push_back(fetch_bar); + node->inputs.push_back(dep_var); + dep_var->outputs.push_back(node); + } + } + } + + /** + * We should handle write-after-read (WAR) and write-after-write (WAW) + * hazards here, because some operators of the program can be executed in + * parallel. So, to make the program run in the right order, we add explicit + * WAR and WAW dependencies. + * + * https://en.wikipedia.org/wiki/Hazard_(computer_architecture)#Write_after_read_(WAR) + */ + + for (auto &var : var_nodes) { + auto &versions = var.second; + if (versions.size() <= 1) continue; + + auto it_new = versions.rbegin(); + auto it_old = versions.rbegin(); + ++it_old; + for (; it_old != versions.rend(); it_new = it_old, ++it_old) { + ir::Node *write_op = + (*it_new)->inputs.empty() ? nullptr : (*it_new)->inputs[0]; + const auto &read_ops = (*it_old)->outputs; + + PADDLE_ENFORCE(write_op, "The write_op should not be empty."); + + // Add write-after-write dependence + ir::Node *upstream_op = + (*it_old)->inputs.empty() ? nullptr : (*it_old)->inputs[0]; + if (upstream_op) { + ir::Node *dep_var = CreateControlDepVar(); + write_op->inputs.push_back(dep_var); + upstream_op->outputs.push_back(dep_var); + dep_var->outputs.push_back(write_op); + dep_var->inputs.push_back(upstream_op); + } + + for (auto *read_op : read_ops) { + // Manually add a dependency var from read_op to write_op. + if (read_op == write_op) { + // The read and the write are the same op; skip. + continue; + } + // The two ops might already be connected via other vars.
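// Note: a concrete (hypothetical) example of the hazard handling above.
// Given three ops that touch variable b in program order:
//   b = op1(a)   // writer of version 1
//   c = op2(b)   // reader of version 1
//   b = op3(d)   // writer of version 2
// the loop links op1 -> op3 (WAW) and op2 -> op3 (WAR), each through a fresh
// control-dep var, exactly as the code below does for read_op/write_op:
//   ir::Node *dep = CreateControlDepVar();
//   read_op->outputs.push_back(dep);   dep->inputs.push_back(read_op);
//   write_op->inputs.push_back(dep);   dep->outputs.push_back(write_op);
// so a parallel executor can never schedule op3 before op2 has consumed b.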
+ bool has_dep = false; + for (ir::Node *r_out : read_op->outputs) { + for (ir::Node *w_in : write_op->inputs) { + if (r_out == w_in) { + has_dep = true; + break; + } + } + } + if (has_dep) continue; + + ir::Node *dep_var = CreateControlDepVar(); + read_op->outputs.push_back(dep_var); + dep_var->inputs.push_back(read_op); + write_op->inputs.push_back(dep_var); + dep_var->outputs.push_back(write_op); + } + } + } +} + +bool IsControlDepVar(const ir::Node &var) { + return var.Name().find(ir::Node::kControlDepVarName) != std::string::npos; +} +} // namespace ir +} // namespace framework +} // namespace paddle diff --git a/paddle/fluid/framework/ir/graph.h b/paddle/fluid/framework/ir/graph.h new file mode 100644 index 0000000000..c9d55fbf52 --- /dev/null +++ b/paddle/fluid/framework/ir/graph.h @@ -0,0 +1,131 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#pragma once + +#include <map> +#include <memory> +#include <string> +#include <vector> + +#include "paddle/fluid/framework/ir/node.h" +#include "paddle/fluid/framework/program_desc.h" +#include "paddle/fluid/platform/enforce.h" +#include "paddle/fluid/platform/variant.h" + +namespace paddle { +namespace framework { +namespace ir { + +class Graph { + public: + explicit Graph(const ProgramDesc &program); + + virtual ~Graph() { + for (auto &attr : attrs_) { + attr_dels_[attr.first](); + } + attrs_.clear(); + attr_dels_.clear(); + } + + bool Has(const std::string &attr_name) const { + return attrs_.find(attr_name) != attrs_.end(); + } + + template <typename AttrType> + AttrType &Get(const std::string &attr_name) const { + PADDLE_ENFORCE(Has(attr_name), "%s attr not registered for graph.", + attr_name); + return *boost::any_cast<AttrType *>(attrs_.at(attr_name)); + } + + template <typename AttrType> + void Set(const std::string &attr_name, AttrType *attr) { + PADDLE_ENFORCE(attrs_.count(attr_name) == 0, "%s already set in the graph", + attr_name); + attrs_[attr_name] = attr; + attr_dels_[attr_name] = [attr, attr_name]() { + VLOG(3) << "deleting " << attr_name; + delete attr; + }; + } + + const std::unordered_set<ir::Node *> &Nodes() const { return node_set_; } + + // Create a normal variable with non-null VarDesc. + ir::Node *CreateVarNode(VarDesc *var_desc) { + return AddNode(new ir::Node(var_desc)); + } + + // Create a normal runnable operator with OpDesc. + ir::Node *CreateOpNode(OpDesc *op_desc) { + return AddNode(new ir::Node(op_desc)); + } + + // Create a control dependency var that connects two operations. The + // var doesn't hold any data; other than that, it's no different from + // any other var as far as dependency analysis is concerned. + ir::Node *CreateControlDepVar() { + // TODO(panyx0718): control var name should be really unique. + const std::string name = string::Sprintf( + "%s@%llu", ir::Node::kControlDepVarName, node_set_.size()); + return AddNode(new ir::Node(name, ir::Node::Type::kVariable)); + } + + // A more free-style way of creating a graph node. Mostly used for tests + // or to "copy" from another node. Avoid using it if possible.
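// Note: a usage sketch (hypothetical attribute name) for the Set()/Get()
// attribute API above. Set() hands ownership of the raw pointer to the
// graph, which deletes it in ~Graph(); Get() returns a mutable reference
// to the stored object:
//   Graph g(program);
//   g.Set<int>("my_counter", new int(0));  // freed by the graph
//   g.Get<int>("my_counter") = 42;
//   int v = g.Get<int>("my_counter");      // v == 42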
+ ir::Node *CreateEmptyNode(const std::string &name, ir::Node::Type type) { + return AddNode(new ir::Node(name, type)); + } + + // Clear all node information of the graph and return the ownership of the + // nodes. + std::vector> ReleaseNodes() { + std::vector> ret; + for (auto &n : nodes_) { + ret.emplace_back(n.second.release()); + } + nodes_.clear(); + node_set_.clear(); + return ret; + } + + private: + // This method takes ownership of `node`. + ir::Node *AddNode(ir::Node *node) { + PADDLE_ENFORCE(node_set_.find(node) == node_set_.end()); + nodes_[node].reset(node); + node_set_.insert(node); + return node; + } + + void RemoveNode(ir::Node *node) { + PADDLE_ENFORCE(node_set_.find(node) != node_set_.end()); + node_set_.erase(node); + nodes_.erase(node); + } + + // NOTE: program_ shouldn't be exposed to user. + const ProgramDesc &program_; + std::map attrs_; + std::map> attr_dels_; + std::map> nodes_; + std::unordered_set node_set_; +}; + +bool IsControlDepVar(const ir::Node &var); +} // namespace ir +} // namespace framework +} // namespace paddle diff --git a/paddle/fluid/framework/ir/graph_helper.cc b/paddle/fluid/framework/ir/graph_helper.cc new file mode 100644 index 0000000000..b1c19e6535 --- /dev/null +++ b/paddle/fluid/framework/ir/graph_helper.cc @@ -0,0 +1,118 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ + +#include +#include + +#include "paddle/fluid/framework/ir/graph_helper.h" + +namespace paddle { +namespace framework { +namespace ir { +namespace { +void SortHelper( + const std::map> &adj_list, + ir::Node *node, std::unordered_set *visited, + std::vector *ret) { + visited->insert(node); + + for (auto adj : adj_list.at(node)) { + if (visited->find(adj) == visited->end()) { + SortHelper(adj_list, adj, visited, ret); + } + } + + VLOG(3) << "topology sort insert: " << node->Name() + << reinterpret_cast(node) << " input " << node->inputs.size(); + ret->push_back(node); +} + +bool HasCircleHelper( + ir::Node *node, + const std::map> &adj_list, + std::unordered_set *visited, + std::unordered_set *in_trace) { + if (visited->find(node) == visited->end()) { + visited->insert(node); + in_trace->insert(node); + + for (ir::Node *in : adj_list.at(node)) { + if (visited->find(in) == visited->end() && + HasCircleHelper(in, adj_list, visited, in_trace)) { + return true; + } else if (in_trace->find(in) != in_trace->end()) { + return true; + } + } + } + in_trace->erase(node); + return false; +} + +bool HasCircleInternal( + const std::map> &adj_list) { + std::unordered_set visited; + std::unordered_set in_trace; + for (auto &adj : adj_list) { + if (HasCircleHelper(adj.first, adj_list, &visited, &in_trace)) { + return true; + } + } + return false; +} +} // namespace + +bool HasCircle(const Graph &graph) { + return HasCircleInternal(BuildOperationAdjList(graph)); +} + +std::vector TopologySortOperations(const Graph &graph) { + std::map> adj_list = + BuildOperationAdjList(graph); + PADDLE_ENFORCE(!HasCircleInternal(adj_list)); + std::unordered_set visited; + std::vector ret; + for (auto adj : adj_list) { + if (visited.find(adj.first) == visited.end()) { + SortHelper(adj_list, adj.first, &visited, &ret); + } + } + return ret; +} + +std::map> BuildOperationAdjList( + const Graph &graph) { + std::map> adj_list; + + for (auto &n : graph.Nodes()) { + if (n->NodeType() != ir::Node::Type::kOperation) continue; + if (adj_list.find(n) == adj_list.end()) { + adj_list[n] = std::unordered_set(); + } + for (auto &var : n->inputs) { + for (auto &adj_n : var->inputs) { + PADDLE_ENFORCE(adj_n->NodeType() == ir::Node::Type::kOperation); + adj_list[n].insert(adj_n); + VLOG(3) << "adj " << adj_n->Name() << reinterpret_cast(adj_n) + << " -> " << n->Name() << reinterpret_cast(n) + << " via " << var->Name() << reinterpret_cast(var); + } + } + } + return adj_list; +} + +} // namespace ir +} // namespace framework +} // namespace paddle diff --git a/paddle/fluid/framework/ir/graph_helper.h b/paddle/fluid/framework/ir/graph_helper.h new file mode 100644 index 0000000000..cd6c53a07f --- /dev/null +++ b/paddle/fluid/framework/ir/graph_helper.h @@ -0,0 +1,40 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ + +#pragma once + +#include +#include +#include + +#include "paddle/fluid/framework/ir/graph.h" +#include "paddle/fluid/framework/ir/node.h" + +namespace paddle { +namespace framework { +namespace ir { +// Test if the graph contains circle. +bool HasCircle(const Graph &graph); + +// Topology Sort the operations in the graph from inputs to outputs. +// `graph` cannot contain circle. +std::vector TopologySortOperations(const Graph &graph); + +// Build an adjacency list of operations for the `graph`. +std::map> BuildOperationAdjList( + const Graph &graph); + +} // namespace ir +} // namespace framework +} // namespace paddle diff --git a/paddle/fluid/framework/ir/graph_helper_test.cc b/paddle/fluid/framework/ir/graph_helper_test.cc new file mode 100644 index 0000000000..a260dd3da2 --- /dev/null +++ b/paddle/fluid/framework/ir/graph_helper_test.cc @@ -0,0 +1,125 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include "paddle/fluid/framework/ir/graph.h" +#include +#include "gtest/gtest.h" +#include "paddle/fluid/framework/ir/graph_helper.h" +#include "paddle/fluid/framework/program_desc.h" + +namespace paddle { +namespace framework { +namespace ir { + +void BuildCircleGraph(Graph* g) { + ir::Node* o1 = g->CreateEmptyNode("op1", Node::Type::kOperation); + ir::Node* v1 = g->CreateEmptyNode("var1", Node::Type::kVariable); + + o1->outputs.push_back(v1); + o1->inputs.push_back(v1); + v1->inputs.push_back(o1); + v1->outputs.push_back(o1); +} + +void BuildCircleGraph2(Graph* g) { + ir::Node* o1 = g->CreateEmptyNode("op1", Node::Type::kOperation); + ir::Node* o2 = g->CreateEmptyNode("op2", Node::Type::kOperation); + ir::Node* v1 = g->CreateEmptyNode("var1", Node::Type::kVariable); + ir::Node* v2 = g->CreateEmptyNode("var2", Node::Type::kVariable); + + o1->outputs.push_back(v1); + o2->inputs.push_back(v1); + v1->inputs.push_back(o1); + v1->outputs.push_back(o2); + + o2->outputs.push_back(v2); + o1->inputs.push_back(v2); + v2->inputs.push_back(o2); + v2->outputs.push_back(o1); +} + +void BuildNoCircleGraph(Graph* g) { + ir::Node* o1 = g->CreateEmptyNode("op1", Node::Type::kOperation); + ir::Node* o2 = g->CreateEmptyNode("op2", Node::Type::kOperation); + ir::Node* o3 = g->CreateEmptyNode("op3", Node::Type::kOperation); + ir::Node* o4 = g->CreateEmptyNode("op4", Node::Type::kOperation); + ir::Node* o5 = g->CreateEmptyNode("op5", Node::Type::kOperation); + ir::Node* v1 = g->CreateEmptyNode("var1", Node::Type::kVariable); + ir::Node* v2 = g->CreateEmptyNode("var2", Node::Type::kVariable); + ir::Node* v3 = g->CreateEmptyNode("var3", Node::Type::kVariable); + ir::Node* v4 = g->CreateEmptyNode("var4", Node::Type::kVariable); + + // o1->v1->o2 + o1->outputs.push_back(v1); + o2->inputs.push_back(v1); + v1->inputs.push_back(o1); + v1->outputs.push_back(o2); + // o2->v2->o3 + // o2->v2->o4 + o2->outputs.push_back(v2); + o3->inputs.push_back(v2); + o4->inputs.push_back(v2); + v2->inputs.push_back(o2); + v2->outputs.push_back(o3); + v2->outputs.push_back(o4); + 
// o2->v3->o5 + o2->outputs.push_back(v3); + o5->inputs.push_back(v3); + v3->inputs.push_back(o2); + v3->outputs.push_back(o5); + // o3-v4->o5 + o3->outputs.push_back(v4); + o5->inputs.push_back(v4); + v4->inputs.push_back(o3); + v4->outputs.push_back(o5); +} + +TEST(GraphHelperTest, Basic) { + ProgramDesc prog; + + Graph g(prog); + BuildCircleGraph(&g); + ASSERT_TRUE(HasCircle(g)); + + Graph g2(prog); + BuildCircleGraph2(&g2); + ASSERT_TRUE(HasCircle(g2)); + + auto adj_list = BuildOperationAdjList(g2); + for (auto& adj : adj_list) { + auto& adj_set = adj.second; + if (adj.first->Name() == "op1") { + ASSERT_EQ((*adj_set.begin())->Name(), "op2"); + } else if (adj.first->Name() == "op2") { + ASSERT_EQ((*adj_set.begin())->Name(), "op1"); + } else { + ASSERT_TRUE(false); + } + } + + Graph g3(prog); + BuildNoCircleGraph(&g3); + ASSERT_FALSE(HasCircle(g3)); + auto sorted = TopologySortOperations(g3); + std::map node_map; + for (size_t i = 0; i < sorted.size(); ++i) { + node_map[sorted[i]->Name()] = i; + } + ASSERT_EQ(node_map.at("op1"), 0UL); + ASSERT_EQ(node_map.at("op2"), 1UL); + ASSERT_TRUE(node_map.at("op3") < node_map.at("op5")); +} +} // namespace ir +} // namespace framework +} // namespace paddle diff --git a/paddle/fluid/framework/ir/graph_test.cc b/paddle/fluid/framework/ir/graph_test.cc new file mode 100644 index 0000000000..f9e6bdf362 --- /dev/null +++ b/paddle/fluid/framework/ir/graph_test.cc @@ -0,0 +1,114 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ + +#include "paddle/fluid/framework/ir/graph.h" +#include "gtest/gtest.h" +#include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/framework/operator.h" +#include "paddle/fluid/framework/program_desc.h" + +namespace paddle { +namespace framework { + +class NOP : public OperatorBase { + public: + NOP(const std::string &type, const VariableNameMap &inputs, + const VariableNameMap &outputs, const AttributeMap &attrs) + : OperatorBase(type, inputs, outputs, attrs) {} + + private: + void RunImpl(const Scope &scope, + const platform::Place &place) const override {} +}; + +class SumOpMaker : public OpProtoAndCheckerMaker { + public: + void Make() { + AddInput("X", "").AsDuplicable(); + AddOutput("Out", ""); + AddComment(""); + } +}; + +class SumOpVarTypeInference : public VarTypeInference { + public: + void operator()(const OpDesc &op_desc, BlockDesc *block) const override { + auto &inputs = op_desc.Input("X"); + auto default_var_type = proto::VarType::SELECTED_ROWS; + + bool any_input_is_lod_tensor = std::any_of( + inputs.begin(), inputs.end(), [block](const std::string &name) { + return block->Var(name)->GetType() == proto::VarType::LOD_TENSOR; + }); + if (any_input_is_lod_tensor) { + default_var_type = proto::VarType::LOD_TENSOR; + } + + auto out_var_name = op_desc.Output("Out").front(); + block->Var(out_var_name)->SetType(default_var_type); + } +}; +} // namespace framework +} // namespace paddle + +REGISTER_OPERATOR(sum, paddle::framework::NOP, paddle::framework::SumOpMaker, + paddle::framework::SumOpVarTypeInference); +REGISTER_OPERATOR(sum_without_infer_var_type, paddle::framework::NOP, + paddle::framework::SumOpMaker); + +namespace paddle { +namespace framework { + +TEST(GraphTest, Basic) { + ProgramDesc prog; + auto *op = prog.MutableBlock(0)->AppendOp(); + op->SetType("sum"); + op->SetInput("X", {"test_a", "test_b", "test_c"}); + op->SetOutput("Out", {"test_out"}); + op->SetAttr("op_role", 1); + + prog.MutableBlock(0)->Var("test_a")->SetType(proto::VarType::SELECTED_ROWS); + prog.MutableBlock(0)->Var("test_b")->SetType(proto::VarType::SELECTED_ROWS); + prog.MutableBlock(0)->Var("test_c")->SetType(proto::VarType::SELECTED_ROWS); + prog.MutableBlock(0)->Var("test_out"); + + op->InferVarType(prog.MutableBlock(0)); + + ASSERT_EQ(proto::VarType::SELECTED_ROWS, + prog.MutableBlock(0)->Var("test_out")->GetType()); + + prog.MutableBlock(0)->Var("test_b")->SetType(proto::VarType::LOD_TENSOR); + op->InferVarType(prog.MutableBlock(0)); + ASSERT_EQ(proto::VarType::LOD_TENSOR, + prog.MutableBlock(0)->Var("test_out")->GetType()); + + std::unique_ptr g(new ir::Graph(prog)); + std::vector nodes(g->Nodes().begin(), g->Nodes().end()); + for (ir::Node *n : nodes) { + if (n->Name() == "sum") { + ASSERT_EQ(n->inputs.size(), 3UL); + ASSERT_EQ(n->outputs.size(), 1UL); + } else if (n->Name() == "test_a" || n->Name() == "test_b" || + n->Name() == "test_c") { + ASSERT_EQ(n->inputs.size(), 0UL); + ASSERT_EQ(n->outputs.size(), 1UL); + } else if (n->Name() == "test_out") { + ASSERT_EQ(n->inputs.size(), 1UL); + ASSERT_EQ(n->outputs.size(), 0UL); + } + } + ASSERT_EQ(nodes.size(), 5); +} +} // namespace framework +} // namespace paddle diff --git a/paddle/fluid/framework/ir/graph_viz_pass.cc b/paddle/fluid/framework/ir/graph_viz_pass.cc new file mode 100644 index 0000000000..8cb812d138 --- /dev/null +++ b/paddle/fluid/framework/ir/graph_viz_pass.cc @@ -0,0 +1,72 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include +#include + +#include "paddle/fluid/framework/ir/graph_viz_pass.h" + +namespace paddle { +namespace framework { +namespace ir { +static const char kGraphVizPath[] = "graph_viz_path"; + +std::unique_ptr GraphVizPass::ApplyImpl( + std::unique_ptr graph) const { + const std::string graph_viz_path = Get(kGraphVizPath); + std::unique_ptr fout(new std::ofstream(graph_viz_path)); + PADDLE_ENFORCE(fout->good()); + std::ostream& sout = *fout; + + size_t var_id = 0; + std::unordered_map vars; + + sout << "digraph G {\n"; + + for (const ir::Node* n : graph->Nodes()) { + if (n->NodeType() != ir::Node::Type::kVariable) continue; + size_t cur_var_id = var_id++; + vars[n] = cur_var_id; + + sout << "var_" << cur_var_id << " [label=\"" << n->Name() << "\"]" + << std::endl; + } + + size_t op_id = 0; + for (const ir::Node* n : graph->Nodes()) { + if (n->NodeType() != ir::Node::Type::kOperation) continue; + std::string op_name = "op_" + std::to_string(op_id++); + sout << op_name << " [label=\"" << n->Name() << "\", shape=rect]" + << std::endl; + for (auto in : n->inputs) { + std::string var_name = "var_" + std::to_string(vars[in]); + sout << var_name << " -> " << op_name << std::endl; + } + + for (auto out : n->outputs) { + std::string var_name = "var_" + std::to_string(vars[out]); + sout << op_name << " -> " << var_name << std::endl; + } + } + + sout << "}\n"; + return graph; +} + +} // namespace ir +} // namespace framework +} // namespace paddle + +REGISTER_PASS(graph_viz_pass, paddle::framework::ir::GraphVizPass) + .RequirePassAttr(paddle::framework::ir::kGraphVizPath); diff --git a/paddle/fluid/framework/ir/graph_viz_pass.h b/paddle/fluid/framework/ir/graph_viz_pass.h new file mode 100644 index 0000000000..1fd8c8a26e --- /dev/null +++ b/paddle/fluid/framework/ir/graph_viz_pass.h @@ -0,0 +1,38 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ + +#pragma once + +#include +#include +#include +#include +#include + +#include "paddle/fluid/framework/ir/graph.h" +#include "paddle/fluid/framework/ir/pass.h" + +namespace paddle { +namespace framework { +namespace ir { + +class GraphVizPass : public Pass { + protected: + std::unique_ptr ApplyImpl( + std::unique_ptr graph) const override; +}; + +} // namespace ir +} // namespace framework +} // namespace paddle diff --git a/paddle/fluid/framework/ir/node.cc b/paddle/fluid/framework/ir/node.cc new file mode 100644 index 0000000000..aca77da8d6 --- /dev/null +++ b/paddle/fluid/framework/ir/node.cc @@ -0,0 +1,23 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include "paddle/fluid/framework/ir/node.h" + +namespace paddle { +namespace framework { +namespace ir { +const char Node::kControlDepVarName[] = "__control_var"; +} // namespace ir +} // namespace framework +} // namespace paddle diff --git a/paddle/fluid/framework/ir/node.h b/paddle/fluid/framework/ir/node.h new file mode 100644 index 0000000000..b3138fccee --- /dev/null +++ b/paddle/fluid/framework/ir/node.h @@ -0,0 +1,76 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ + +#pragma once + +#include <string> +#include <vector> +#include "paddle/fluid/framework/op_desc.h" +#include "paddle/fluid/framework/var_desc.h" +#include "paddle/fluid/platform/macros.h" + +namespace paddle { +namespace framework { +namespace ir { + +class Node { + public: + enum class Type { kOperation, kVariable }; + static const char kControlDepVarName[]; + + explicit Node(const std::string& name, Type type) + : name_(name), var_desc_(nullptr), op_desc_(nullptr), type_(type) {} + + explicit Node(VarDesc* var_desc) + : name_(var_desc->Name()), + var_desc_(var_desc), + op_desc_(nullptr), + type_(Type::kVariable) {} + + explicit Node(OpDesc* op_desc) + : name_(op_desc->Type()), + var_desc_(nullptr), + op_desc_(op_desc), + type_(Type::kOperation) {} + + Type NodeType() const { return type_; } + + std::string Name() const { return name_; } + + VarDesc* Var() { + PADDLE_ENFORCE(type_ == Type::kVariable); + return var_desc_; + } + + OpDesc* Op() { + PADDLE_ENFORCE(type_ == Type::kOperation); + return op_desc_; + } + + std::vector<Node*> inputs; + std::vector<Node*> outputs; + + protected: + const std::string name_; + VarDesc* var_desc_; + OpDesc* op_desc_; + Type type_; + + private: + DISABLE_COPY_AND_ASSIGN(Node); +}; + +} // namespace ir +} // namespace framework +} // namespace paddle diff --git a/paddle/fluid/framework/ir/pass.cc b/paddle/fluid/framework/ir/pass.cc new file mode 100644 index 0000000000..d7158eba62 --- /dev/null +++ b/paddle/fluid/framework/ir/pass.cc @@ -0,0 +1,46 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include "paddle/fluid/framework/ir/pass.h" +#include "paddle/fluid/framework/ir/graph_helper.h" + +namespace paddle { +namespace framework { +namespace ir { +std::unique_ptr<Graph> Pass::Apply(std::unique_ptr<Graph> graph) const { + PADDLE_ENFORCE(!applied_, "Pass can only Apply() once."); + PADDLE_ENFORCE(graph.get(), "graph passed to Pass::Apply() cannot be empty."); + for (const std::string& attr : required_pass_attrs_) { + PADDLE_ENFORCE(attrs_.find(attr) != attrs_.end(), + "Required pass attribute %s not set.", attr); + } + for (const std::string& attr : required_graph_attrs_) { + PADDLE_ENFORCE(graph->Has(attr), "Required graph attribute %s not set.", + attr); + } + auto applied_graph = ApplyImpl(std::move(graph)); + // TODO(panyx0718): Add more verifications. + PADDLE_ENFORCE(!HasCircle(*applied_graph), + "Illegal Pass. Generated graph shouldn't has cycle."); + applied_ = true; + return applied_graph; +} + +PassRegistry& PassRegistry::Instance() { + static PassRegistry g_pass_info_map; + return g_pass_info_map; +} +} // namespace ir +} // namespace framework +} // namespace paddle diff --git a/paddle/fluid/framework/ir/pass.h b/paddle/fluid/framework/ir/pass.h new file mode 100644 index 0000000000..0f14083d25 --- /dev/null +++ b/paddle/fluid/framework/ir/pass.h @@ -0,0 +1,200 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#pragma once + +#include +#include +#include + +#include "paddle/fluid/framework/ir/graph.h" +#include "paddle/fluid/framework/ir/node.h" +#include "paddle/fluid/framework/program_desc.h" +#include "paddle/fluid/platform/variant.h" + +namespace paddle { +namespace framework { +namespace ir { +template +struct PassRegistrar; + +class Pass { + public: + Pass() = default; + virtual ~Pass() { + for (auto &attr : attrs_) { + if (attr_dels_.find(attr.first) != attr_dels_.end()) { + attr_dels_[attr.first](); + } + } + attrs_.clear(); + attr_dels_.clear(); + } + + std::unique_ptr Apply(std::unique_ptr graph) const; + + // Get a reference to the attributed previously set. + template + AttrType &Get(const std::string &attr_name) const { + PADDLE_ENFORCE(attrs_.find(attr_name) != attrs_.end(), + "%s attr not registered for pass.", attr_name); + return *boost::any_cast(attrs_.at(attr_name)); + } + + // Set a pointer to the attribute. Pass takes ownership of the attribute. + template + void Set(const std::string &attr_name, AttrType *attr) { + PADDLE_ENFORCE(attrs_.count(attr_name) == 0, "%s already set in the pass", + attr_name); + attrs_[attr_name] = attr; + attr_dels_[attr_name] = [attr, attr_name]() { + VLOG(3) << "deleting " << attr_name; + delete attr; + }; + } + + // Set a pointer to the attribute. Pass doesn't take ownership. Caller + // should delete the attribute. + template + void SetNotOwned(const std::string &attr_name, AttrType *attr) { + PADDLE_ENFORCE(attrs_.count(attr_name) == 0); + attrs_[attr_name] = attr; + } + + protected: + virtual std::unique_ptr ApplyImpl( + std::unique_ptr graph) const = 0; + + private: + template + friend struct PassRegistrar; + + void RegisterRequiredPassAttrs(const std::unordered_set &attrs) { + required_pass_attrs_.insert(attrs.begin(), attrs.end()); + } + + void RegisterRequiredGraphAttrs( + const std::unordered_set &attrs) { + required_graph_attrs_.insert(attrs.begin(), attrs.end()); + } + + mutable bool applied_{false}; + std::unordered_set required_pass_attrs_; + std::unordered_set required_graph_attrs_; + std::map attrs_; + std::map> attr_dels_; +}; + +using PassCreator = std::function()>; + +class Registrar { + public: + // In our design, various kinds of passes, + // have their corresponding registry and registrar. The action of + // registration is in the constructor of a global registrar variable, which + // are not used in the code that calls package framework, and would + // be removed from the generated binary file by the linker. To avoid such + // removal, we add Touch to all registrar classes and make USE_PASS macros to + // call this method. So, as long as the callee code calls USE_PASS, the global + // registrar variable won't be removed by the linker. 
+  void Touch() {}
+};
+
+class PassRegistry {
+ public:
+  static PassRegistry &Instance();
+
+  bool Has(const std::string &pass_type) const {
+    return map_.find(pass_type) != map_.end();
+  }
+
+  void Insert(const std::string &pass_type, const PassCreator &pass_creator) {
+    PADDLE_ENFORCE(!Has(pass_type), "Pass %s has been registered", pass_type);
+    map_.insert({pass_type, pass_creator});
+  }
+
+  std::unique_ptr<Pass> Get(const std::string &pass_type) const {
+    PADDLE_ENFORCE(Has(pass_type), "Pass %s has not been registered",
+                   pass_type);
+    return map_.at(pass_type)();
+  }
+
+ private:
+  PassRegistry() = default;
+  std::unordered_map<std::string, PassCreator> map_;
+
+  DISABLE_COPY_AND_ASSIGN(PassRegistry);
+};
+
+template <typename PassType>
+struct PassRegistrar : public Registrar {
+  explicit PassRegistrar(const char *pass_type) {
+    PADDLE_ENFORCE(!PassRegistry::Instance().Has(pass_type),
+                   "'%s' is registered more than once.", pass_type);
+    PassRegistry::Instance().Insert(
+        pass_type, [this]() -> std::unique_ptr<Pass> {
+          std::unique_ptr<Pass> pass(new PassType());
+          pass->RegisterRequiredPassAttrs(this->required_pass_attrs_);
+          pass->RegisterRequiredGraphAttrs(this->required_graph_attrs_);
+          return pass;
+        });
+  }
+
+  PassRegistrar &RequirePassAttr(const std::string &attr) {
+    required_pass_attrs_.insert(attr);
+    return *this;
+  }
+
+  PassRegistrar &RequireGraphAttr(const std::string &attr) {
+    required_graph_attrs_.insert(attr);
+    return *this;
+  }
+
+ private:
+  std::unordered_set<std::string> required_pass_attrs_;
+  std::unordered_set<std::string> required_graph_attrs_;
+};
+
+#define STATIC_ASSERT_PASS_GLOBAL_NAMESPACE(uniq_name, msg)                   \
+  struct __test_global_namespace_##uniq_name##__ {};                          \
+  static_assert(std::is_same<::__test_global_namespace_##uniq_name##__,      \
+                             __test_global_namespace_##uniq_name##__>::value, \
+                msg)
+
+// Register a new pass that can be applied on the IR.
+#define REGISTER_PASS(pass_type, pass_class)                \
+  STATIC_ASSERT_PASS_GLOBAL_NAMESPACE(                      \
+      __reg_pass__##pass_type,                              \
+      "REGISTER_PASS must be called in global namespace");  \
+  static ::paddle::framework::ir::PassRegistrar<pass_class> \
+      __pass_registrar_##pass_type##__(#pass_type);         \
+  int TouchPassRegistrar_##pass_type() {                    \
+    __pass_registrar_##pass_type##__.Touch();               \
+    return 0;                                               \
+  }                                                         \
+  static ::paddle::framework::ir::PassRegistrar<pass_class> \
+      &__pass_tmp_registrar_##pass_type##__ __attribute__((unused)) = \
+          __pass_registrar_##pass_type##__
+
+#define USE_PASS(pass_type)                                    \
+  STATIC_ASSERT_PASS_GLOBAL_NAMESPACE(                         \
+      __use_pass_itself_##pass_type,                           \
+      "USE_PASS must be called in global namespace");          \
+  extern int TouchPassRegistrar_##pass_type();                 \
+  static int use_pass_itself_##pass_type##_ __attribute__((unused)) = \
+      TouchPassRegistrar_##pass_type()
+
+}  // namespace ir
+}  // namespace framework
+}  // namespace paddle
diff --git a/paddle/fluid/framework/ir/pass_test.cc b/paddle/fluid/framework/ir/pass_test.cc
new file mode 100644
index 0000000000..5b5011412e
--- /dev/null
+++ b/paddle/fluid/framework/ir/pass_test.cc
@@ -0,0 +1,112 @@
+/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+#include "paddle/fluid/framework/ir/pass.h"
+#include <string>
+#include "gtest/gtest.h"
+#include "paddle/fluid/framework/ir/graph.h"
+
+namespace paddle {
+namespace framework {
+namespace ir {
+void BuildCircleGraph(Graph* g) {
+  ir::Node* o1 = g->CreateEmptyNode("op1", Node::Type::kOperation);
+  ir::Node* o2 = g->CreateEmptyNode("op2", Node::Type::kOperation);
+  ir::Node* v1 = g->CreateEmptyNode("var1", Node::Type::kVariable);
+  ir::Node* v2 = g->CreateEmptyNode("var2", Node::Type::kVariable);
+
+  o1->outputs.push_back(v1);
+  o2->inputs.push_back(v1);
+  v1->inputs.push_back(o1);
+  v1->outputs.push_back(o2);
+
+  o2->outputs.push_back(v2);
+  o1->inputs.push_back(v2);
+  v2->inputs.push_back(o2);
+  v2->outputs.push_back(o1);
+}
+
+class TestPass : public Pass {
+ protected:
+  std::unique_ptr<Graph> ApplyImpl(std::unique_ptr<Graph> graph) const {
+    graph->Set("copy_test_pass_attr", new int);
+    graph->Set("copy_test_graph_attr", new int);
+
+    int test_pass_attr = this->Get<int>("test_pass_attr");
+    graph->Get<int>("copy_test_pass_attr") = test_pass_attr + 1;
+
+    int test_graph_attr = graph->Get<int>("test_graph_attr");
+    graph->Get<int>("copy_test_graph_attr") = test_graph_attr + 1;
+    return graph;
+  }
+};
+
+TEST(PassTest, TestPassAttrCheck) {
+  ProgramDesc prog;
+  auto pass = PassRegistry::Instance().Get("test_pass");
+  std::unique_ptr<Graph> graph(new Graph(prog));
+  std::string exception;
+  try {
+    graph = pass->Apply(std::move(graph));
+  } catch (const paddle::platform::EnforceNotMet& e) {
+    exception = std::string(e.what());
+  }
+  ASSERT_TRUE(exception.find("test_pass_attr not set") != exception.npos);
+
+  int val = 1;
+  graph.reset(new Graph(prog));
+  pass->SetNotOwned<int>("test_pass_attr", &val);
+
+  try {
+    graph = pass->Apply(std::move(graph));
+  } catch (const paddle::platform::EnforceNotMet& e) {
+    exception = std::string(e.what());
+  }
+  ASSERT_TRUE(exception.find("test_graph_attr not set") != exception.npos);
+
+  graph.reset(new Graph(prog));
+  graph->Set("test_graph_attr", new int);
+  graph->Get<int>("test_graph_attr") = 1;
+  graph = pass->Apply(std::move(graph));
+  ASSERT_EQ(graph->Get<int>("copy_test_pass_attr"), 2);
+  ASSERT_EQ(graph->Get<int>("copy_test_graph_attr"), 2);
+
+  try {
+    graph = pass->Apply(std::move(graph));
+  } catch (const paddle::platform::EnforceNotMet& e) {
+    exception = std::string(e.what());
+  }
+  ASSERT_TRUE(exception.find("Pass can only Apply() once") != exception.npos);
+
+  pass = PassRegistry::Instance().Get("test_pass");
+  pass->SetNotOwned<int>("test_pass_attr", &val);
+  graph.reset(new Graph(prog));
+  BuildCircleGraph(graph.get());
+  graph->Set("test_graph_attr", new int);
+  graph->Get<int>("test_graph_attr") = 2;
+  try {
+    auto tmp = pass->Apply(std::move(graph));
+  } catch (const paddle::platform::EnforceNotMet& e) {
+    exception = std::string(e.what());
+  }
+  ASSERT_TRUE(exception.find("shouldn't have a cycle") != exception.npos);
+}
+
+}  // namespace ir
+}  // namespace framework
+}  // namespace paddle
+
+REGISTER_PASS(test_pass, paddle::framework::ir::TestPass)
+    .RequirePassAttr("test_pass_attr")
+    .RequireGraphAttr("test_graph_attr");
diff --git a/paddle/fluid/framework/lod_tensor.cc b/paddle/fluid/framework/lod_tensor.cc
index a56674cbe2..919029c38f 100644
--- a/paddle/fluid/framework/lod_tensor.cc
+++ b/paddle/fluid/framework/lod_tensor.cc
@@ -20,6 +20,7 @@ limitations under the License.
*/ #include "paddle/fluid/framework/data_type.h" #include "paddle/fluid/framework/framework.pb.h" #include "paddle/fluid/framework/lod_tensor.h"
+#include "paddle/fluid/framework/var_type.h"
 #include "paddle/fluid/memory/memcpy.h" #include "paddle/fluid/memory/memory.h"
@@ -51,8 +52,6 @@ std::ostream &operator<<(std::ostream &os, const LoD &lod) { } std::ostream &operator<<(std::ostream &os, const LoDTensor &t) {
-  PADDLE_ENFORCE(t.type().hash_code() == typeid(float).hash_code());
-
 if (!platform::is_cpu_place(t.place())) { LoDTensor tt; framework::TensorCopy(t, platform::CPUPlace(), &tt);
@@ -70,7 +69,13 @@ std::ostream &operator<<(std::ostream &os, const LoDTensor &t) { // only print first ten elements int64_t size = t.numel() < 10 ? t.numel() : 10; for (int64_t i = 0; i < size; ++i) {
-    os << t.data<float>()[i] << " ";
+    if (IsType<float>(t.type())) {
+      os << t.data<float>()[i] << " ";
+    } else if (IsType<int64_t>(t.type())) {
+      os << t.data<int64_t>()[i] << " ";
+    } else {
+      PADDLE_THROW("LoDTensor data type not in [float, int64_t]");
+    }
 }
 return os;
@@ -85,6 +90,7 @@ std::string LoDToString(const LoD &lod) { LoD SliceInLevel(const LoD &in, size_t level, size_t elem_begin, size_t elem_end) { PADDLE_ENFORCE_LT(level, in.size());
+  PADDLE_ENFORCE_LT(elem_begin, elem_end);
 PADDLE_ENFORCE_LT(elem_end, in[level].size()); LoD res;
@@ -306,19 +312,22 @@ void WriteToRecordIO(recordio::Writer *writer, writer->Write(buffer.str()); }
-std::vector<LoDTensor> ReadFromRecordIO(
-    recordio::Scanner *scanner, const platform::DeviceContext &dev_ctx) {
-  std::vector<LoDTensor> result;
-  if (scanner->HasNext()) {
-    std::istringstream sin(scanner->Next());
-    uint32_t sz;
-    sin.read(reinterpret_cast<char *>(&sz), sizeof(uint32_t));
-    result.resize(sz);
-    for (uint32_t i = 0; i < sz; ++i) {
-      DeserializeFromStream(sin, &result[i], dev_ctx);
-    }
+bool ReadFromRecordIO(recordio::Scanner *scanner,
+                      const platform::DeviceContext &dev_ctx,
+                      std::vector<LoDTensor> *result_ptr) {
+  if (!scanner->HasNext()) {
+    return false;
 }
-  return result;
+  std::istringstream sin(scanner->Next());
+  uint32_t sz;
+  sin.read(reinterpret_cast<char *>(&sz), sizeof(uint32_t));
+  auto &result = *result_ptr;
+  result.resize(sz);
+  for (uint32_t i = 0; i < sz; ++i) {
+    DeserializeFromStream(sin, &result[i], dev_ctx);
+  }
+
+  return true;
 }
 std::vector<LoDTensor> LoDTensor::SplitLoDTensor(
@@ -380,7 +389,7 @@ void LoDTensor::MergeLoDTensor( LoD new_lod = lod_tensors[0]->lod(); for (size_t i = 1; i < lod_tensors.size(); ++i) { auto *t = lod_tensors[i];
-    PADDLE_ENFORCE_EQ(new_type.hash_code(), t->type().hash_code());
+    PADDLE_ENFORCE_EQ(new_type, t->type());
 PADDLE_ENFORCE_EQ(new_layout, t->layout()); PADDLE_ENFORCE_EQ(framework::product(new_dim) / new_dim[0],
@@ -388,6 +397,7 @@ void LoDTensor::MergeLoDTensor( new_dim[0] += t->dims()[0]; auto &lod = t->lod();
+    PADDLE_ENFORCE_EQ(new_lod.size(), lod.size());
 for (size_t j = 0; j < lod.size(); ++j) { auto &sub_lod = new_lod[j]; auto &offset = sub_lod.back();
@@ -410,5 +420,38 @@ void LoDTensor::MergeLoDTensor( } }
+LoD ConvertToLengthBasedLoD(const LoD &offset_lod) {
+  LoD length_lod;
+  length_lod.reserve(offset_lod.size());
+  for (size_t lvl = 0; lvl < offset_lod.size(); ++lvl) {
+    std::vector<size_t> level;
+    if (offset_lod[lvl].size() > 0) {
+      level.reserve(offset_lod[lvl].size() - 1);
+    }
+    for (size_t idx = 0; idx < offset_lod[lvl].size() - 1; ++idx) {
+      level.push_back(offset_lod[lvl][idx + 1] - offset_lod[lvl][idx]);
+    }
+    length_lod.push_back(level);
+  }
+  return length_lod;
+}
+
+LoD ConvertToOffsetBasedLoD(const LoD &length_lod) {
+  LoD offset_lod;
+
offset_lod.reserve(length_lod.size());
+  for (size_t lvl = 0; lvl < length_lod.size(); ++lvl) {
+    std::vector<size_t> level;
+    level.reserve(length_lod[lvl].size() + 1);
+    size_t tmp = 0;
+    level.push_back(tmp);
+    for (size_t idx = 0; idx < length_lod[lvl].size(); ++idx) {
+      tmp += length_lod[lvl][idx];
+      level.push_back(tmp);
+    }
+    offset_lod.push_back(level);
+  }
+  return offset_lod;
+}
+
 }  // namespace framework }  // namespace paddle
diff --git a/paddle/fluid/framework/lod_tensor.h b/paddle/fluid/framework/lod_tensor.h
index 1159fee39b..e9b473d547 100644
--- a/paddle/fluid/framework/lod_tensor.h
+++ b/paddle/fluid/framework/lod_tensor.h
@@ -223,8 +223,23 @@ extern void WriteToRecordIO(recordio::Writer* writer, const std::vector<LoDTensor>& tensor, const platform::DeviceContext& dev_ctx);
-extern std::vector<LoDTensor> ReadFromRecordIO(
-    recordio::Scanner* scanner, const platform::DeviceContext& dev_ctx);
+extern bool ReadFromRecordIO(recordio::Scanner* scanner,
+                             const platform::DeviceContext& dev_ctx,
+                             std::vector<LoDTensor>* result_ptr);
+
+/*
+ * Convert between length-based LoD and offset-based LoD.
+ * The implementation of LoDTensor class use offset-based LoD.
+ * However, we want to expose the more user-friendly length-based
+ * LoD to the Python side instead.
+ *
+ * Example:
+ * If offset_lod = [[0, 2, 3],[0, 3, 5, 9]]
+ * then length_lod = [[2, 1], [3, 2, 4]]
+ */
+LoD ConvertToLengthBasedLoD(const LoD& offset_lod);
+
+LoD ConvertToOffsetBasedLoD(const LoD& length_lod);
 }  // namespace framework }  // namespace paddle
diff --git a/paddle/fluid/framework/lod_tensor_test.cc b/paddle/fluid/framework/lod_tensor_test.cc
index 77e5ec4c7d..cd50aaa260 100644
--- a/paddle/fluid/framework/lod_tensor_test.cc
+++ b/paddle/fluid/framework/lod_tensor_test.cc
@@ -26,6 +26,20 @@ namespace paddle { namespace framework {
+TEST(LoD, PrintLoDTensor) {
+  LoDTensor tensor1;
+  tensor1.mutable_data<float>(platform::CPUPlace());
+  tensor1.data<float>()[0] = 0.2;
+  tensor1.data<float>()[1] = 0.5;
+  LOG(INFO) << tensor1;
+
+  LoDTensor tensor2;
+  tensor2.mutable_data<int64_t>(platform::CPUPlace());
+  tensor2.data<int64_t>()[0] = 1;
+  tensor2.data<int64_t>()[1] = 2;
+  LOG(INFO) << tensor2;
+}
+
 TEST(LoD, data) { LoD lod{{0, 1, 2}}; lod.push_back({0, 2, 4, 5});
@@ -37,7 +51,7 @@ TEST(LoD, data) { } }
-TEST(LodExpand, test) {
+TEST(LoD, ExpandLoD) {
 LoD lod{{0, 2}}; LoDTensor tensor; tensor.set_lod(lod);
@@ -228,11 +242,44 @@ TEST(LoD, CheckAbsLoD) { ASSERT_FALSE(CheckAbsLoD(abs_lod0)); }
-TEST(LoDTensor, RecordIO) {
+TEST(LoD, ConvertToLengthBasedLoD) {
+  LoD offset_lod;
+  offset_lod.push_back(std::vector<size_t>({0, 2}));
+  offset_lod.push_back(std::vector<size_t>({0, 1, 3}));
+  offset_lod.push_back(std::vector<size_t>({0, 2, 4, 5}));
+
+  LoD length_lod = ConvertToLengthBasedLoD(offset_lod);
+
+  LoD expected;
+  expected.push_back(std::vector<size_t>({2}));
+  expected.push_back(std::vector<size_t>({1, 2}));
+  expected.push_back(std::vector<size_t>({2, 2, 1}));
+
+  EXPECT_EQ(length_lod, expected);
+}
+
+TEST(LoD, ConvertToOffsetBasedLoD) {
+  LoD length_lod;
+  length_lod.push_back(std::vector<size_t>({2}));
+  length_lod.push_back(std::vector<size_t>({1, 2}));
+  length_lod.push_back(std::vector<size_t>({2, 2, 1}));
+
+  LoD offset_lod = ConvertToOffsetBasedLoD(length_lod);
+
+  LoD expected;
+  expected.push_back(std::vector<size_t>({0, 2}));
+  expected.push_back(std::vector<size_t>({0, 1, 3}));
+  expected.push_back(std::vector<size_t>({0, 2, 4, 5}));
+
+  EXPECT_EQ(offset_lod, expected);
+}
+
+template <typename T>
+static void TestRecordIO() {
 LoDTensor tensor;
-  int* tmp = tensor.mutable_data<int>(make_ddim({4, 5}), platform::CPUPlace());
+  T* tmp = tensor.mutable_data<T>(make_ddim({4, 5}),
platform::CPUPlace()); for (int i = 0; i < 20; ++i) {
-    tmp[i] = i;
+    tmp[i] = static_cast<T>(i);
 }
 std::stringstream* stream = new std::stringstream();
@@ -247,23 +294,32 @@ auto assert_tensor_ok = [](const LoDTensor& tensor) { for (int i = 0; i < 20; ++i) {
-      ASSERT_EQ(tensor.data<int>()[i], i);
+      ASSERT_EQ(tensor.data<T>()[i], static_cast<T>(i));
 }
 };
 { std::unique_ptr<std::istream> stream_ptr(stream); recordio::Scanner scanner(std::move(stream_ptr));
-    auto tensors = ReadFromRecordIO(&scanner, ctx);
+    std::vector<LoDTensor> tensors;
+    ASSERT_TRUE(ReadFromRecordIO(&scanner, ctx, &tensors));
 ASSERT_EQ(tensors.size(), static_cast<size_t>(2)); assert_tensor_ok(tensors[0]); assert_tensor_ok(tensors[1]);
-    tensors = ReadFromRecordIO(&scanner, ctx);
+    ASSERT_TRUE(ReadFromRecordIO(&scanner, ctx, &tensors));
 ASSERT_EQ(tensors.size(), static_cast<size_t>(2)); assert_tensor_ok(tensors[0]); assert_tensor_ok(tensors[1]); } }
+TEST(LoDTensor, RecordIO) {
+  TestRecordIO<int>();
+  TestRecordIO<int16_t>();
+  TestRecordIO<uint8_t>();
+  TestRecordIO<float>();
+  TestRecordIO<double>();
+}
+
 }  // namespace framework }  // namespace paddle
diff --git a/paddle/fluid/framework/lod_tensor_test.cu b/paddle/fluid/framework/lod_tensor_test.cu
index e3efbe4c46..b9950627ca 100644
--- a/paddle/fluid/framework/lod_tensor_test.cu
+++ b/paddle/fluid/framework/lod_tensor_test.cu
@@ -17,9 +17,9 @@ #include #include "gtest/gtest.h"
-#include "paddle/fluid/framework/init.h"
 #include "paddle/fluid/framework/lod_tensor.h" #include "paddle/fluid/platform/assert.h"
+#include "paddle/fluid/platform/init.h"
 #include "paddle/fluid/platform/place.h" __global__ void test(size_t* a, int size) {
diff --git a/paddle/fluid/framework/mixed_vector.h b/paddle/fluid/framework/mixed_vector.h
index 29b3396bc9..7836ecb127 100644
--- a/paddle/fluid/framework/mixed_vector.h
+++ b/paddle/fluid/framework/mixed_vector.h
@@ -16,6 +16,7 @@ #include <algorithm> #include <initializer_list>
+#include <utility>
 #include <vector> #include "paddle/fluid/framework/tensor.h"
@@ -26,6 +27,7 @@ namespace paddle { namespace framework {
+#if defined(PADDLE_WITH_CUDA)
 // Vector implements the std::vector interface, and can get Data or // MutableData from any place. The data will be synced implicitly inside. template <typename T> class Vector { Vector() { InitEmpty(); } // Fill vector with value. The vector size is `count`.
-  explicit Vector(size_t count, const T& value = T()) {
+  explicit Vector(size_t count, const T &value = T()) {
 InitEmpty(); if (count != 0) { resize(count);
-      T* ptr = begin();
+      T *ptr = begin();
 for (size_t i = 0; i < count; ++i) { ptr[i] = value; }
@@ -59,7 +61,7 @@ class Vector { // implicit cast from std::vector. template <typename U>
-  Vector(const std::vector<U>& dat) {  // NOLINT
+  Vector(const std::vector<U> &dat) {  // NOLINT
 if (dat.size() == 0) { InitEmpty(); } else {
@@ -68,10 +70,10 @@ } // Copy ctor
-  Vector(const Vector& other) { this->operator=(other); }
+  Vector(const Vector &other) { this->operator=(other); }
 // Copy operator
-  Vector& operator=(const Vector& other) {
+  Vector &operator=(const Vector &other) {
 if (other.size() != 0) { this->InitByIter(other.size(), other.begin(), other.end()); } else {
@@ -81,7 +83,7 @@ } // Move ctor
-  Vector(Vector&& other) {
+  Vector(Vector &&other) {
 this->size_ = other.size_; this->flag_ = other.flag_; if (other.cuda_vec_.memory_size()) {
@@ -93,13 +95,13 @@ } // CPU data access method. Mutable.
-  T& operator[](size_t i) {
+  T &operator[](size_t i) {
 MutableCPU();
-    return const_cast<T*>(cpu_vec_.data<T>())[i];
+    return const_cast<T *>(cpu_vec_.data<T>())[i];
 }
 // CPU data access method. Immutable.
-  const T& operator[](size_t i) const {
+  const T &operator[](size_t i) const {
 ImmutableCPU(); return cpu_vec_.data<T>()[i]; }
@@ -107,43 +109,43 @@ class Vector { // std::vector iterator methods. Based on CPU data access method size_t size() const { return size_; }
-  T* begin() { return capacity() == 0 ? &EmptyDummy() : &this->operator[](0); }
+  T *begin() { return capacity() == 0 ? &EmptyDummy() : &this->operator[](0); }
-  T* end() {
+  T *end() {
 return capacity() == 0 ? &EmptyDummy() : &this->operator[](size()); }
-  T& front() { return *begin(); }
+  T &front() { return *begin(); }
-  T& back() {
+  T &back() {
 auto it = end(); --it; return *it; }
-  const T* begin() const {
+  const T *begin() const {
 return capacity() == 0 ? &EmptyDummy() : &this->operator[](0); }
-  const T* end() const {
+  const T *end() const {
 return capacity() == 0 ? &EmptyDummy() : &this->operator[](size()); }
-  const T* cbegin() const { return begin(); }
+  const T *cbegin() const { return begin(); }
-  const T* cend() const { return end(); }
+  const T *cend() const { return end(); }
-  const T& back() const {
+  const T &back() const {
 auto it = end(); --it; return *it; }
-  T* data() { return begin(); }
+  T *data() { return begin(); }
-  const T* data() const { return begin(); }
+  const T *data() const { return begin(); }
-  const T& front() const { return *begin(); }
+  const T &front() const { return *begin(); }
 // end of std::vector iterator methods // assign this from iterator.
@@ -169,7 +171,7 @@ class Vector { void Extend(It begin, It end) { size_t pre_size = size_; resize(pre_size + (end - begin));
-    T* ptr = this->begin() + pre_size;
+    T *ptr = this->begin() + pre_size;
 for (; begin < end; ++begin, ++ptr) { *ptr = *begin; }
@@ -183,9 +185,9 @@ MutableCPU(); Tensor cpu_tensor; platform::Place cpu = platform::CPUPlace();
-    T* ptr = cpu_tensor.mutable_data<T>(
+    T *ptr = cpu_tensor.mutable_data<T>(
 framework::make_ddim({static_cast<int64_t>(size)}), cpu);
-    const T* old_ptr =
+    const T *old_ptr =
 cpu_vec_.memory_size() == 0 ? nullptr : cpu_vec_.data<T>(); if (old_ptr != nullptr) { std::copy(old_ptr, old_ptr + size_, ptr);
@@ -196,7 +198,7 @@ } // get cuda ptr. immutable
-  const T* CUDAData(platform::Place place) const {
+  const T *CUDAData(platform::Place place) const {
 PADDLE_ENFORCE(platform::is_gpu_place(place), "CUDA Data must on CUDA place"); ImmutableCUDA(place);
@@ -204,10 +206,10 @@ } // get cuda ptr. mutable
-  T* CUDAMutableData(platform::Place place) {
-    const T* ptr = CUDAData(place);
+  T *CUDAMutableData(platform::Place place) {
+    const T *ptr = CUDAData(place);
 flag_ = kDirty | kDataInCUDA;
-    return const_cast<T*>(ptr);
+    return const_cast<T *>(ptr);
 }
 // clear
@@ -228,7 +230,7 @@ } // the unify method to access CPU or CUDA data. immutable.
-  const T* Data(platform::Place place) const {
+  const T *Data(platform::Place place) const {
 if (platform::is_gpu_place(place)) { return CUDAData(place); } else {
@@ -237,7 +239,7 @@ } // the unify method to access CPU or CUDA data. mutable.
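 // A minimal usage sketch of the sync behavior described above (assuming a
 // CUDA build; `gpu_place` is an illustrative platform::CUDAPlace, not part
 // of this patch):
 //
 //   Vector<int> ids(std::vector<int>{1, 2, 3});    // data starts on CPU
 //   const int *d = ids.Data(gpu_place);            // copies to GPU on demand
 //   int *h = ids.MutableData(platform::CPUPlace()); // syncs back, marks dirty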
-  T* MutableData(platform::Place place) {
+  T *MutableData(platform::Place place) {
 if (platform::is_gpu_place(place)) { return CUDAMutableData(place); } else {
@@ -253,7 +255,7 @@ return result; }
-  bool operator==(const Vector& other) const {
+  bool operator==(const Vector &other) const {
 if (size() != other.size()) return false; auto it1 = cbegin(); auto it2 = other.cbegin();
@@ -274,7 +276,7 @@ template <typename Iter> void InitByIter(size_t size, Iter begin, Iter end) { platform::Place cpu = platform::CPUPlace();
-    T* ptr = this->cpu_vec_.template mutable_data<T>(
+    T *ptr = this->cpu_vec_.template mutable_data<T>(
 framework::make_ddim({static_cast<int64_t>(size)}), cpu); for (size_t i = 0; i < size; ++i) { *ptr++ = *begin++;
@@ -368,7 +370,7 @@ } }
-  static T& EmptyDummy() {
+  static T &EmptyDummy() {
 static T dummy = T(); return dummy; }
@@ -379,5 +381,52 @@ size_t size_; };
-}  // namespace framework
+#else  // PADDLE_WITH_CUDA
+
+template <typename T>
+class CPUVector : public std::vector<T, std::allocator<T>> {
+ public:
+  CPUVector() : std::vector<T>() {}
+  CPUVector(size_t count, const T &value = T())  // NOLINT
+      : std::vector<T>(count, value) {}
+  CPUVector(std::initializer_list<T> init) : std::vector<T>(init) {}
+  CPUVector(const std::vector<T> &other) : std::vector<T>(other) {}  // NOLINT
+  CPUVector(const CPUVector<T> &other) : std::vector<T>(other) {}
+  CPUVector(CPUVector<T> &&other) : std::vector<T>(std::move(other)) {}
+  CPUVector(std::vector<T> &&other)  // NOLINT
+      : std::vector<T>(std::move(other)) {}
+  CPUVector &operator=(const CPUVector &other) {
+    this->assign(other.begin(), other.end());
+    return *this;
+  }
+  CPUVector &operator=(const std::vector<T> &other) {
+    this->assign(other.begin(), other.end());
+    return *this;
+  }
+
+  friend std::ostream &operator<<(std::ostream &os, const CPUVector<T> &other) {
+    for (auto v : other) {
+      os << v << " ";
+    }
+    return os;
+  }
+
+  T &operator[](size_t id) { return this->at(id); }
+
+  const T &operator[](size_t id) const { return this->at(id); }
+
+  template <typename D>
+  void Extend(const D &begin, const D &end) {
+    this->reserve(this->size() + size_t(end - begin));
+    this->insert(this->end(), begin, end);
+  }
+};
+
+template <typename T>
+using Vector = CPUVector<T>;
+
+#endif  // PADDLE_WITH_CUDA
+
+}  // namespace framework
 }  // namespace paddle
diff --git a/paddle/fluid/framework/mixed_vector_test.cc b/paddle/fluid/framework/mixed_vector_test.cc
new file mode 100644
index 0000000000..0599c8d384
--- /dev/null
+++ b/paddle/fluid/framework/mixed_vector_test.cc
@@ -0,0 +1,72 @@
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
*/
+
+#include
+
+#include "glog/logging.h"
+#include "gtest/gtest.h"
+#include "paddle/fluid/framework/mixed_vector.h"
+
+template <typename T>
+using vec = paddle::framework::Vector<T>;
+
+TEST(mixed_vector, CPU_VECTOR) {
+  vec<int> tmp;
+  for (int i = 0; i < 10; ++i) {
+    tmp.push_back(i);
+  }
+  ASSERT_EQ(tmp.size(), 10UL);
+  vec<int> tmp2;
+  tmp2 = tmp;
+  ASSERT_EQ(tmp2.size(), 10UL);
+  for (int i = 0; i < 10; ++i) {
+    ASSERT_EQ(tmp2[i], i);
+    ASSERT_EQ(tmp2[i], tmp[i]);
+  }
+  int cnt = 0;
+  for (auto& t : tmp2) {
+    ASSERT_EQ(t, cnt);
+    ++cnt;
+  }
+}
+
+TEST(mixed_vector, InitWithCount) {
+  paddle::framework::Vector<int> vec(10, 10);
+  for (int i = 0; i < 10; ++i) {
+    ASSERT_EQ(vec[i], 10);
+  }
+}
+
+TEST(mixed_vector, ForEach) {
+  vec<int> tmp;
+  for (auto& v : tmp) {
+    VLOG(3) << v;
+  }
+}
+
+TEST(mixed_vector, Reserve) {
+  paddle::framework::Vector<int> vec;
+  vec.reserve(1);
+  vec.push_back(0);
+  vec.push_back(0);
+  vec.push_back(0);
+}
+
+TEST(mixed_vector, Resize) {
+  paddle::framework::Vector<int> vec;
+  vec.resize(1);
+  vec.push_back(0);
+  vec.push_back(0);
+  vec.push_back(0);
+}
diff --git a/paddle/fluid/framework/mixed_vector_test.cu b/paddle/fluid/framework/mixed_vector_test.cu
index d57f825108..4b0caa8d35 100644
--- a/paddle/fluid/framework/mixed_vector_test.cu
+++ b/paddle/fluid/framework/mixed_vector_test.cu
@@ -11,7 +11,9 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */
+
 #include
+#include
 #include "glog/logging.h" #include "gtest/gtest.h"
@@ -21,26 +23,6 @@ template <typename T> using vec = paddle::framework::Vector<T>;
-TEST(mixed_vector, CPU_VECTOR) {
-  vec<int> tmp;
-  for (int i = 0; i < 10; ++i) {
-    tmp.push_back(i);
-  }
-  ASSERT_EQ(tmp.size(), 10UL);
-  vec<int> tmp2;
-  tmp2 = tmp;
-  ASSERT_EQ(tmp2.size(), 10UL);
-  for (int i = 0; i < 10; ++i) {
-    ASSERT_EQ(tmp2[i], i);
-    ASSERT_EQ(tmp2[i], tmp[i]);
-  }
-  int cnt = 0;
-  for (auto& t : tmp2) {
-    ASSERT_EQ(t, cnt);
-    ++cnt;
-  }
-}
-
 static __global__ void multiply_10(int* ptr) { for (int i = 0; i < 10; ++i) { ptr[i] *= 10; }
@@ -91,24 +73,3 @@ TEST(mixed_vector, MultiGPU) { ASSERT_EQ(tmp[i], i * 100); } }
-
-TEST(mixed_vector, InitWithCount) {
-  paddle::framework::Vector<int> vec(10, 10);
-  for (int i = 0; i < 10; ++i) {
-    ASSERT_EQ(vec[i], 10);
-  }
-}
-
-TEST(mixed_vector, ForEach) {
-  vec<int> tmp;
-  for (auto& v : tmp) {
-  }
-}
-
-TEST(mixed_vector, Reserve) {
-  paddle::framework::Vector<int> vec;
-  vec.reserve(1);
-  vec.push_back(0);
-  vec.push_back(0);
-  vec.push_back(0);
-}
diff --git a/paddle/fluid/framework/op_desc.cc b/paddle/fluid/framework/op_desc.cc
index 076c457130..03f7e71c03 100644
--- a/paddle/fluid/framework/op_desc.cc
+++ b/paddle/fluid/framework/op_desc.cc
@@ -20,6 +20,7 @@ limitations under the License.
*/ #include #include "glog/logging.h" #include "paddle/fluid/framework/block_desc.h"
+#include "paddle/fluid/framework/op_proto_maker.h"
 #include "paddle/fluid/framework/operator.h" #include "paddle/fluid/framework/program_desc.h" #include "paddle/fluid/framework/shape_inference.h"
@@ -102,7 +103,7 @@ void OpDesc::CopyFrom(const OpDesc &op_desc) { need_update_ = true; }
-OpDesc::OpDesc(const proto::OpDesc &desc, ProgramDesc *prog, BlockDesc *block)
+OpDesc::OpDesc(const proto::OpDesc &desc, BlockDesc *block)
 : desc_(desc), need_update_(false) { // restore inputs_ int input_size = desc_.inputs_size();
@@ -210,6 +211,12 @@ void OpDesc::SetBlockAttr(const std::string &name, BlockDesc *block) { need_update_ = true; }
+void OpDesc::SetBlocksAttr(const std::string &name,
+                           std::vector<BlockDesc *> blocks) {
+  this->attrs_[name] = blocks;
+  need_update_ = true;
+}
+
 void OpDesc::SetAttrMap( const std::unordered_map &attr_map) { attrs_ = attr_map;
@@ -222,7 +229,29 @@ Attribute OpDesc::GetAttr(const std::string &name) const { return it->second; }
-int OpDesc::GetBlockAttr(const std::string &name) const {
+Attribute OpDesc::GetNullableAttr(const std::string &name) const {
+  auto it = attrs_.find(name);
+  if (it != attrs_.end()) {
+    return it->second;
+  } else {
+    return Attribute();
+  }
+}
+
+std::vector<int> OpDesc::GetBlocksAttrIds(const std::string &name) const {
+  auto it = attrs_.find(name);
+  PADDLE_ENFORCE(it != attrs_.end(), "Attribute %s is not found", name);
+  auto blocks = boost::get<std::vector<BlockDesc *>>(it->second);
+
+  std::vector<int> ids;
+  for (auto n : blocks) {
+    ids.push_back(n->ID());
+  }
+
+  return ids;
+}
+
+int OpDesc::GetBlockAttrId(const std::string &name) const {
 auto it = attrs_.find(name); PADDLE_ENFORCE(it != attrs_.end(), "Attribute %s is not found", name);
 return boost::get<BlockDesc *>(it->second)->ID();
@@ -233,13 +262,8 @@ const std::unordered_map &OpDesc::GetAttrMap() const { } void OpDesc::Rename(const std::string &old_name, const std::string &new_name) {
-  for (auto &input : inputs_) {
-    std::replace(input.second.begin(), input.second.end(), old_name, new_name);
-  }
-  for (auto &output : outputs_) {
-    std::replace(output.second.begin(), output.second.end(), old_name,
-                 new_name);
-  }
+  RenameInput(old_name, new_name);
+  RenameOutput(old_name, new_name);
 need_update_ = true; }
@@ -249,6 +273,13 @@ void OpDesc::RenameOutput(const std::string &old_name, std::replace(output.second.begin(), output.second.end(), old_name, new_name); }
+
+  auto it = attrs_.find(framework::OpProtoAndCheckerMaker::OpRoleVarAttrName());
+  if (it != attrs_.end()) {
+    auto &op_vars = boost::get<std::vector<std::string>>(it->second);
+    std::replace(op_vars.begin(), op_vars.end(), old_name, new_name);
+  }
+
 need_update_ = true; }
@@ -257,6 +288,13 @@ void OpDesc::RenameInput(const std::string &old_name, for (auto &input : inputs_) { std::replace(input.second.begin(), input.second.end(), old_name, new_name); }
+
+  auto it = attrs_.find(framework::OpProtoAndCheckerMaker::OpRoleVarAttrName());
+  if (it != attrs_.end()) {
+    auto &op_vars = boost::get<std::vector<std::string>>(it->second);
+    std::replace(op_vars.begin(), op_vars.end(), old_name, new_name);
+  }
+
 need_update_ = true; }
@@ -286,6 +324,13 @@ struct SetAttrDescVisitor : public boost::static_visitor { void operator()(const std::vector<bool> &v) const { VectorToRepeated(v, attr_->mutable_bools()); }
+  void operator()(const std::vector<BlockDesc *> &v) const {
+    std::vector<int> blocks_idx;
+    for (auto blk : v) {
+      blocks_idx.push_back(blk->ID());
+    }
+    VectorToRepeated(blocks_idx, attr_->mutable_blocks_idx());
+  }
 void operator()(BlockDesc *desc) const {
attr_->set_block_idx(desc->ID()); } void operator()(int64_t v) const { attr_->set_l(v); } void operator()(boost::blank) const { PADDLE_THROW("Unexpected branch"); }
diff --git a/paddle/fluid/framework/op_desc.h b/paddle/fluid/framework/op_desc.h
index 3ee36a47c1..b77d84125a 100644
--- a/paddle/fluid/framework/op_desc.h
+++ b/paddle/fluid/framework/op_desc.h
@@ -33,13 +33,14 @@ class OpDesc { OpDesc(const std::string &type, const VariableNameMap &inputs, const VariableNameMap &outputs, const AttributeMap &attrs);
-  OpDesc(const proto::OpDesc &desc, ProgramDesc *prog, BlockDesc *block);
+  OpDesc(const proto::OpDesc &desc, BlockDesc *block);
 explicit OpDesc(BlockDesc *block) : block_(block) {}
 OpDesc(const OpDesc &other, BlockDesc *block) { *this = other; block_ = block;
+    need_update_ = true;
 }
 void CopyFrom(const OpDesc &op_desc);
@@ -76,9 +77,15 @@ class OpDesc { void SetBlockAttr(const std::string &name, BlockDesc *block);
+  void SetBlocksAttr(const std::string &name, std::vector<BlockDesc *> blocks);
+
 Attribute GetAttr(const std::string &name) const;
-  int GetBlockAttr(const std::string &name) const;
+  Attribute GetNullableAttr(const std::string &name) const;
+
+  int GetBlockAttrId(const std::string &name) const;
+
+  std::vector<int> GetBlocksAttrIds(const std::string &name) const;
 void Rename(const std::string &old_name, const std::string &new_name);
diff --git a/paddle/fluid/framework/op_info.cc b/paddle/fluid/framework/op_info.cc
index b99e82f8c4..af75baa5c4 100644
--- a/paddle/fluid/framework/op_info.cc
+++ b/paddle/fluid/framework/op_info.cc
@@ -17,13 +17,12 @@ limitations under the License. */ namespace paddle { namespace framework {
-static OpInfoMap* g_op_info_map = nullptr;
-
+// C++11 removes the need for manual locking. Concurrent execution shall wait
+// if a static local variable is already being initialized.
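+// A sketch of the pattern (T stands for any default-constructible type):
+//
+//   T &Instance() {
+//     static T instance;  // constructed exactly once, even under races
+//     return instance;
+//   }
+//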
+// https://stackoverflow.com/questions/11711920/how-to-implement-multithread-safe-singleton-in-c11-without-using-mutex OpInfoMap& OpInfoMap::Instance() { - if (g_op_info_map == nullptr) { - g_op_info_map = new OpInfoMap(); - } - return *g_op_info_map; + static OpInfoMap g_op_info_map; + return g_op_info_map; } } // namespace framework } // namespace paddle diff --git a/paddle/fluid/framework/op_kernel_type.h b/paddle/fluid/framework/op_kernel_type.h index fab20d75f5..c59b232191 100644 --- a/paddle/fluid/framework/op_kernel_type.h +++ b/paddle/fluid/framework/op_kernel_type.h @@ -87,10 +87,17 @@ inline std::string KernelTypeToString(const OpKernelType& kernel_key) { } inline bool NeedTransformLayout(const DataLayout& l, const DataLayout& r) { - return l != DataLayout::kAnyLayout && r != DataLayout::kAnyLayout && l != r; + bool ret = + (l != DataLayout::kAnyLayout && r != DataLayout::kAnyLayout && l != r); +#ifdef PADDLE_WITH_MKLDNN + // Layout transform needed for either non-MKLDNN to MKLDNN or vice versa + ret |= (l != DataLayout::kMKLDNN && r == DataLayout::kMKLDNN); + ret |= (l == DataLayout::kMKLDNN && r != DataLayout::kMKLDNN); +#endif + return ret; } -inline bool TransFromNeeded(const OpKernelType& l, const OpKernelType& r) { +inline bool NeedTransform(const OpKernelType& l, const OpKernelType& r) { return (!platform::places_are_same_class(l.place_, r.place_)) || (l.data_type_ != r.data_type_) || NeedTransformLayout(l.data_layout_, r.data_layout_); diff --git a/paddle/fluid/framework/op_kernel_type_test.cc b/paddle/fluid/framework/op_kernel_type_test.cc index d37ce149ce..3e17a512ce 100644 --- a/paddle/fluid/framework/op_kernel_type_test.cc +++ b/paddle/fluid/framework/op_kernel_type_test.cc @@ -27,8 +27,15 @@ TEST(OpKernelType, ToString) { LibraryType::kCUDNN); ASSERT_EQ(paddle::framework::KernelTypeToString(op_kernel_type), - "data_type[float32]:data_layout[NCHW]:place[CPUPlace]:library_type[" + "data_type[float]:data_layout[NCHW]:place[CPUPlace]:library_type[" "CUDNN]"); + + using CUDAPlace = paddle::platform::CUDAPlace; + OpKernelType op_kernel_type2(DataType::FP16, CUDAPlace(0), DataLayout::kNCHW, + LibraryType::kCUDNN); + ASSERT_EQ(paddle::framework::KernelTypeToString(op_kernel_type2), + "data_type[float16]:data_layout[NCHW]:place[CUDAPlace(0)]:library_" + "type[CUDNN]"); } TEST(OpKernelType, Hash) { diff --git a/paddle/fluid/framework/op_proto_maker.cc b/paddle/fluid/framework/op_proto_maker.cc index c479d7617c..2288c7fe66 100644 --- a/paddle/fluid/framework/op_proto_maker.cc +++ b/paddle/fluid/framework/op_proto_maker.cc @@ -13,6 +13,7 @@ limitations under the License. 
*/ #include "paddle/fluid/framework/op_proto_maker.h" #include <string>
+#include <vector>
 namespace paddle { namespace framework {
@@ -20,6 +21,7 @@ void OpProtoAndCheckerMaker::Validate() { validated_ = true; CheckNoDuplicatedInOutAttrs();
+  CheckReuseVars();
 }
 OpProtoAndCheckerMaker::VariableBuilder OpProtoAndCheckerMaker::AddInput(
@@ -38,6 +40,40 @@ OpProtoAndCheckerMaker::VariableBuilder OpProtoAndCheckerMaker::AddOutput( return OpProtoAndCheckerMaker::VariableBuilder{output}; }
+void OpProtoAndCheckerMaker::Reuse(const std::string& name,
+                                   const std::string& reused_name) {
+  bool found = false;
+  proto::OpProto::Var* var;
+
+  for (auto& var : proto_->inputs()) {
+    if (var.name() == reused_name) {
+      found = true;
+      break;
+    }
+  }
+  PADDLE_ENFORCE(found == true,
+                 "Input/Output name: %s reused_name: %s, one of them does "
+                 "not exist or is not matched.",
+                 name, reused_name);
+
+  found = false;
+  for (int i = 0; i < proto_->outputs().size(); ++i) {
+    var = proto_->mutable_outputs()->Mutable(i);
+    if (var->name() == name) {
+      PADDLE_ENFORCE(!var->has_reuse(),
+                     "Output(%s) has been set reused var of %s", name,
+                     var->reuse());
+      found = true;
+      var->set_reuse(reused_name);
+      break;
+    }
+  }
+  PADDLE_ENFORCE(found == true,
+                 "Input/Output name: %s reused_name: %s, one of them does "
+                 "not exist or is not matched.",
+                 name, reused_name);
+}
+
 void OpProtoAndCheckerMaker::CheckNoDuplicatedInOutAttrs() { std::unordered_set<std::string> names; auto checker = [&](const std::string& name) {
@@ -55,5 +91,46 @@ void OpProtoAndCheckerMaker::CheckNoDuplicatedInOutAttrs() { } }
+void OpProtoAndCheckerMaker::CheckReuseVars() {
+  std::unordered_set<std::string> names;
+  for (auto& input : proto_->inputs()) {
+    names.insert(input.name());
+  }
+  auto checker = [&](const std::string& name, const std::string& reused) {
+    PADDLE_ENFORCE(
+        names.count(reused),
+        "Output [%s] reuse Input [%s], but the input is not registered.", name,
+        reused);
+  };
+  for (auto& output : proto_->outputs()) {
+    if (output.has_reuse()) {
+      checker(output.name(), output.reuse());
+    }
+  }
+}
+
+void OpProtoAndCheckerMaker::operator()(proto::OpProto* proto,
+                                        OpAttrChecker* attr_checker) {
+  proto_ = proto;
+  op_checker_ = attr_checker;
+  Make();
+
+  AddAttr<int>(OpRoleAttrName(), "The role of this operator")
+      .InEnum(
+          {static_cast<int>(OpRole::kForward),
+           static_cast<int>(OpRole::kBackward),
+           static_cast<int>(OpRole::kOptimize), static_cast<int>(OpRole::kRPC),
+           static_cast<int>(OpRole::kLoss) | static_cast<int>(OpRole::kForward),
+           static_cast<int>(OpRole::kLoss) |
+               static_cast<int>(OpRole::kBackward),
+           static_cast<int>(OpRole::kNotSpecified)})
+      .SetDefault(static_cast<int>(OpRole::kNotSpecified));
+  AddAttr<std::vector<std::string>>(OpRoleVarAttrName(),
+                                    "Optimized for variable")
+      .SetDefault({});
+
+  Validate();
+}
+
 }  // namespace framework }  // namespace paddle
diff --git a/paddle/fluid/framework/op_proto_maker.h b/paddle/fluid/framework/op_proto_maker.h
index 0beb57ce16..80970291c9 100644
--- a/paddle/fluid/framework/op_proto_maker.h
+++ b/paddle/fluid/framework/op_proto_maker.h
@@ -14,56 +14,77 @@ limitations under the License. */ #pragma once #include <string>
+#include <vector>
+
+#include "glog/logging.h"
 #include "paddle/fluid/framework/attribute.h" #include "paddle/fluid/framework/framework.pb.h"
- namespace paddle { namespace framework {
+enum class OpRole {
+  kForward = 0x0000,
+  kBackward = 0x0001,
+  kOptimize = 0x0002,
+  kRPC = 0x0003,
+
+  kLoss = 0x0100,
+  // The default value of op's role. This should only be used for unittests
+  // and CreateOp inside an operator.
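+  // Note that the values are bit flags: roles combine via bitwise OR, e.g.
+  // static_cast<int>(OpRole::kLoss) | static_cast<int>(OpRole::kBackward)
+  // == 0x0101 marks an op that computes gradients of the loss (these combined
+  // values appear in the InEnum list in op_proto_maker.cc above).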
+  kNotSpecified = 0x1000,
+};
+
 // this class not only make proto but also init attribute checkers. class OpProtoAndCheckerMaker { public:
-  using OpProto = proto::OpProto;
-  using OpAttrChecker = framework::OpAttrChecker;
-  OpProtoAndCheckerMaker(OpProto* proto, OpAttrChecker* op_checker)
-      : proto_(proto), op_checker_(op_checker) {}
+  static const char *OpRoleAttrName() { return "op_role"; }
+  static const char *OpRoleVarAttrName() { return "op_role_var"; }
+
+  void operator()(proto::OpProto *proto, OpAttrChecker *attr_checker);
+
+  virtual void Make() = 0;
 virtual ~OpProtoAndCheckerMaker() {
-    PADDLE_ENFORCE(validated_, "should call Validate after build");
+    CHECK(validated_) << "should call Validate after build";
 }
-  void Validate();
-
 protected: struct VariableBuilder {
-    OpProto::Var* var_;
+    proto::OpProto::Var *var_;
-    VariableBuilder& AsDuplicable() {
+    VariableBuilder &AsDuplicable() {
 var_->set_duplicable(true); return *this; }
-    VariableBuilder& AsIntermediate() {
+    VariableBuilder &AsIntermediate() {
 var_->set_intermediate(true); return *this; }
-    VariableBuilder& AsDispensable() {
+    VariableBuilder &AsDispensable() {
 var_->set_dispensable(true); return *this; }
+
+    VariableBuilder &Reuse(const std::string &name) {
+      var_->set_reuse(name);
+      return *this;
+    }
 };
-  VariableBuilder AddInput(const std::string& name, const std::string& comment);
+  VariableBuilder AddInput(const std::string &name, const std::string &comment);
+
+  VariableBuilder AddOutput(const std::string &name,
+                            const std::string &comment);
-  VariableBuilder AddOutput(const std::string& name,
-                            const std::string& comment);
+  void Reuse(const std::string &name, const std::string &reused_name);
 template <typename T>
-  TypedAttrChecker<T>& AddAttr(const std::string& name,
-                               const std::string& comment,
+  TypedAttrChecker<T> &AddAttr(const std::string &name,
+                               const std::string &comment,
 bool generated = false) {
-    auto* attr = proto_->add_attrs();
+    auto *attr = proto_->add_attrs();
 attr->set_name(name); attr->set_comment(comment); attr->set_generated(generated);
@@ -71,21 +92,17 @@ class OpProtoAndCheckerMaker { return op_checker_->AddAttrChecker<T>(name); }
-  void AddComment(const std::string& comment) { proto_->set_comment(comment); }
+  void AddComment(const std::string &comment) { proto_->set_comment(comment); }
 private: void CheckNoDuplicatedInOutAttrs();
+  void Validate();
-  OpProto* proto_;
-  OpAttrChecker* op_checker_;
-  bool validated_{false};
-};
+  void CheckReuseVars();
-class NOPMaker : public OpProtoAndCheckerMaker {
- public:
-  NOPMaker(OpProto* proto, framework::OpAttrChecker* op_checker)
-      : OpProtoAndCheckerMaker(proto, op_checker) {}
+  proto::OpProto *proto_;
+  OpAttrChecker *op_checker_;
+  bool validated_{false};
 };
-
 }  // namespace framework }  // namespace paddle
diff --git a/paddle/fluid/framework/op_proto_maker_test.cc b/paddle/fluid/framework/op_proto_maker_test.cc
index a8d8c6386a..b71c7b6468 100644
--- a/paddle/fluid/framework/op_proto_maker_test.cc
+++ b/paddle/fluid/framework/op_proto_maker_test.cc
@@ -18,9 +18,7 @@ limitations under the License.
*/ class TestAttrProtoMaker : public paddle::framework::OpProtoAndCheckerMaker { public:
-  TestAttrProtoMaker(paddle::framework::proto::OpProto* proto,
-                     paddle::framework::OpAttrChecker* op_checker)
-      : OpProtoAndCheckerMaker(proto, op_checker) {
+  void Make() {
 AddAttr<float>("scale", "scale of test op"); AddAttr<float>("scale", "scale of test op"); }
@@ -29,15 +27,14 @@ TEST(ProtoMaker, DuplicatedAttr) { paddle::framework::proto::OpProto op_proto; paddle::framework::OpAttrChecker op_checker;
-  auto proto_maker = TestAttrProtoMaker(&op_proto, &op_checker);
-  ASSERT_THROW(proto_maker.Validate(), paddle::platform::EnforceNotMet);
+  TestAttrProtoMaker proto_maker;
+  ASSERT_THROW(proto_maker(&op_proto, &op_checker),
+               paddle::platform::EnforceNotMet);
 }
 class TestInOutProtoMaker : public paddle::framework::OpProtoAndCheckerMaker { public:
-  TestInOutProtoMaker(paddle::framework::proto::OpProto* proto,
-                      paddle::framework::OpAttrChecker* op_checker)
-      : OpProtoAndCheckerMaker(proto, op_checker) {
+  void Make() {
 AddInput("input", "input of test op"); AddInput("input", "input of test op"); }
@@ -46,6 +43,124 @@ TEST(ProtoMaker, DuplicatedInOut) { paddle::framework::proto::OpProto op_proto; paddle::framework::OpAttrChecker op_checker;
-  auto proto_maker = TestInOutProtoMaker(&op_proto, &op_checker);
-  ASSERT_THROW(proto_maker.Validate(), paddle::platform::EnforceNotMet);
+  TestInOutProtoMaker proto_maker;
+  ASSERT_THROW(proto_maker(&op_proto, &op_checker),
+               paddle::platform::EnforceNotMet);
 }
+
+class TestInplaceProtoMaker : public paddle::framework::OpProtoAndCheckerMaker {
+ public:
+  void Make() {
+    AddInput("X", "input of test op");
+    AddOutput("XOut", "output of test op").Reuse("X");
+  }
+};
+
+class TestInplaceProtoMaker2
+    : public paddle::framework::OpProtoAndCheckerMaker {
+ public:
+  void Make() {
+    AddInput("X", "input of test op");
+    AddOutput("XOut", "output of test op").Reuse("X");
+    AddOutput("NoOut", "output of test op").Reuse("NotExists");
+  }
+};
+
+TEST(ProtoMaker, InplaceOutput) {
+  paddle::framework::proto::OpProto op_proto, op_proto2;
+  paddle::framework::OpAttrChecker op_checker;
+  TestInplaceProtoMaker proto_maker;
+  TestInplaceProtoMaker2 proto_maker2;
+
+  proto_maker(&op_proto, &op_checker);
+
+  ASSERT_THROW(proto_maker2(&op_proto2, &op_checker),
+               paddle::platform::EnforceNotMet);
+}
+
+// normal reuse
+class TestReuseProtoMaker : public paddle::framework::OpProtoAndCheckerMaker {
+ public:
+  void Make() {
+    AddInput("X", "input of test op");
+    AddInput("Y", "input of test op");
+    AddOutput("Out", "output of test op");
+    AddOutput("XOut", "output of test op");
+    // avoid destructor exception.
+    // Validate();
+    TestReuse();
+  }
+
+  virtual void TestReuse() {}
+};
+
+// test duplicate reuse error
+class TestReuseProtoMaker2 : public TestReuseProtoMaker {
+ public:
+  void TestReuse() {
+    Reuse("Out", "X");
+    Reuse("Out", "Y");
+  }
+};
+
+// NotExists Input
+class TestReuseProtoMaker3 : public TestReuseProtoMaker {
+ public:
+  void TestReuse() {
+    Reuse("Out", "NotExists");
+    Reuse("XOut", "X");
+  }
+};
+
+// NotExists Output
+class TestReuseProtoMaker4 : public TestReuseProtoMaker {
+ public:
+  void TestReuse() { Reuse("NotExists", "X"); }
+};
+
+TEST(ProtoMaker, Reuse) {
+  paddle::framework::proto::OpProto op_proto;
+  paddle::framework::OpAttrChecker op_checker;
+  TestReuseProtoMaker proto_maker;
+  proto_maker(&op_proto, &op_checker);
+}
+
+// NOTE(dzhwinter):
+// There is a fatal CHECK in the base class destructor, which calls abort()
+// instead of throwing an exception. If we throw an exception in Make(), we
+// will trigger the CHECK and terminate the tests.
+//
+// I had tried to replace the default CHECK with an exception; however, that
+// is still not supported by glog.
+// the details:
+// https://github.com/google/glog/issues/249
+// https://github.com/facebookresearch/TensorComprehensions/issues/351
+/*
+TEST(ProtoMaker, ReuseWithException) {
+  paddle::framework::proto::OpProto op_proto2, op_proto3, op_proto4;
+  paddle::framework::OpAttrChecker op_checker;
+  TestReuseProtoMaker2 proto_maker2;
+  TestReuseProtoMaker3 proto_maker3;
+  TestReuseProtoMaker4 proto_maker4;
+  EXPECT_THROW(proto_maker2(&op_proto2, &op_checker),
+               paddle::platform::EnforceNotMet);
+
+  EXPECT_THROW(proto_maker3(&op_proto3, &op_checker),
+               paddle::platform::EnforceNotMet);
+
+  EXPECT_THROW(proto_maker4(&op_proto4, &op_checker),
+               paddle::platform::EnforceNotMet);
+}
+
+void FailureFunction() {
+  throw std::runtime_error("Check failed in destructor.");
+  // return 0;
+}
+
+int main(int argc, char** argv) {
+  testing::InitGoogleTest(&argc, argv);
+  google::InstallFailureFunction(&FailureFunction);
+  return RUN_ALL_TESTS();
+}
+*/
diff --git a/paddle/fluid/framework/op_registry.h b/paddle/fluid/framework/op_registry.h
index 748317438b..e7dfa608b4 100644
--- a/paddle/fluid/framework/op_registry.h
+++ b/paddle/fluid/framework/op_registry.h
@@ -76,6 +76,20 @@ class OpRegistry { template <typename PlaceType, bool at_end, size_t I, typename... KernelType> struct OpKernelRegistrarFunctor;
+template <typename PlaceType, typename T, typename Func>
+inline void RegisterKernelClass(const char* op_type, const char* library_type,
+                                Func func) {
+  std::string library(library_type);
+  std::string data_layout = "ANYLAYOUT";
+  if (library == "MKLDNN") {
+    data_layout = "MKLDNNLAYOUT";
+  }
+  OpKernelType key(ToDataType(std::type_index(typeid(T))), PlaceType(),
+                   StringToDataLayout(data_layout),
+                   StringToLibraryType(library_type));
+  OperatorWithKernel::AllOpKernels()[op_type][key] = func;
+}
+
 template <typename PlaceType, size_t I, typename... KernelTypes> struct OpKernelRegistrarFunctor<PlaceType, false, I, KernelTypes...> { using KERNEL_TYPE =
@@ -83,10 +97,10 @@ struct OpKernelRegistrarFunctor { void operator()(const char* op_type, const char* library_type) const { using T = typename KERNEL_TYPE::ELEMENT_TYPE;
-    OpKernelType key(ToDataType(std::type_index(typeid(T))), PlaceType(),
-                     DataLayout::kAnyLayout, StringToLibraryType(library_type));
-    OperatorWithKernel::AllOpKernels()[op_type][key].reset(new KERNEL_TYPE);
-
+    RegisterKernelClass<PlaceType, T>(
+        op_type, library_type, [](const framework::ExecutionContext& ctx) {
+          KERNEL_TYPE().Compute(ctx);
+        });
 constexpr auto size = std::tuple_size<std::tuple<KernelTypes...>>::value; OpKernelRegistrarFunctor<PlaceType, I + 1 == size, I + 1, KernelTypes...> func;
@@ -99,7 +113,8 @@ struct OpKernelRegistrarFunctor { void operator()(const char* op_type, const
char* library_type) const {} };
-// User can register many kernel in one place. The data type could be different.
+// User can register many kernels in one place. The data type could be
+// different.
 template <typename PlaceType, typename... KernelType> class OpKernelRegistrar : public Registrar { public:
@@ -109,6 +124,47 @@ class OpKernelRegistrar : public Registrar { } };
+template <typename PlaceType, bool at_end, size_t I,
+          typename... DataTypeAndKernelType>
+struct OpKernelRegistrarFunctorEx;
+
+template <typename PlaceType, typename... DataTypeAndKernelType>
+class OpKernelRegistrarEx : public Registrar {
+ public:
+  explicit OpKernelRegistrarEx(const char* op_type, const char* library_type) {
+    OpKernelRegistrarFunctorEx<PlaceType, false, 0, DataTypeAndKernelType...>
+        func;
+    func(op_type, library_type);
+  }
+};
+
+template <typename PlaceType, size_t I, typename... DataTypeAndKernelType>
+struct OpKernelRegistrarFunctorEx<PlaceType, true, I,
+                                  DataTypeAndKernelType...> {
+  void operator()(const char* op_type, const char* library_type) const {}
+};
+
+template <typename PlaceType, size_t I, typename... DataTypeAndKernelType>
+struct OpKernelRegistrarFunctorEx<PlaceType, false, I,
+                                  DataTypeAndKernelType...> {
+  using Functor =
+      typename std::tuple_element<I + 1,
+                                  std::tuple<DataTypeAndKernelType...>>::type;
+  using T =
+      typename std::tuple_element<I, std::tuple<DataTypeAndKernelType...>>::type;
+
+  void operator()(const char* op_type, const char* library_type) const {
+    RegisterKernelClass<PlaceType, T>(op_type, library_type, Functor());
+
+    constexpr auto size =
+        std::tuple_size<std::tuple<DataTypeAndKernelType...>>::value;
+    OpKernelRegistrarFunctorEx<PlaceType, I + 2 >= size, I + 2,
+                               DataTypeAndKernelType...>
+        func;
+    func(op_type, library_type);
+  }
+};
+
 /** * check if MACRO is used in GLOBAL NAMESPACE. */
@@ -126,21 +182,15 @@ class OpKernelRegistrar : public Registrar { VarTypeInference InferShapeBase */
-#define REGISTER_OPERATOR(op_type, op_class, ...)                        \
-  STATIC_ASSERT_GLOBAL_NAMESPACE(                                        \
-      __reg_op__##op_type,                                               \
-      "REGISTER_OPERATOR must be called in global namespace");           \
-  class _OpClass_##op_type##_ : public op_class {                        \
-   public:                                                               \
-    DEFINE_OP_CLONE_METHOD(_OpClass_##op_type##_);                       \
-    DEFINE_OP_CONSTRUCTOR(_OpClass_##op_type##_, op_class);              \
-  };                                                                     \
-  static ::paddle::framework::OperatorRegistrar<_OpClass_##op_type##_,   \
-                                                ##__VA_ARGS__>           \
-      __op_registrar_##op_type##__(#op_type);                            \
-  int TouchOpRegistrar_##op_type() {                                     \
-    __op_registrar_##op_type##__.Touch();                                \
-    return 0;                                                            \
+#define REGISTER_OPERATOR(op_type, op_class, ...)                      \
+  STATIC_ASSERT_GLOBAL_NAMESPACE(                                      \
+      __reg_op__##op_type,                                             \
+      "REGISTER_OPERATOR must be called in global namespace");         \
+  static ::paddle::framework::OperatorRegistrar<op_class, ##__VA_ARGS__> \
+      __op_registrar_##op_type##__(#op_type);                          \
+  int TouchOpRegistrar_##op_type() {                                   \
+    __op_registrar_##op_type##__.Touch();                              \
+    return 0;                                                          \
 }
 #define REGISTER_OP_WITHOUT_GRADIENT(op_type, op_class, op_maker_class) \
@@ -149,15 +199,15 @@ class OpKernelRegistrar : public Registrar { /** * Macro to register OperatorKernel. */
-#define REGISTER_OP_KERNEL(op_type, LIBRARY_TYPE, place_class, ...)       \
+#define REGISTER_OP_KERNEL(op_type, library_type, place_class, ...)       \
 STATIC_ASSERT_GLOBAL_NAMESPACE(                                          \
-      __reg_op_kernel_##op_type##_##LIBRARY_TYPE##__,                      \
+      __reg_op_kernel_##op_type##_##library_type##__,                      \
 "REGISTER_OP_KERNEL must be called in global namespace");            \
 static ::paddle::framework::OpKernelRegistrar<place_class, __VA_ARGS__> \
-      __op_kernel_registrar_##op_type##_##LIBRARY_TYPE##__(#op_type,       \
-                                                           #LIBRARY_TYPE); \
-  int TouchOpKernelRegistrar_##op_type##_##LIBRARY_TYPE() {                \
-    __op_kernel_registrar_##op_type##_##LIBRARY_TYPE##__.Touch();          \
+      __op_kernel_registrar_##op_type##_##library_type##__(#op_type,       \
+                                                           #library_type); \
+  int TouchOpKernelRegistrar_##op_type##_##library_type() {                \
+    __op_kernel_registrar_##op_type##_##library_type##__.Touch();          \
 return 0;                                                              \
 }
@@ -167,6 +217,25 @@ #define REGISTER_OP_CPU_KERNEL(op_type, ...)
\ REGISTER_OP_KERNEL(op_type, CPU, ::paddle::platform::CPUPlace, __VA_ARGS__)
+#define REGISTER_OP_KERNEL_EX(op_type, library_type, place_class, ...)        \
+  STATIC_ASSERT_GLOBAL_NAMESPACE(                                             \
+      __reg_op_kernel_##op_type##_##library_type##__,                         \
+      "REGISTER_OP_KERNEL_EX must be called in global namespace");            \
+  static ::paddle::framework::OpKernelRegistrarEx<place_class, __VA_ARGS__>   \
+      __op_kernel_registrar_##op_type##_##library_type##__(#op_type,          \
+                                                           #library_type);    \
+  int TouchOpKernelRegistrar_##op_type##_##library_type() {                   \
+    __op_kernel_registrar_##op_type##_##library_type##__.Touch();             \
+    return 0;                                                                 \
+  }
+
+#define REGISTER_OP_CUDA_KERNEL_FUNCTOR(op_type, ...) \
+  REGISTER_OP_KERNEL_EX(op_type, CUDA, ::paddle::platform::CUDAPlace, \
+                        __VA_ARGS__)
+
+#define REGISTER_OP_CPU_KERNEL_FUNCTOR(op_type, ...) \
+  REGISTER_OP_KERNEL_EX(op_type, CPU, ::paddle::platform::CPUPlace, __VA_ARGS__)
+
 /** * Macro to mark what Operator and Kernel * we will use and tell the compiler to
diff --git a/paddle/fluid/framework/op_registry_test.cc b/paddle/fluid/framework/op_registry_test.cc
index 6dc4cf261b..04996d7b09 100644
--- a/paddle/fluid/framework/op_registry_test.cc
+++ b/paddle/fluid/framework/op_registry_test.cc
@@ -33,8 +33,7 @@ class CosineOp : public OperatorBase { class CosineOpProtoAndCheckerMaker : public OpProtoAndCheckerMaker { public:
-  CosineOpProtoAndCheckerMaker(OpProto* proto, OpAttrChecker* op_checker)
-      : OpProtoAndCheckerMaker(proto, op_checker) {
+  void Make() {
 AddInput("input", "input of cosine op"); AddOutput("output", "output of cosine op"); AddAttr<float>("scale", "scale of cosine op")
@@ -55,8 +54,7 @@ class MyTestOp : public OperatorBase { class MyTestOpProtoAndCheckerMaker : public OpProtoAndCheckerMaker { public:
-  MyTestOpProtoAndCheckerMaker(OpProto* proto, OpAttrChecker* op_checker)
-      : OpProtoAndCheckerMaker(proto, op_checker) {
+  void Make() {
 AddInput("input", "input of cosine op").AsDuplicable(); AddOutput("output", "output of cosine op").AsIntermediate(); auto my_checker = [](int i) {
@@ -195,15 +193,10 @@ TEST(OpRegistry, CustomChecker) { ASSERT_EQ(test_attr, 4); }
-class CosineOpComplete : public paddle::framework::CosineOp {
- public:
-  DEFINE_OP_CONSTRUCTOR(CosineOpComplete, paddle::framework::CosineOp);
-  DEFINE_OP_CLONE_METHOD(CosineOpComplete);
-};
-
 TEST(OperatorRegistrar, Test) { paddle::framework::OperatorRegistrar<
-      CosineOpComplete, paddle::framework::CosineOpProtoAndCheckerMaker>
+      paddle::framework::CosineOp,
+      paddle::framework::CosineOpProtoAndCheckerMaker>
 reg("cos"); }
@@ -212,10 +205,7 @@ namespace framework { class OpKernelTestMaker : public OpProtoAndCheckerMaker { public:
-  OpKernelTestMaker(OpProto* proto, OpAttrChecker* op_checker)
-      : OpProtoAndCheckerMaker(proto, op_checker) {
-    AddComment("NoGradOp, same input output. no Grad");
-  }
+  void Make() { AddComment("NoGradOp, same input output. no Grad"); }
 };
 class OpWithKernelTest : public OperatorWithKernel {
@@ -275,9 +265,9 @@ TEST(OperatorRegistrar, CUDA) { static int op_test_value = 0;
-using paddle::platform::DeviceContext;
 using paddle::platform::CPUDeviceContext; using paddle::platform::CUDADeviceContext;
+using paddle::platform::DeviceContext;
 namespace paddle { namespace framework {
diff --git a/paddle/fluid/framework/operator.cc b/paddle/fluid/framework/operator.cc
index d70f26026c..d04f774496 100644
--- a/paddle/fluid/framework/operator.cc
+++ b/paddle/fluid/framework/operator.cc
@@ -18,12 +18,16 @@ limitations under the License.
*/ #include "paddle/fluid/framework/data_transform.h" #include "paddle/fluid/framework/executor.h"
+#include "paddle/fluid/framework/lod_tensor.h"
 #include "paddle/fluid/framework/operator.h" #include "paddle/fluid/framework/shape_inference.h" #include "paddle/fluid/framework/var_type.h" #include "paddle/fluid/platform/profiler.h" DECLARE_bool(benchmark);
+DEFINE_bool(check_nan_inf, false,
+            "Checking whether operators produce NAN/INF or not. It will be "
+            "extremely slow so please use this flag wisely.");
 namespace paddle { namespace framework {
@@ -54,7 +58,11 @@ static DDim GetDims(const Scope& scope, const std::string& name, } if (var->IsType<LoDTensor>()) {
-    return var->Get<LoDTensor>().dims();
+    const LoDTensor& tensor = var->Get<LoDTensor>();
+    if (UNLIKELY(!tensor.IsInitialized())) {
+      return DDim({-1});
+    }
+    return tensor.dims();
 } else if (var->IsType<SelectedRows>()) { if (get_actual_dim) { return var->Get<SelectedRows>().value().dims();
@@ -66,6 +74,39 @@ } }
+static std::string GetDtype(const Scope& scope, const std::string& name) {
+  Variable* var = scope.FindVar(name);
+  if (var == nullptr) {
+    return "";
+  }
+
+  if (var->IsType<LoDTensor>()) {
+    const LoDTensor& tensor = var->Get<LoDTensor>();
+    if (UNLIKELY(!tensor.IsInitialized())) {
+      return "";
+    }
+    return DataTypeToString(ToDataType(tensor.type()));
+  } else if (var->IsType<SelectedRows>()) {
+    return DataTypeToString(
+        ToDataType(var->Get<SelectedRows>().value().type()));
+  } else {
+    return "";
+  }
+}
+
+static int GetRowSize(const Scope& scope, const std::string& name) {
+  Variable* var = scope.FindVar(name);
+  if (var == nullptr) {
+    return -1;
+  }
+
+  if (var->IsType<SelectedRows>()) {
+    return var->Get<SelectedRows>().rows().size();
+  }
+
+  return -1;
+}
+
 static LoD GetLoD(const Scope& scope, const std::string& name) { Variable* var = scope.FindVar(name); auto default_lod = LoD({{}});
@@ -75,13 +116,18 @@ } if (var->IsType<LoDTensor>()) {
-    return var->Get<LoDTensor>().lod();
+    const LoDTensor& tensor = var->Get<LoDTensor>();
+    if (UNLIKELY(!tensor.IsInitialized())) {
+      return default_lod;
+    }
+    return tensor.lod();
 } else { return default_lod; } }
 void OperatorBase::Run(const Scope& scope, const platform::Place& place) {
+  VLOG(4) << place << " " << DebugStringEx(&scope);
 if (platform::is_gpu_place(place)) { #ifndef PADDLE_WITH_CUDA PADDLE_THROW("Cannot run operator on place %s", place); @@ -90,7 +136,10 @@ void OperatorBase::Run(const Scope& scope, const platform::Place& place) { platform::SetDeviceId(dev_id); #endif }
+  platform::DeviceContextPool& pool = platform::DeviceContextPool::Instance();
+  platform::RecordEvent record_event(Type(), pool.Get(place));
 RunImpl(scope, place);
+  VLOG(3) << place << " " << DebugStringEx(&scope);
 }
 bool OperatorBase::HasInputs(const std::string& name) const {
@@ -150,6 +199,12 @@ std::string OperatorBase::DebugStringEx(const Scope* scope) const { for (size_t i = 0; i < input.second.size(); ++i) { ss << input.second[i]; if (scope) {
+        int row_size = GetRowSize(*scope, input.second[i]);
+        if (row_size >= 0) {
+          ss << "[row_size=" << row_size << "]";
+        }
+        std::string dtype = GetDtype(*scope, input.second[i]);
+        ss << ":" << dtype;
 ss << "[" << GetDims(*scope, input.second[i], true) << "]"; ss << "(" << GetLoD(*scope, input.second[i]) << ")"; }
@@ -170,6 +225,10 @@ std::string OperatorBase::DebugStringEx(const Scope* scope) const { for (size_t i = 0; i < output.second.size(); ++i) { ss << output.second[i]; if (scope) {
+        int row_size = GetRowSize(*scope, output.second[i]);
+        if (row_size >= 0) {
+          ss <<
"[row_size=" << row_size << "]"; + } ss << "[" << GetDims(*scope, output.second[i], true) << "]"; ss << "(" << GetLoD(*scope, output.second[i]) << ")"; } @@ -290,6 +349,38 @@ static Tensor* GetMutableTensorFromVar(Variable* var) { } } +bool ExecutionContext::HasInput(const std::string& name) const { + if (!op_.HasInputs(name)) { + return false; + } + auto& ins = Inputs(name); + size_t length = ins.size(); + if (length == 0) { + return false; + } + PADDLE_ENFORCE_EQ(length, 1UL, + "Input %s should not have more than one inputs", name); + auto arg = ins[0]; + auto* var = arg == kEmptyVarName ? nullptr : scope_.FindVar(arg); + return var != nullptr; +} + +bool ExecutionContext::HasOutput(const std::string& name) const { + if (!op_.HasOutputs(name)) { + return false; + } + auto& outs = Outputs(name); + size_t length = outs.size(); + if (length == 0) { + return false; + } + PADDLE_ENFORCE_EQ(length, 1UL, + "Output %s should not have more than one inputs", name); + auto arg = outs[0]; + auto* var = arg == kEmptyVarName ? nullptr : scope_.FindVar(arg); + return var != nullptr; +} + template <> const Tensor* ExecutionContext::Input(const std::string& name) const { auto* var = InputVar(name); @@ -441,10 +532,25 @@ class RuntimeInferShapeContext : public InferShapeContext { auto* out_tensor = out_var->GetMutable(); out_tensor->set_lod(in_tensor.lod()); - // TODO(dzhwinter) : reuse ShareLoD in most operators. - // Need to call ShareLayout explicitly in sequence related ops. - // Shall we have a better method to shared info between in/out Tensor? - out_tensor->set_layout(in_tensor.layout()); +// TODO(dzhwinter) : reuse ShareLoD in most operators. +// Need to call ShareLayout explicitly in sequence related ops. +// Shall we have a better method to shared info between in/out Tensor? +#ifdef PADDLE_WITH_MKLDNN + // Fix me: ugly workaround below + // Correct solution: + // set_layout() should NOT be called here (i.e. ShareLoD). Instead, + // layout of output tensor should be set "manually" in Compute() + // of each OPKernel. The reason layout should NOT be shared between + // input and output "automatically" (now by InferShape()->ShareLoD()) + // is that layout transform may occur after InferShape(). + // Workaround: + // Skip set_layout() when input layout is kMKLDNN + // This is to avoid kMKLDNN is populated wrongly into a non-MKLDNN + // OPKernel. 
In all MKLDNN OpKernels, set_layout(kMKLDNN) should be called + // in Compute() + if (in_tensor.layout() != DataLayout::kMKLDNN) +#endif + out_tensor->set_layout(in_tensor.layout()); } void ShareLayout(const std::string& in, const std::string& out, size_t i = 0, @@ -466,6 +572,7 @@ class RuntimeInferShapeContext : public InferShapeContext { protected: DDim GetDim(const std::string& name) const override { Variable* var = scope_.FindVar(name); + PADDLE_ENFORCE_NOT_NULL(var); if (var->IsType()) { return var->Get().dims(); } else if (var->IsType()) { @@ -513,6 +620,20 @@ class RuntimeInferShapeContext : public InferShapeContext { const Scope& scope_; }; +static void CheckTensorNANOrInf(const std::string& name, + const framework::Tensor& tensor) { + if (tensor.memory_size() == 0) { + return; + } + if (!IsType(tensor.type()) && !IsType(tensor.type())) { + return; + } + PADDLE_ENFORCE(!framework::TensorContainsInf(tensor), + "Tensor %s contains Inf", name); + PADDLE_ENFORCE(!framework::TensorContainsNAN(tensor), + "Tensor %s contains NAN", name); +} + void OperatorWithKernel::RunImpl(const Scope& scope, const platform::Place& place) const { RuntimeInferShapeContext infer_shape_ctx(*this, scope); @@ -520,9 +641,6 @@ void OperatorWithKernel::RunImpl(const Scope& scope, platform::DeviceContextPool& pool = platform::DeviceContextPool::Instance(); auto* dev_ctx = pool.Get(place); - // For profiling, don't move out of this function because that will result - // in the failure of multi-GPU profiling. - platform::RecordEvent record_event(Type(), dev_ctx); // check if op[type] has kernel registered. auto& all_op_kernels = AllOpKernels(); auto kernels_iter = all_op_kernels.find(type_); @@ -531,8 +649,6 @@ void OperatorWithKernel::RunImpl(const Scope& scope, "There are no kernels which are registered in the %s operator.", type_); } - ExecutionContext ctx(*this, scope, *dev_ctx); - OpKernelMap& kernels = kernels_iter->second; // TODO(dzhwinter) : kernel fallback mechanism will be added when all the @@ -542,67 +658,127 @@ void OperatorWithKernel::RunImpl(const Scope& scope, // Do selection // } - auto expected_kernel_key = this->GetExpectedKernelType(ctx); + auto expected_kernel_key = + this->GetExpectedKernelType(ExecutionContext(*this, scope, *dev_ctx)); VLOG(3) << "expected_kernel_key:" << expected_kernel_key; auto kernel_iter = kernels.find(expected_kernel_key); +#ifdef PADDLE_WITH_MKLDNN + // workaround for missing MKLDNN kernel when FLAGS_use_mkldnn env var is set + if (kernel_iter == kernels.end() && + expected_kernel_key.library_type_ == LibraryType::kMKLDNN) { + VLOG(3) << "missing MKLDNN kernel: falling back to PLAIN one"; + expected_kernel_key.library_type_ = LibraryType::kPlain; + expected_kernel_key.data_layout_ = DataLayout::kAnyLayout; + kernel_iter = kernels.find(expected_kernel_key); + } +#endif if (kernel_iter == kernels.end()) { PADDLE_THROW("op %s does not have kernel for %s", type_, KernelTypeToString(expected_kernel_key)); } - // do data transform - Scope& new_scope = scope.NewScope(); + // do data transform + std::vector transfered_inplace_vars; + auto* transfer_scope = + TryTransferData(scope, expected_kernel_key, &transfered_inplace_vars); - std::vector inplace_vars; - for (auto& var_name_item : this->Inputs()) { - for (auto& var_name : var_name_item.second) { - auto* var = scope.FindVar(var_name); - if (var && VarIsTensor(var)) { - auto* tensor_in = GetTensorFromVar(var); - if (tensor_in->IsInitialized()) { - auto kernel_type_for_var = this->GetKernelTypeForVar(
var_name_item.first, *tensor_in, expected_kernel_key); - if (TransFromNeeded(kernel_type_for_var, expected_kernel_key)) { - auto out_var_names = OutputVars(true); - if (std::find(out_var_names.begin(), out_var_names.end(), - var_name) != out_var_names.end()) { - inplace_vars.push_back(var_name); - } - VLOG(3) << "Transform Variable " << var_name << " from " - << kernel_type_for_var << " to " << expected_kernel_key; - auto* trans_var = new_scope.Var(var_name); - std::shared_ptr out(new Tensor); - DataTransform(expected_kernel_key, kernel_type_for_var, *tensor_in, - out.get()); - CopyVariableWithTensor(*var, *(out.get()), trans_var); - } - } - } - } + // exec scope is the scope that the kernel is actually executed on. + const Scope& exec_scope = + (transfer_scope == nullptr ? scope : *transfer_scope); + + if (!(expected_kernel_key.place_ == dev_ctx->GetPlace())) { + dev_ctx = pool.Get(expected_kernel_key.place_); } - auto* new_dev_ctx = pool.Get(expected_kernel_key.place_); - kernel_iter->second->Compute( - ExecutionContext(*this, new_scope, *new_dev_ctx)); + kernel_iter->second(ExecutionContext(*this, exec_scope, *dev_ctx)); + + if (!transfered_inplace_vars.empty()) { + // there are in-place variables that have been transferred. + TransferInplaceVarsBack(scope, transfered_inplace_vars, *transfer_scope); + } + + /*For profiling/benchmark only*/ + if (FLAGS_benchmark) { + dev_ctx->Wait(); + } + if (FLAGS_check_nan_inf) { + for (auto& vname : OutputVars(true)) { + auto* var = exec_scope.FindVar(vname); + if (var == nullptr) continue; + if (var->IsType()) { + CheckTensorNANOrInf(vname, var->Get()); + } else if (var->IsType()) { + CheckTensorNANOrInf(vname, var->Get().value()); + } + } + } +} +void OperatorWithKernel::TransferInplaceVarsBack( + const Scope& scope, const std::vector& inplace_vars, + const Scope& transfer_scope) const { for (auto& var_name : inplace_vars) { VLOG(3) << "share inplace var " + var_name + " back to its original scope"; auto* original_tensor = GetMutableTensorFromVar(scope.FindVar(var_name)); - auto* transformed_tensor = GetTensorFromVar(new_scope.FindVar(var_name)); + auto* transformed_tensor = + GetTensorFromVar(transfer_scope.FindVar(var_name)); original_tensor->ShareDataWith(*transformed_tensor); } +} - /*For profiling/benchmark only*/ - if (FLAGS_benchmark) { - new_dev_ctx->Wait(); +Scope* OperatorWithKernel::TryTransferData( + const Scope& scope, const OpKernelType& expected_kernel_key, + std::vector* transfered_inplace_vars) const { + Scope* new_scope = nullptr; + for (auto& var_name_item : Inputs()) { + for (auto& var_name : var_name_item.second) { + auto* var = scope.FindVar(var_name); + // Only tensors can be transferred to another device.
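Aside: the transfer-scope flow introduced above is easier to follow in isolation. The sketch below models the same pattern -- copy-transform mismatched inputs into a lazily created child scope, run the kernel there, then share in-place variables back. MiniScope, MiniTensor and TryTransfer are simplified stand-ins written for this note, not Paddle's real Scope/Tensor/TryTransferData.

#include <iostream>
#include <memory>
#include <string>
#include <unordered_map>
#include <vector>

struct MiniTensor {
  std::string layout;
};

struct MiniScope {
  std::unordered_map<std::string, MiniTensor> vars;
};

// Inputs whose layout differs from what the kernel expects are "transformed"
// into a freshly created child scope; in-place variables are remembered so
// they can be shared back after the kernel runs.
std::unique_ptr<MiniScope> TryTransfer(const MiniScope& scope,
                                       const std::string& expected_layout,
                                       std::vector<std::string>* inplace_vars) {
  std::unique_ptr<MiniScope> child;
  for (const auto& kv : scope.vars) {
    if (kv.second.layout == expected_layout) continue;
    if (!child) child.reset(new MiniScope);
    child->vars[kv.first] = MiniTensor{expected_layout};
    inplace_vars->push_back(kv.first);  // pretend every input is also an output
  }
  return child;  // nullptr means nothing needed transferring
}

int main() {
  MiniScope scope;
  scope.vars["x"] = MiniTensor{"NHWC"};
  std::vector<std::string> inplace_vars;
  auto transfer_scope = TryTransfer(scope, "NCHW", &inplace_vars);
  // The kernel would run on *transfer_scope when it is non-null, and the
  // in-place variables would then be shared back into the original scope.
  std::cout << (transfer_scope ? "transferred " : "no transfer ")
            << inplace_vars.size() << " var(s)\n";
}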
+ if (var == nullptr || !VarIsTensor(var)) { + continue; + } + + auto* tensor_in = GetTensorFromVar(var); + if (!tensor_in->IsInitialized()) { + continue; + } + + auto kernel_type_for_var = GetKernelTypeForVar( + var_name_item.first, *tensor_in, expected_kernel_key); + + if (!NeedTransform(kernel_type_for_var, expected_kernel_key)) { + continue; + } + + auto out_var_names = OutputVars(true); + if (std::find(out_var_names.begin(), out_var_names.end(), var_name) != + out_var_names.end()) { + transfered_inplace_vars->emplace_back(var_name); + } + + VLOG(3) << "Transform Variable " << var_name << " from " + << kernel_type_for_var << " to " << expected_kernel_key; + + if (new_scope == nullptr) { + new_scope = &scope.NewScope(); + } + + auto* trans_var = new_scope->Var(var_name); + Tensor out; + TransformData(expected_kernel_key, kernel_type_for_var, *tensor_in, &out); + SetTensorToVariable(*var, out, trans_var); + } } + + return new_scope; } proto::VarType::Type OperatorWithKernel::IndicateDataType( const ExecutionContext& ctx) const { auto& scope = ctx.scope(); int data_type = -1; + std::string last_input_name; for (auto& input : this->inputs_) { for (auto& ipt_name : input.second) { auto* var = scope.FindVar(ipt_name); @@ -617,9 +793,12 @@ proto::VarType::Type OperatorWithKernel::IndicateDataType( } if (t != nullptr) { int tmp = static_cast(ToDataType(t->type())); - PADDLE_ENFORCE(tmp == data_type || data_type == -1, - "DataType of Paddle Op %s must be the same.", Type()); + PADDLE_ENFORCE( + tmp == data_type || data_type == -1, + "DataType of Paddle Op %s must be the same. Get %s(%d) != %s(%d)", + Type(), last_input_name, data_type, ipt_name, tmp); data_type = tmp; + last_input_name = ipt_name; } } } @@ -636,7 +815,8 @@ OpKernelType OperatorWithKernel::GetExpectedKernelType( OpKernelType OperatorWithKernel::GetKernelTypeForVar( const std::string& var_name, const Tensor& tensor, const OpKernelType& expected_kernel_type) const { - return OpKernelType(expected_kernel_type.data_type_, tensor.place()); + return OpKernelType(expected_kernel_type.data_type_, tensor.place(), + tensor.layout()); } } // namespace framework diff --git a/paddle/fluid/framework/operator.h b/paddle/fluid/framework/operator.h index d373c48b1a..1040eb882b 100644 --- a/paddle/fluid/framework/operator.h +++ b/paddle/fluid/framework/operator.h @@ -33,7 +33,6 @@ limitations under the License. */ #include "paddle/fluid/framework/tensor.h" #include "paddle/fluid/platform/device_context.h" #include "paddle/fluid/platform/variant.h" -#include "paddle/utils/Error.h" namespace paddle { namespace framework { @@ -122,10 +121,6 @@ class OperatorBase { //! Get all outputs variable names virtual std::vector OutputVars(bool has_intermediate) const; - // Return a new operator instance, which is as same as this. - // Use unique_ptr to prevent caller forget to delete this pointer. - virtual std::unique_ptr Clone() const = 0; - protected: std::string type_; // NOTE: in case of OpGrad, inputs_ contains: @@ -146,37 +141,6 @@ class OperatorBase { const platform::Place& place) const = 0; }; -// Macro for define a clone method. -// If you are writing an kernel operator, `Clone` will be defined when you -// register it. i.e. `Clone` method is not needed to define by yourself. -#define DEFINE_OP_CLONE_METHOD(cls) \ - std::unique_ptr<::paddle::framework::OperatorBase> Clone() const final { \ - return std::unique_ptr<::paddle::framework::OperatorBase>(new cls(*this)); \ - } - -// Macro for define a default constructor for Operator. 
-// You can also use -// using PARENT_CLASS::PARENT_CLASS; -// to use parent's constructor. -#define DEFINE_OP_CONSTRUCTOR(cls, parent_cls) \ - cls(const std::string& type, \ - const ::paddle::framework::VariableNameMap& inputs, \ - const ::paddle::framework::VariableNameMap& outputs, \ - const paddle::framework::AttributeMap& attrs) \ - : parent_cls(type, inputs, outputs, attrs) {} - -class NOP : public OperatorBase { - public: - using OperatorBase::OperatorBase; - std::unique_ptr Clone() const override { - return std::unique_ptr(new NOP(*this)); - } - - private: - void RunImpl(const Scope& scope, - const platform::Place& place) const override {} -}; - class ExecutionContext { public: ExecutionContext(const OperatorBase& op, const Scope& scope, @@ -192,6 +156,10 @@ class ExecutionContext { return op_.Attr(name); } + bool HasInput(const std::string& name) const; + + bool HasOutput(const std::string& name) const; + size_t InputSize(const std::string& name) const { return op_.Inputs(name).size(); } @@ -344,9 +312,9 @@ class OpKernel : public OpKernelBase { class OperatorWithKernel : public OperatorBase { public: + using OpKernelFunc = std::function; using OpKernelMap = - std::unordered_map, - OpKernelType::Hash>; + std::unordered_map; OperatorWithKernel(const std::string& type, const VariableNameMap& inputs, const VariableNameMap& outputs, const AttributeMap& attrs) @@ -381,6 +349,20 @@ class OperatorWithKernel : public OperatorBase { // same. proto::VarType::Type IndicateDataType(const ExecutionContext& ctx) const; void RunImpl(const Scope& scope, const platform::Place& place) const final; + + /** + * Transfer data from scope to a transferred scope. If there is no data that + * needs to be transferred, it returns nullptr. + * + * transfered_inplace_vars is an output vector. + */ + Scope* TryTransferData( + const Scope& scope, const OpKernelType& expected_kernel_key, + std::vector* transfered_inplace_vars) const; + + void TransferInplaceVarsBack(const Scope& scope, + const std::vector& inplace_vars, + const Scope& exec_scope) const; }; extern bool OpSupportGPU(const std::string& op_type); diff --git a/paddle/fluid/framework/operator_test.cc b/paddle/fluid/framework/operator_test.cc index 1bf8c81469..ac9dd8245a 100644 --- a/paddle/fluid/framework/operator_test.cc +++ b/paddle/fluid/framework/operator_test.cc @@ -13,10 +13,10 @@ See the License for the specific language governing permissions and limitations under the License.
*/ #include "gtest/gtest.h" -#include "paddle/fluid/framework/init.h" #include "paddle/fluid/framework/op_info.h" #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/framework/operator.h" +#include "paddle/fluid/platform/init.h" namespace paddle { namespace framework { @@ -46,8 +46,7 @@ class OpWithoutKernelTest : public OperatorBase { class OpWithoutKernelCheckerMaker : public OpProtoAndCheckerMaker { public: - OpWithoutKernelCheckerMaker(OpProto* proto, OpAttrChecker* op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() { AddInput("input", "input of test op"); AddOutput("output", "output of test op"); AddAttr("scale", "scale of cosine op"); @@ -98,8 +97,7 @@ namespace framework { class OpKernelTestProtoAndCheckerMaker : public OpProtoAndCheckerMaker { public: - OpKernelTestProtoAndCheckerMaker(OpProto* proto, OpAttrChecker* op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() { AddInput("x", "input of test op"); AddOutput("y", "output of test op"); AddAttr("scale", "scale of cosine op") @@ -137,9 +135,7 @@ class CPUKernelTest : public OpKernel { class OpKernelTestMultiInputsProtoAndCheckerMaker : public OpProtoAndCheckerMaker { public: - OpKernelTestMultiInputsProtoAndCheckerMaker(OpProto* proto, - OpAttrChecker* op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() { AddInput("xs", "inputs of test op").AsDuplicable(); AddInput("k", "input of test op"); AddOutput("ys", "outputs of test op").AsDuplicable(); @@ -251,26 +247,3 @@ TEST(OpKernel, multi_inputs) { auto op = paddle::framework::OpRegistry::CreateOp(op_desc); op->Run(scope, cpu_place); } - -class OperatorClone : public paddle::framework::OperatorBase { - public: - DEFINE_OP_CLONE_METHOD(OperatorClone); - OperatorClone(const std::string& type, - const paddle::framework::VariableNameMap& inputs, - const paddle::framework::VariableNameMap& outputs, - const paddle::framework::AttributeMap& attrs) - : OperatorBase(type, inputs, outputs, attrs) {} - - private: - void RunImpl(const paddle::framework::Scope& scope, - const paddle::platform::Place& place) const override {} -}; - -TEST(Operator, Clone) { - paddle::framework::InitDevices(true); - OperatorClone a("ABC", paddle::framework::VariableNameMap{}, - paddle::framework::VariableNameMap{}, - paddle::framework::AttributeMap{}); - auto b = a.Clone(); - ASSERT_EQ(a.Type(), b->Type()); -} diff --git a/paddle/fluid/framework/parallel_executor.cc b/paddle/fluid/framework/parallel_executor.cc index 9eea8d1c18..275cb8c592 100644 --- a/paddle/fluid/framework/parallel_executor.cc +++ b/paddle/fluid/framework/parallel_executor.cc @@ -18,17 +18,81 @@ limitations under the License. 
*/ #include #include +#include "paddle/fluid/framework/ir/graph.h" +#include "paddle/fluid/framework/ir/graph_viz_pass.h" + #ifdef PADDLE_WITH_CUDA #include "paddle/fluid/platform/nccl_helper.h" #endif -#include "paddle/fluid/framework/details/multi_devices_graph_builder.h" +#include "paddle/fluid/framework/details/multi_devices_graph_check_pass.h" +#include "paddle/fluid/framework/details/multi_devices_graph_print_pass.h" +#include "paddle/fluid/framework/details/scope_buffered_ssa_graph_executor.h" #include "paddle/fluid/framework/details/threaded_ssa_graph_executor.h" #include "paddle/fluid/platform/profiler.h" namespace paddle { namespace framework { +std::unique_ptr ApplyParallelExecutorPass( + const ProgramDesc &main_program, const std::vector &places, + const std::string &loss_var_name, + const std::unordered_set ¶m_names, + const std::vector &local_scopes, const bool use_cuda, +#ifdef PADDLE_WITH_CUDA + const BuildStrategy &strategy, platform::NCCLContextMap *nccl_ctxs) { +#else + const BuildStrategy &strategy) { +#endif + // Convert the program to graph. + std::unique_ptr graph(new ir::Graph(main_program)); + + // Apply a graph viz pass to record a graph. + if (!strategy.debug_graphviz_path_.empty()) { + auto viz_pass = ir::PassRegistry::Instance().Get("graph_viz_pass"); + const std::string graph_path = string::Sprintf( + "%s%s", strategy.debug_graphviz_path_.c_str(), "_original_graph"); + viz_pass->Set("graph_viz_path", new std::string(graph_path)); + graph = viz_pass->Apply(std::move(graph)); + } + + // Convert graph to run on multi-devices. + auto multi_devices_pass = + ir::PassRegistry::Instance().Get("multi_devices_pass"); + multi_devices_pass->SetNotOwned>("places", + &places); + multi_devices_pass->SetNotOwned("loss_var_name", + &loss_var_name); + multi_devices_pass->SetNotOwned>( + "params", ¶m_names); + multi_devices_pass->SetNotOwned>("local_scopes", + &local_scopes); + multi_devices_pass->SetNotOwned("strategy", &strategy); + +#ifdef PADDLE_WITH_CUDA + platform::NCCLContextMap *nctx = use_cuda ? nccl_ctxs : nullptr; + multi_devices_pass->SetNotOwned("nccl_ctxs", nctx); +#endif + graph = multi_devices_pass->Apply(std::move(graph)); + + // Apply a graph print pass to record a graph with device info. + if (!strategy.debug_graphviz_path_.empty()) { + auto multi_devices_print_pass = + ir::PassRegistry::Instance().Get("multi_devices_print_pass"); + multi_devices_print_pass->SetNotOwned( + "debug_graphviz_path", &strategy.debug_graphviz_path_); + multi_devices_print_pass->Set( + "graph_printer", new details::GraphvizSSAGraphPrinter); + graph = multi_devices_print_pass->Apply(std::move(graph)); + } + + // Verify that the graph is correct for multi-device executor. 
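Aside: a toy version of the pass-pipeline shape used by ApplyParallelExecutorPass above: passes are looked up by name in a registry, and each pass consumes the graph and returns the rewritten one. ToyGraph and ToyRegistry are hypothetical stand-ins for illustration, not Paddle's ir::Graph or ir::PassRegistry.

#include <functional>
#include <iostream>
#include <map>
#include <memory>
#include <string>
#include <utility>

struct ToyGraph {
  int num_nodes = 0;
};

// Each pass takes ownership of the graph and hands it to the next pass.
using ToyPass =
    std::function<std::unique_ptr<ToyGraph>(std::unique_ptr<ToyGraph>)>;

std::map<std::string, ToyPass>& ToyRegistry() {
  static std::map<std::string, ToyPass> registry;
  return registry;
}

int main() {
  ToyRegistry()["grow_pass"] = [](std::unique_ptr<ToyGraph> g) {
    ++g->num_nodes;  // a pass rewrites the graph it owns
    return g;
  };
  std::unique_ptr<ToyGraph> graph(new ToyGraph);
  graph = ToyRegistry()["grow_pass"](std::move(graph));
  graph = ToyRegistry()["grow_pass"](std::move(graph));
  std::cout << graph->num_nodes << "\n";  // prints 2
}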
+ auto multi_devices_check_pass = + ir::PassRegistry::Instance().Get("multi_devices_check_pass"); + graph = multi_devices_check_pass->Apply(std::move(graph)); + return graph; +} + class ParallelExecutorPrivate { public: explicit ParallelExecutorPrivate(const std::vector &places) @@ -42,9 +106,9 @@ class ParallelExecutorPrivate { #ifdef PADDLE_WITH_CUDA std::unique_ptr nccl_ctxs_; #endif - - std::vector> var_types_; - bool own_local_scope; + bool own_local_scope_; + bool use_cuda_; + bool use_all_reduce_; }; std::vector &ParallelExecutor::GetLocalScopes() { @@ -52,73 +116,116 @@ std::vector &ParallelExecutor::GetLocalScopes() { } ParallelExecutor::ParallelExecutor( - size_t num_threads, bool use_event, const std::vector &places, const std::unordered_set &params, const std::unordered_set &bcast_vars, const ProgramDesc &main_program, const std::string &loss_var_name, - Scope *scope, const std::vector &local_scopes, bool allow_op_delay, - bool use_default_grad_scale) + Scope *scope, const std::vector &local_scopes, + const ExecutionStrategy &exec_strategy, const BuildStrategy &build_strategy, + size_t num_trainers, size_t trainer_id) : member_(new ParallelExecutorPrivate(places)) { member_->global_scope_ = scope; + member_->use_cuda_ = exec_strategy.use_cuda_; + member_->use_all_reduce_ = + build_strategy.reduce_ == BuildStrategy::ReduceStrategy::kAllReduce; + + if (!member_->use_all_reduce_) { + PADDLE_ENFORCE(places.size() > 1, + "If you set build_strategy.reduce with 'Reduce', " + "the number of places must be greater than 1."); + } // Step 1. Bcast the params to devs. // Create local scopes if (local_scopes.empty()) { - member_->own_local_scope = true; + member_->own_local_scope_ = true; member_->local_scopes_.emplace_back(member_->global_scope_); for (size_t i = 1; i < member_->places_.size(); ++i) { member_->local_scopes_.emplace_back(&scope->NewScope()); } } else { - member_->own_local_scope = false; + member_->own_local_scope_ = false; PADDLE_ENFORCE_EQ(member_->places_.size(), local_scopes.size()); for (size_t i = 0; i < member_->places_.size(); ++i) { member_->local_scopes_.emplace_back(&local_scopes[i]->NewScope()); } } + if (member_->use_cuda_) { // Bcast Parameters to all GPUs #ifdef PADDLE_WITH_CUDA - member_->nccl_ctxs_.reset(new platform::NCCLContextMap(member_->places_)); + auto *nccl_id_var = scope->FindVar(NCCL_ID_VARNAME); + ncclUniqueId *nccl_id = nullptr; + if (nccl_id_var != nullptr) { + nccl_id = nccl_id_var->GetMutable(); + } + member_->nccl_ctxs_.reset(new platform::NCCLContextMap( + member_->places_, nccl_id, num_trainers, trainer_id)); +#else + PADDLE_THROW("Not compiled with CUDA"); #endif - if (platform::is_gpu_place(places[0]) && member_->local_scopes_.size() != 1 && - local_scopes.empty()) { // Is CUDA - BCastParamsToGPUs(bcast_vars); } -// Startup Program has been run. All local scopes has correct parameters. -// Step 2. Convert main_program to SSA form and dependency graph. Also, insert + if (member_->local_scopes_.size() != 1 && local_scopes.empty()) { + BCastParamsToDevices(bcast_vars); + } + // Startup Program has been run. All local scopes have correct parameters. + + // Step 2. Create vars in each scope; + std::vector var_infos; + for (auto *var : main_program.Block(0).AllVars()) { + var_infos.emplace_back(); + var_infos.back().name_ = var->Name(); + var_infos.back().type_ = var->GetType(); + var_infos.back().persistable_ = var->Persistable(); + } + +// Step 3. Convert main_program to SSA form and dependency graph.
Also, insert // ncclOp #ifdef PADDLE_WITH_CUDA - details::MultiDevSSAGraphBuilder builder( - member_->places_, loss_var_name, params, member_->local_scopes_, - member_->nccl_ctxs_.get(), use_default_grad_scale); + std::unique_ptr graph = ApplyParallelExecutorPass( + main_program, member_->places_, loss_var_name, params, + member_->local_scopes_, member_->use_cuda_, build_strategy, + member_->nccl_ctxs_.get()); #else - details::MultiDevSSAGraphBuilder builder(member_->places_, loss_var_name, - params, member_->local_scopes_, - use_default_grad_scale); + std::unique_ptr graph = ApplyParallelExecutorPass( + main_program, member_->places_, loss_var_name, params, + member_->local_scopes_, member_->use_cuda_, build_strategy); #endif - auto graph = builder.Build(main_program); member_->executor_.reset(new details::ThreadedSSAGraphExecutor( - num_threads, use_event, member_->local_scopes_, places, std::move(graph), - allow_op_delay)); - - // Step 3. Create vars in each scope; - for (auto *var : main_program.Block(0).AllVars()) { - member_->var_types_.emplace_back(var->Name(), var->GetType(), - var->Persistable()); - } + exec_strategy, member_->local_scopes_, places, std::move(graph))); + member_->executor_.reset(new details::ScopeBufferedSSAGraphExecutor( + exec_strategy, member_->local_scopes_, std::move(var_infos), + member_->places_, std::move(member_->executor_))); } -void ParallelExecutor::BCastParamsToGPUs( +void ParallelExecutor::BCastParamsToDevices( const std::unordered_set &vars) const { -#ifdef PADDLE_WITH_CUDA - auto *main_scope = member_->local_scopes_[0]; - + // During the initializing bcast, all vars are bcast from device(0); + // otherwise, + // they are bcast from the specified device. + bool initializing = member_->executor_ ? false : true; for (auto &var : vars) { - auto *main_var = main_scope->FindVar(var); + int var_dev_id = -1; + if (member_->executor_) { + auto &sharded_var_device = + member_->executor_->Graph().Get( + details::kShardedVarDevice); + if (sharded_var_device.find(var) != sharded_var_device.end()) { + var_dev_id = sharded_var_device.at(var); + } + } + + if (!initializing && var_dev_id == -1) continue; + + framework::Variable *main_var = nullptr; + if (initializing) { + main_var = member_->local_scopes_[0]->FindVar(var); + } else { + main_var = member_->local_scopes_[var_dev_id]->FindVar(var); + } + if (main_var == nullptr || !main_var->IsType()) { continue; } @@ -126,13 +233,16 @@ void ParallelExecutor::BCastParamsToGPUs( auto &main_tensor = main_var->Get(); auto &dims = main_tensor.dims(); if (paddle::platform::is_gpu_place(main_tensor.place())) { +#ifdef PADDLE_WITH_CUDA + std::vector buffers; size_t numel = main_tensor.numel(); ncclDataType_t data_type = platform::ToNCCLDataType(main_tensor.type()); - platform::NCCLGroupGuard guard; for (size_t i = 0; i < member_->places_.size(); ++i) { auto place = member_->places_[i]; void *buffer; - if (i == 0) { + + if ((initializing && i == 0) || + (!initializing && static_cast(i) == var_dev_id)) { buffer = const_cast(main_tensor.data()); } else { auto local_scope = member_->local_scopes_[i]; @@ -140,66 +250,62 @@ void ParallelExecutor::BCastParamsToGPUs( t->Resize(dims); buffer = t->mutable_data(place, main_tensor.type()); } - auto &nccl_ctx = member_->nccl_ctxs_->at(place); - platform::dynload::ncclBcast(buffer, numel, data_type, 0, - nccl_ctx.comm_, nccl_ctx.stream()); + buffers.push_back(buffer); } + + PADDLE_ENFORCE_EQ(member_->places_.size(), buffers.size(), + "the number of buffers to bcast is NOT equal to the number of places"); + {
platform::NCCLGroupGuard guard; + for (size_t i = 0; i < member_->places_.size(); ++i) { + auto &nccl_ctx = member_->nccl_ctxs_->at(member_->places_[i]); + if (initializing) { + platform::dynload::ncclBcast(buffers[i], numel, data_type, 0, + nccl_ctx.comm_, nccl_ctx.stream()); + } else { + if (var_dev_id >= 0) { + platform::dynload::ncclBcast(buffers[i], numel, data_type, + var_dev_id, nccl_ctx.comm_, + nccl_ctx.stream()); + } + } + } + member_->nccl_ctxs_->WaitAll(); + } + +#else + PADDLE_THROW("Not compiled with CUDA"); +#endif } else { platform::CPUPlace cpu; - for (size_t i = 1; i < member_->places_.size(); ++i) { + for (size_t i = 0; i < member_->places_.size(); ++i) { + if ((initializing && i == 0) || + (!initializing && static_cast(i) == var_dev_id)) + continue; + auto local_scope = member_->local_scopes_[i]; auto *t = local_scope->Var(var)->GetMutable(); - t->Resize(dims); - t->mutable_data(cpu, main_tensor.type()); - paddle::framework::TensorCopy(main_tensor, cpu, t); + + // FIXME(zcd): LR_DECAY_COUNTER should not be shared. This is a hot fix. + if (member_->use_all_reduce_ || member_->use_cuda_ || + var == "@LR_DECAY_COUNTER@") { + t->Resize(dims); + t->mutable_data(cpu, main_tensor.type()); + paddle::framework::TensorCopy(main_tensor, cpu, t); + } else { + t->ShareDataWith(main_tensor); + } } } - member_->nccl_ctxs_->WaitAll(); } -#else - PADDLE_THROW("Not compiled with CUDA"); -#endif } void ParallelExecutor::Run(const std::vector &fetch_tensors, const std::string &fetched_var_name) { platform::RecordBlock b(0); - // Create local scopes. - for (auto it = member_->local_scopes_.rbegin(); - it != member_->local_scopes_.rend(); ++it) { - auto &scope = *it; - Scope &local_scope = scope->NewScope(); - *scope->Var(details::kLocalExecScopeName)->GetMutable() = - &local_scope; - - for (auto &name_type_pair : member_->var_types_) { - if (scope->FindVar(std::get<0>(name_type_pair)) != nullptr) { - continue; - } - - if (std::get<2>(name_type_pair)) { // Persistable - InitializeVariable(scope->Var(std::get<0>(name_type_pair)), - std::get<1>(name_type_pair)); - } else { - InitializeVariable(local_scope.Var(std::get<0>(name_type_pair)), - std::get<1>(name_type_pair)); - } - } - } - auto fetch_data = member_->executor_->Run(fetch_tensors); *member_->global_scope_->Var(fetched_var_name)->GetMutable() = fetch_data; - - // Wait All computational streams - for (auto p : member_->places_) { - platform::DeviceContextPool::Instance().Get(p)->Wait(); - } - for (auto &scope : member_->local_scopes_) { - auto &local_scope = - *scope->Var(details::kLocalExecScopeName)->GetMutable(); - scope->DeleteScope(local_scope); - } } void ParallelExecutor::FeedTensorsIntoLocalScopes( @@ -237,7 +343,7 @@ void ParallelExecutor::FeedAndSplitTensorIntoLocalScopes( } ParallelExecutor::~ParallelExecutor() { - if (member_->own_local_scope) { + if (member_->own_local_scope_) { for (size_t i = 1; i < member_->local_scopes_.size(); ++i) { member_->global_scope_->DeleteScope(member_->local_scopes_[i]); } @@ -246,3 +352,8 @@ ParallelExecutor::~ParallelExecutor() { } // namespace framework } // namespace paddle + +USE_PASS(graph_viz_pass); +USE_PASS(multi_devices_pass); +USE_PASS(multi_devices_check_pass); +USE_PASS(multi_devices_print_pass); diff --git a/paddle/fluid/framework/parallel_executor.h b/paddle/fluid/framework/parallel_executor.h index ecd107d81f..5fb748fa20 100644 --- a/paddle/fluid/framework/parallel_executor.h +++ b/paddle/fluid/framework/parallel_executor.h @@ -14,9 +14,12 @@ limitations under the License. 
*/ #pragma once +#include #include #include #include +#include "paddle/fluid/framework/details/execution_strategy.h" +#include "paddle/fluid/framework/details/multi_devices_graph_pass.h" #include "paddle/fluid/framework/executor.h" #include "paddle/fluid/framework/op_info.h" #include "paddle/fluid/framework/program_desc.h" @@ -29,40 +32,44 @@ namespace framework { class ParallelExecutorPrivate; +using details::BuildStrategy; +using details::ExecutionStrategy; + class ParallelExecutor { DISABLE_COPY_AND_ASSIGN(ParallelExecutor); public: - explicit ParallelExecutor(size_t num_threads, bool use_event, - const std::vector& places, - const std::unordered_set& params, - const std::unordered_set& bcast_vars, - const ProgramDesc& main_program, - const std::string& loss_var_name, Scope* scope, - const std::vector& local_scopes, - bool allow_op_delay, bool use_default_grad_scale); + explicit ParallelExecutor(const std::vector &places, + const std::unordered_set ¶ms, + const std::unordered_set &bcast_vars, + const ProgramDesc &main_program, + const std::string &loss_var_name, Scope *scope, + const std::vector &local_scopes, + const ExecutionStrategy &exec_strategy, + const BuildStrategy &build_strategy, + size_t num_trainers = 1, size_t trainer_id = 0); ~ParallelExecutor(); - std::vector& GetLocalScopes(); + std::vector &GetLocalScopes(); /** * Feed tensors to local scopes. The size of tensors should be equal to the * size of local scopes. */ void FeedTensorsIntoLocalScopes( - const std::vector>& tensors); + const std::vector> &tensors); void FeedAndSplitTensorIntoLocalScopes( - const std::unordered_map& tensors); + const std::unordered_map &tensors); - void Run(const std::vector& fetch_tensors, - const std::string& fetched_var_name); + void Run(const std::vector &fetch_tensors, + const std::string &fetched_var_name); - void BCastParamsToGPUs(const std::unordered_set& vars) const; + void BCastParamsToDevices(const std::unordered_set &vars) const; private: - ParallelExecutorPrivate* member_; + ParallelExecutorPrivate *member_; }; } // namespace framework diff --git a/paddle/fluid/framework/program_desc.cc b/paddle/fluid/framework/program_desc.cc index 64fb028f83..20bdc7830f 100644 --- a/paddle/fluid/framework/program_desc.cc +++ b/paddle/fluid/framework/program_desc.cc @@ -51,12 +51,15 @@ ProgramDesc::ProgramDesc(const ProgramDesc &o) { auto *block = desc_.mutable_blocks(i); blocks_.emplace_back(new BlockDesc(*o.blocks_[i], block, this)); } - for (auto &block : blocks_) { - for (auto *op : block->AllOps()) { - for (const auto &attr : op->Proto()->attrs()) { - if (attr.type() == proto::AttrType::BLOCK) { - size_t blk_idx = attr.block_idx(); - op->SetBlockAttr(attr.name(), this->MutableBlock(blk_idx)); + for (size_t block_id = 0; block_id < blocks_.size(); ++block_id) { + auto all_ops = blocks_[block_id]->AllOps(); + for (size_t op_id = 0; op_id < all_ops.size(); ++op_id) { + auto &op = all_ops[op_id]; + for (const std::string &attr_name : op->AttrNames()) { + if (op->GetAttrType(attr_name) == proto::AttrType::BLOCK) { + int sub_block_id = + o.Block(block_id).Op(op_id)->GetBlockAttrId(attr_name); + op->SetBlockAttr(attr_name, MutableBlock(sub_block_id)); } } } @@ -86,6 +89,16 @@ ProgramDesc::ProgramDesc(const std::string &binary_str) { for (auto &block_desc : *desc_.mutable_blocks()) { blocks_.emplace_back(new BlockDesc(this, &block_desc)); } + for (auto &block : blocks_) { + for (auto *op : block->AllOps()) { + for (const auto &attr : op->Proto()->attrs()) { + if (attr.type() == proto::AttrType::BLOCK) 
{ + size_t blk_idx = attr.block_idx(); + op->SetBlockAttr(attr.name(), this->MutableBlock(blk_idx)); + } + } + } + } } const std::vector ProgramDesc::GetFeedTargetNames() { diff --git a/paddle/fluid/framework/reader.cc b/paddle/fluid/framework/reader.cc index 76126f3dc6..40eafda9bf 100644 --- a/paddle/fluid/framework/reader.cc +++ b/paddle/fluid/framework/reader.cc @@ -13,27 +13,62 @@ // limitations under the License. #include "paddle/fluid/framework/reader.h" +#include namespace paddle { namespace framework { -ReaderBase::~ReaderBase() {} - -FileReader::FileReader(const std::vector &dims) : dims_(dims) {} -void FileReader::ReadNext(std::vector *out) { +void ReaderBase::ReadNext(std::vector *out) { + std::lock_guard lock(mu_); + PADDLE_ENFORCE_EQ(status_, ReaderStatus::kRunning); ReadNextImpl(out); - if (out->empty()) { - return; - } - for (size_t i = 0; i < dims_.size(); ++i) { - auto &actual = out->at(i).dims(); - auto &expect = dims_[i]; +} - PADDLE_ENFORCE_EQ(actual.size(), expect.size()); - for (int j = 0; j < actual.size(); ++j) { - // PADDLE_ENFORCE(actual[i] == expect[i] || expect[i] == -1); +void ReaderBase::InsertDecoratedReader( + const std::shared_ptr &decorated_reader) { + std::lock_guard guard(mu_); + decorated_readers_.emplace_back(decorated_reader); +} + +std::unordered_set ReaderBase::GetEndPoints() { + std::unordered_set result; + std::deque queue; + queue.emplace_back(this); + while (!queue.empty()) { // BFS search + auto *front = queue.front(); + queue.pop_front(); + if (front->decorated_readers_.empty()) { + result.emplace(front); + } else { + for (auto &reader : front->decorated_readers_) { + if (auto *reader_ptr = reader.lock().get()) { + queue.emplace_back(reader_ptr); + } + } } } + + return result; +} + +void ReaderBase::Shutdown() { + std::lock_guard lock(mu_); + if (status_ != ReaderStatus::kStopped) { + ShutdownImpl(); + status_ = ReaderStatus::kStopped; + } +} + +void ReaderBase::Start() { + std::lock_guard lock(mu_); + if (status_ != ReaderStatus::kRunning) { + StartImpl(); + status_ = ReaderStatus::kRunning; + } } + +ReaderBase::~ReaderBase() {} + +DecoratedReader::~DecoratedReader() { reader_->Shutdown(); } } // namespace framework } // namespace paddle diff --git a/paddle/fluid/framework/reader.h b/paddle/fluid/framework/reader.h index 3a413941df..82562bf883 100644 --- a/paddle/fluid/framework/reader.h +++ b/paddle/fluid/framework/reader.h @@ -15,6 +15,7 @@ #pragma once #include +#include #include #include "paddle/fluid/framework/ddim.h" @@ -26,58 +27,116 @@ namespace framework { class ReaderBase { public: - virtual void ReadNext(std::vector* out) = 0; + virtual void ReadNext(std::vector* out); - virtual void ReInit() = 0; + virtual void Shutdown(); - virtual ~ReaderBase(); -}; + virtual void Start(); -class DecoratedReader : public ReaderBase { - public: - explicit DecoratedReader(ReaderBase* reader) : ReaderBase(), reader_(reader) { - PADDLE_ENFORCE_NOT_NULL(reader_); - } + // Return the readers at the end of the decorating chain. Basically, + // they are the readers just before the read op.
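Aside: Shutdown()/Start() above form a small status machine: ReadNext() is only legal while the reader is kRunning, and ShutdownImpl()/StartImpl() fire only on an actual state transition, which makes both calls idempotent. A stripped-down, thread-safe model of that machine (not Paddle's real ReaderBase) follows.

#include <iostream>
#include <mutex>
#include <stdexcept>

class ToyReader {
 public:
  int ReadNext() {
    std::lock_guard<std::mutex> lock(mu_);
    if (status_ != kRunning) throw std::runtime_error("reader is stopped");
    return counter_++;
  }
  void Shutdown() {
    std::lock_guard<std::mutex> lock(mu_);
    if (status_ != kStopped) {
      // ShutdownImpl() would release the underlying source here.
      status_ = kStopped;
    }
  }
  void Start() {
    std::lock_guard<std::mutex> lock(mu_);
    if (status_ != kRunning) {
      counter_ = 0;  // StartImpl() would rewind the underlying source.
      status_ = kRunning;
    }
  }

 private:
  enum Status { kRunning, kStopped };
  Status status_ = kRunning;
  int counter_ = 0;
  std::mutex mu_;
};

int main() {
  ToyReader reader;
  std::cout << reader.ReadNext() << "\n";  // 0
  reader.Shutdown();
  reader.Start();
  std::cout << reader.ReadNext() << "\n";  // 0 again after a restart
}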
+ std::unordered_set GetEndPoints(); - void ReInit() override { reader_->ReInit(); } + virtual ~ReaderBase(); protected: - ReaderBase* reader_; + virtual void ReadNextImpl(std::vector* out) {} + + virtual void ShutdownImpl() {} + + virtual void StartImpl() {} + + enum ReaderStatus { kRunning, kStopped }; + + ReaderStatus status_{kRunning}; + + mutable std::mutex mu_; + + private: + friend class DecoratedReader; + // These methods can only be invoked inside DecoratedReader to record the + // decorating chain. + void InsertDecoratedReader( + const std::shared_ptr& decorated_reader); + // The set of readers that decorate this reader. + std::vector> decorated_readers_; }; -class FileReader : public ReaderBase { +class DecoratedReader : public ReaderBase, + public std::enable_shared_from_this { public: - explicit FileReader(const std::vector& dims); + explicit DecoratedReader(const std::shared_ptr& reader) + : ReaderBase(), reader_(reader) { + PADDLE_ENFORCE_NOT_NULL(reader_); + } - void ReadNext(std::vector* out) override; + void RegisterDecorateChain() { + reader_->InsertDecoratedReader(shared_from_this()); + } + + ~DecoratedReader(); protected: - virtual void ReadNextImpl(std::vector* out) = 0; + void ShutdownImpl() override { reader_->Shutdown(); } - private: - std::vector dims_; + void StartImpl() override { reader_->Start(); } + + std::shared_ptr reader_; }; +// FileReader is just a conceptual class. +class FileReader : public ReaderBase {}; + // The ReaderHolder is used as the readers' unified wrapper, // making it easier to access different types of readers in Variables. class ReaderHolder { public: - void Reset(ReaderBase* reader) { reader_.reset(reader); } + template + void Reset(const std::shared_ptr& reader) { + auto reader_base = std::dynamic_pointer_cast(reader); + PADDLE_ENFORCE_NOT_NULL(reader_base); + reader_ = reader_base; + } - ReaderBase* Get() const { return reader_.get(); } + const std::shared_ptr& Get() const { return reader_; } void ReadNext(std::vector* out) { PADDLE_ENFORCE_NOT_NULL(reader_); reader_->ReadNext(out); } - void ReInit() { + + void ResetAll() { + auto end_readers = reader_->GetEndPoints(); + for (auto* reader : end_readers) { + reader->Shutdown(); + } + for (auto* reader : end_readers) { + reader->Start(); + } + } + + void Shutdown() { PADDLE_ENFORCE_NOT_NULL(reader_); - reader_->ReInit(); + reader_->Shutdown(); } + void Start() { + PADDLE_ENFORCE_NOT_NULL(reader_); + reader_->Start(); + } + + operator const std::shared_ptr&() const { return this->reader_; } + private: - std::unique_ptr reader_; + std::shared_ptr reader_; }; +template +inline std::shared_ptr MakeDecoratedReader(ARGS&&... args) { + std::shared_ptr reader(new T(std::forward(args)...)); + reader->RegisterDecorateChain(); + return reader; +} + } // namespace framework } // namespace paddle diff --git a/paddle/fluid/framework/reader_test.cc b/paddle/fluid/framework/reader_test.cc new file mode 100644 index 0000000000..f0d07cb7c1 --- /dev/null +++ b/paddle/fluid/framework/reader_test.cc @@ -0,0 +1,52 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "paddle/fluid/framework/reader.h" +#include +#include "gtest/gtest.h" + +class StubDecoratedReader : public paddle::framework::DecoratedReader { + public: + explicit StubDecoratedReader(const std::shared_ptr &reader) + : DecoratedReader(reader) {} + + void ReadNextImpl(std::vector *out) override {} +}; + +class StubRootReader : public paddle::framework::ReaderBase { + public: + void ReadNextImpl(std::vector *out) override {} +}; + +TEST(READER, decorate_chain) { + auto root = std::make_shared(); + auto end_point1 = + paddle::framework::MakeDecoratedReader(root); + auto end_point2 = + paddle::framework::MakeDecoratedReader(root); + + { + auto endpoints = root->GetEndPoints(); + ASSERT_EQ(endpoints.size(), 2U); + ASSERT_NE(endpoints.count(end_point1.get()), 0); + ASSERT_NE(endpoints.count(end_point2.get()), 0); + } + + { + auto end_point3 = + paddle::framework::MakeDecoratedReader(root); + ASSERT_EQ(root->GetEndPoints().size(), 3U); + } + { ASSERT_EQ(root->GetEndPoints().size(), 2U); } +} diff --git a/paddle/fluid/framework/scope.cc b/paddle/fluid/framework/scope.cc index 9091713158..50f374e370 100644 --- a/paddle/fluid/framework/scope.cc +++ b/paddle/fluid/framework/scope.cc @@ -34,13 +34,7 @@ DEFINE_bool( namespace paddle { namespace framework { -Scope::~Scope() { - DropKids(); - for (auto& kv : vars_) { - VLOG(3) << "Destroy variable " << kv.first; - delete kv.second; - } -} +Scope::~Scope() { DropKids(); } Scope& Scope::NewScope() const { std::unique_lock lock(mutex_); @@ -49,45 +43,37 @@ Scope& Scope::NewScope() const { } Variable* Scope::Var(const std::string& name) { - auto* v = FindVarLocally(name); - if (v != nullptr) return v; - v = new Variable(); - vars_[name] = v; - VLOG(3) << "Create variable " << name; - v->name_ = &(vars_.find(name)->first); - return v; + std::unique_lock lock(mutex_); + return VarInternal(name); } Variable* Scope::Var(std::string* name) { - auto var_name = string::Sprintf("%p.%d", this, vars_.size()); + std::unique_lock lock(mutex_); + auto new_name = string::Sprintf("%p.%d", this, vars_.size()); if (name != nullptr) { - *name = var_name; + *name = new_name; } - return Var(var_name); + return VarInternal(new_name); } Variable* Scope::FindVar(const std::string& name) const { - auto var = FindVarLocally(name); - if (var != nullptr) { - return var; - } - return (parent_ == nullptr) ? nullptr : parent_->FindVar(name); + std::unique_lock lock(mutex_); + return FindVarInternal(name); } const Scope* Scope::FindScope(const Variable* var) const { - for (auto& kv : vars_) { - if (kv.second == var) { - return this; - } - } - return (parent_ == nullptr) ? 
nullptr : parent_->FindScope(var); + std::unique_lock lock(mutex_); + return FindScopeInternal(var); } + void Scope::DropKids() { + std::unique_lock lock(mutex_); for (Scope* s : kids_) delete s; kids_.clear(); } std::vector Scope::LocalVarNames() const { + std::unique_lock lock(mutex_); std::vector known_vars; known_vars.reserve(this->vars_.size()); for (auto& p : vars_) { @@ -110,10 +96,10 @@ void Scope::DeleteScope(Scope* scope) const { } void Scope::EraseVars(const std::vector& var_names) { + std::unique_lock lock(mutex_); std::set var_set(var_names.begin(), var_names.end()); for (auto it = vars_.begin(); it != vars_.end();) { if (var_set.find(it->first) != var_set.end()) { - delete it->second; it = vars_.erase(it); } else { ++it; @@ -123,25 +109,60 @@ void Scope::EraseVars(const std::vector& var_names) { void Scope::Rename(const std::string& origin_name, const std::string& new_name) const { + std::unique_lock lock(mutex_); + RenameInternal(origin_name, new_name); +} + +std::string Scope::Rename(const std::string& origin_name) const { + std::unique_lock lock(mutex_); + auto new_name = string::Sprintf("%p.%d", this, vars_.size()); + RenameInternal(origin_name, new_name); + return new_name; +} + +Variable* Scope::VarInternal(const std::string& name) { + auto* v = FindVarLocally(name); + if (v != nullptr) return v; + + v = new Variable(); + vars_[name].reset(v); + VLOG(3) << "Create variable " << name; + v->name_ = &(vars_.find(name)->first); + return v; +} + +const Scope* Scope::FindScopeInternal(const Variable* var) const { + for (auto& kv : vars_) { + if (kv.second.get() == var) { + return this; + } + } + return (parent_ == nullptr) ? nullptr : parent_->FindScope(var); +} + +void Scope::RenameInternal(const std::string& origin_name, + const std::string& new_name) const { auto origin_it = vars_.find(origin_name); PADDLE_ENFORCE(origin_it != vars_.end(), "Cannot find original variable with name %s", origin_name); auto new_it = vars_.find(new_name); PADDLE_ENFORCE(new_it == vars_.end(), "The variable with name %s is already in the scope", new_name); - vars_[new_name] = origin_it->second; + vars_[new_name].reset(origin_it->second.release()); vars_.erase(origin_it); } -std::string Scope::Rename(const std::string& origin_name) const { - auto var_name = string::Sprintf("%p.%d", this, vars_.size()); - Rename(origin_name, var_name); - return var_name; +Variable* Scope::FindVarInternal(const std::string& name) const { + auto var = FindVarLocally(name); + if (var != nullptr) { + return var; + } + return (parent_ == nullptr) ? nullptr : parent_->FindVar(name); } Variable* Scope::FindVarLocally(const std::string& name) const { auto it = vars_.find(name); - if (it != vars_.end()) return it->second; + if (it != vars_.end()) return it->second.get(); return nullptr; } diff --git a/paddle/fluid/framework/scope.h b/paddle/fluid/framework/scope.h index abc82e452d..e246241c0a 100644 --- a/paddle/fluid/framework/scope.h +++ b/paddle/fluid/framework/scope.h @@ -47,15 +47,18 @@ class Scope { Scope& NewScope() const; /// Create a variable with given name if it doesn't exist. + /// Caller doesn't own the returned Variable. Variable* Var(const std::string& name); /// Create a variable with a scope-unique name. + /// Caller doesn't own the returned Variable. Variable* Var(std::string* name = nullptr); void EraseVars(const std::vector& var_names); /// Find a variable in the scope or any of its ancestors. Returns /// nullptr if cannot find. + /// Caller doesn't own the returned Variable. 
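Aside: the Scope refactor above follows a classic locking pattern: each public method acquires the (non-recursive) std::mutex exactly once and then delegates to an unlocked *Internal helper, so internal cross-calls such as VarInternal -> FindVarLocally never try to re-lock. The same pattern in miniature, with a toy scope rather than Paddle's:

#include <mutex>
#include <string>
#include <unordered_map>

class ToyScope {
 public:
  int* Var(const std::string& name) {
    std::unique_lock<std::mutex> lock(mu_);
    return VarInternal(name);
  }
  int* FindVar(const std::string& name) {
    std::unique_lock<std::mutex> lock(mu_);
    return FindVarLocally(name);
  }

 private:
  // Unlocked helpers: only called while mu_ is already held.
  int* VarInternal(const std::string& name) {
    if (int* existing = FindVarLocally(name)) return existing;  // no re-lock
    return &vars_[name];
  }
  int* FindVarLocally(const std::string& name) {
    auto it = vars_.find(name);
    return it == vars_.end() ? nullptr : &it->second;
  }

  std::unordered_map<std::string, int> vars_;
  std::mutex mu_;  // a std::mutex deadlocks if re-locked by the same thread
};

int main() {
  ToyScope scope;
  *scope.Var("x") = 42;
  return *scope.FindVar("x") == 42 ? 0 : 1;
}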
Variable* FindVar(const std::string& name) const; const Scope* parent() const { return parent_; } @@ -78,13 +81,30 @@ class Scope { // Rename variable to a new name and return the new name std::string Rename(const std::string& origin_name) const; - Variable* FindVarLocally(const std::string& name) const; + protected: + mutable std::unordered_map> vars_; private: // Call Scope::NewScope for a sub-scope. explicit Scope(Scope const* parent) : parent_(parent) {} - mutable std::unordered_map vars_; + // Called by Var. + Variable* VarInternal(const std::string& name); + + // Called by FindScope. + const Scope* FindScopeInternal(const Variable* var) const; + + // Called by Rename. + void RenameInternal(const std::string& origin_name, + const std::string& new_name) const; + + // Called by FindVar recursively. + Variable* FindVarInternal(const std::string& name) const; + + // Called by FindVarInternal and Var. + Variable* FindVarLocally(const std::string& name) const; + + // Scopes in `kids_` are owned by this class. mutable std::list kids_; Scope const* parent_{nullptr}; diff --git a/paddle/fluid/framework/selected_rows.cc b/paddle/fluid/framework/selected_rows.cc index 56cf6693ca..06ed87e7e8 100644 --- a/paddle/fluid/framework/selected_rows.cc +++ b/paddle/fluid/framework/selected_rows.cc @@ -18,8 +18,8 @@ namespace paddle { namespace framework { struct ReAllocateVisitor { - ReAllocateVisitor(framework::Tensor* tensor, const framework::DDim& dims) - : tensor_(tensor), dims_(dims) {} + ReAllocateVisitor(const framework::DDim& dims, framework::Tensor* tensor) + : dims_(dims), tensor_(tensor) {} template void operator()() const { @@ -34,8 +34,8 @@ struct ReAllocateVisitor { tensor_->ShareDataWith(cpu_tensor); } - framework::Tensor* tensor_; framework::DDim dims_; + framework::Tensor* tensor_; }; struct TensorCopyVisitor { @@ -121,24 +121,29 @@ bool SelectedRows::HasKey(int64_t key) const { } std::vector> SelectedRows::Get( - std::vector keys, framework::Tensor* value) const { + const std::vector& keys, framework::Tensor* value) const { PADDLE_ENFORCE(value->IsInitialized(), "The value tensor should be initialized."); std::vector> non_keys_pair; - int64_t value_width = value_->numel() / value_->dims()[0]; - PADDLE_ENFORCE_EQ(value_width, value->numel() / value->dims()[0], - "output tensor should have the same shape with table " - "execpt the dims[0]."); - - for (size_t i = 0; i < keys.size(); ++i) { - int64_t index = Index(keys[i]); - if (index == -1) { - non_keys_pair.push_back(std::make_pair(keys[i], static_cast(i))); - } else { - framework::VisitDataType( - framework::ToDataType(value_->type()), - TensorCopyVisitor(value, i * value_width, *value_.get(), - index * value_width, value_width)); + if (keys.empty()) { + VLOG(3) << "keys is empty, please check data!"; + } else { + int64_t value_width = value_->numel() / value_->dims()[0]; + PADDLE_ENFORCE_EQ(value_width, value->numel() / value->dims()[0], + "output tensor should have the same shape with table " + "except the dims[0]."); + + for (size_t i = 0; i < keys.size(); ++i) { + int64_t index = Index(keys[i]); + if (index == -1) { + non_keys_pair.push_back( + std::make_pair(keys[i], static_cast(i))); + } else { + framework::VisitDataType( + framework::ToDataType(value_->type()), + TensorCopyVisitor(value, i * value_width, *value_.get(), + index * value_width, value_width)); + } } } return non_keys_pair; @@ -153,6 +158,7 @@ bool SelectedRows::Set(int64_t key, const framework::Tensor& value) { } PADDLE_ENFORCE_EQ(value.dims()[0], static_cast(1), "The
first dim of value should be 1."); + std::lock_guard lock(*auto_grown_mutex_.get()); auto index = Index(key); bool is_new_key = false; if (index == -1) { @@ -164,7 +170,7 @@ bool SelectedRows::Set(int64_t key, const framework::Tensor& value) { auto dims = value_->dims(); dims[0] = (dims[0] + 1) << 1; framework::VisitDataType(framework::ToDataType(value.type()), - ReAllocateVisitor(value_.get(), dims)); + ReAllocateVisitor(dims, value_.get())); } } diff --git a/paddle/fluid/framework/selected_rows.h b/paddle/fluid/framework/selected_rows.h index c27c927ee7..7160670ddd 100644 --- a/paddle/fluid/framework/selected_rows.h +++ b/paddle/fluid/framework/selected_rows.h @@ -15,6 +15,8 @@ limitations under the License. */ #pragma once #include +#include +#include // NOLINT #include #include @@ -46,11 +48,13 @@ class SelectedRows { SelectedRows(const std::vector& rows, const int64_t& height) : rows_(rows), height_(height) { value_.reset(new Tensor()); + auto_grown_mutex_.reset(new std::mutex); } SelectedRows() { height_ = 0; value_.reset(new Tensor()); + auto_grown_mutex_.reset(new std::mutex); } platform::Place place() const { return value_->place(); } @@ -82,7 +86,7 @@ class SelectedRows { * @return a list of pair which contains the non-exists key and the index in * the value */ - std::vector> Get(std::vector keys, + std::vector> Get(const std::vector& keys, framework::Tensor* value) const; /* @@ -125,6 +129,7 @@ class SelectedRows { Vector rows_; std::unique_ptr value_{nullptr}; int64_t height_; + std::unique_ptr auto_grown_mutex_{nullptr}; }; /* diff --git a/paddle/fluid/framework/shape_inference.h b/paddle/fluid/framework/shape_inference.h index 46c8feec00..5f497cafa0 100644 --- a/paddle/fluid/framework/shape_inference.h +++ b/paddle/fluid/framework/shape_inference.h @@ -63,6 +63,7 @@ class InferShapeContext { std::vector GetInputVarPtrs(const std::string &name); std::vector GetOutputVarPtrs(const std::string &name); + virtual InferShapeVarPtr GetVarPtr(const std::string &name) = 0; // Note: In while op, we need this to be public void SetDims(const std::vector &names, @@ -81,8 +82,6 @@ class InferShapeContext { const std::vector &names) const; virtual proto::VarType::Type GetVarType(const std::string &name) const = 0; - - virtual InferShapeVarPtr GetVarPtr(const std::string &name) = 0; }; } // namespace framework diff --git a/paddle/fluid/framework/tensor.cc b/paddle/fluid/framework/tensor.cc index e97ada06f0..c7286dacf0 100644 --- a/paddle/fluid/framework/tensor.cc +++ b/paddle/fluid/framework/tensor.cc @@ -15,5 +15,102 @@ limitations under the License. */ #include "paddle/fluid/framework/tensor.h" namespace paddle { -namespace framework {} +namespace framework { +extern size_t SizeOfType(std::type_index type); +void Tensor::check_memory_size() const { + PADDLE_ENFORCE_NOT_NULL( + holder_, "Tensor holds no memory. Call Tensor::mutable_data first."); + PADDLE_ENFORCE_LE( + numel() * SizeOfType(type()), memory_size(), + "Tensor's dims_ is out of bound. Call Tensor::mutable_data " + "first to re-allocate memory.\n" + "or maybe the required data-type mismatches the data already stored."); +} + +size_t Tensor::memory_size() const { + return holder_ == nullptr ? 0UL : holder_->size() - offset_; +} + +void* Tensor::mutable_data(platform::Place place, std::type_index type) { + if (holder_ != nullptr) { + holder_->set_type(type); + } + PADDLE_ENFORCE_GE(numel(), 0, + "When calling this method, the Tensor's numel must be " + "equal or larger than zero. 
" + "Please check Tensor::Resize has been called first."); + int64_t size = numel() * SizeOfType(type); + /* some versions of boost::variant don't have operator!= */ + if (holder_ == nullptr || !(holder_->place() == place) || + holder_->size() < size + offset_) { + if (platform::is_cpu_place(place)) { + holder_.reset(new PlaceholderImpl( + boost::get(place), size, type)); + } else if (platform::is_gpu_place(place) || + platform::is_cuda_pinned_place(place)) { +#ifndef PADDLE_WITH_CUDA + PADDLE_THROW( + "CUDAPlace or CUDAPinnedPlace is not supported in CPU-only mode."); + } +#else + if (platform::is_gpu_place(place)) { + holder_.reset(new PlaceholderImpl( + boost::get(place), size, type)); + } else if (platform::is_cuda_pinned_place(place)) { + holder_.reset(new PlaceholderImpl( + boost::get(place), size, type)); + } + } +#endif + offset_ = 0; + } + return reinterpret_cast(reinterpret_cast(holder_->ptr()) + + offset_); +} + +void* Tensor::mutable_data(platform::Place place) { + PADDLE_ENFORCE(this->holder_ != nullptr, + "Cannot invoke mutable data if current hold nothing."); + return mutable_data(place, holder_->type()); +} + +Tensor& Tensor::ShareDataWith(const Tensor& src) { + src.check_memory_size(); + *this = src; + return *this; +} + +Tensor Tensor::Slice(int begin_idx, int end_idx) const { + check_memory_size(); + PADDLE_ENFORCE_GE(begin_idx, 0, + "The start row index must be greater than 0."); + PADDLE_ENFORCE_LE(end_idx, dims_[0], "The end row index is out of bound."); + PADDLE_ENFORCE_LT( + begin_idx, end_idx, + "The start row index must be lesser than the end row index."); + + if (dims_[0] == 1) { + return *this; + } else { + size_t base = numel() / dims_[0]; + Tensor dst; + dst.holder_ = holder_; + dst.set_layout(layout_); + DDim dst_dims = dims_; + dst_dims[0] = end_idx - begin_idx; + dst.Resize(dst_dims); + dst.offset_ = offset_ + begin_idx * base * SizeOfType(type()); + return dst; + } +} + +Tensor& Tensor::Resize(const DDim& dims) { + dims_ = dims; + return *this; +} + +const DDim& Tensor::dims() const { return dims_; } + +int64_t Tensor::numel() const { return product(dims_); } +} // namespace framework } // namespace paddle diff --git a/paddle/fluid/framework/tensor.h b/paddle/fluid/framework/tensor.h index 6f878541e6..0bbfd66148 100644 --- a/paddle/fluid/framework/tensor.h +++ b/paddle/fluid/framework/tensor.h @@ -34,6 +34,28 @@ namespace framework { class LoDTensor; class Tensor { +#ifdef PADDLE_WITH_MKLDNN + + public: + inline mkldnn::memory::format format() const { return format_; } + + inline void set_format(const mkldnn::memory::format format) { + format_ = format; + } + + protected: + /** + * @brief the detail format of memory block which have layout as kMKLDNN + * + * @note MKLDNN lib support various memory format like nchw, nhwc, nChw8C, + * nChw16c, etc. For a MKLDNN memory block, layout will be set as + * DataLayout::kMKLDNN meanwhile detail memory format will be kept in + * this field. + */ + + mkldnn::memory::format format_ = mkldnn::memory::format::format_undef; +#endif + public: template friend struct EigenTensor; @@ -54,26 +76,24 @@ class Tensor { /*! Return a pointer to mutable memory block. */ template - inline T* data(); + T* data(); /*! Return a pointer to constant memory block. */ template - inline const T* data() const; + const T* data() const; inline bool IsInitialized() const; - inline void switch_place(platform::Place new_place); - /** * @brief Return a pointer to mutable memory block. * @note If not exist, then allocation. 
*/ template - inline T* mutable_data(platform::Place place); + T* mutable_data(platform::Place place); - inline void* mutable_data(platform::Place place, std::type_index type); + void* mutable_data(platform::Place place, std::type_index type); - inline void* mutable_data(platform::Place place); + void* mutable_data(platform::Place place); /** * @brief Return a pointer to mutable memory block. @@ -84,19 +104,19 @@ class Tensor { * @note If not exist, then allocation. */ template - inline T* mutable_data(DDim dims, platform::Place place); + T* mutable_data(DDim dims, platform::Place place); /*! Return the dimensions of the memory block. */ - inline const DDim& dims() const; + const DDim& dims() const; /*! Return the numel of the memory block. */ - inline int64_t numel() const; + int64_t numel() const; /*! Resize the dimensions of the memory block. */ - inline Tensor& Resize(const DDim& dims); + Tensor& Resize(const DDim& dims); /*! The internal of two tensors share the same memory block. */ - inline Tensor& ShareDataWith(const Tensor& src); + Tensor& ShareDataWith(const Tensor& src); /** * @brief Return a sub-tensor of the given tensor. @@ -106,7 +126,7 @@ class Tensor { * @param[in] end_idx The index of the end row(exclusive) to slice. * The index number begins from 0. */ - inline Tensor Slice(int begin_idx, int end_idx) const; + Tensor Slice(int begin_idx, int end_idx) const; platform::Place place() const { PADDLE_ENFORCE_NOT_NULL( @@ -123,11 +143,11 @@ class Tensor { // memory size returns the holding memory size in byte. size_t memory_size() const; - inline void check_memory_size() const; + void check_memory_size() const; - inline DataLayout layout() const { return layout_; } + DataLayout layout() const { return layout_; } - inline void set_layout(const DataLayout layout) { layout_ = layout; } + void set_layout(const DataLayout layout) { layout_ = layout; } private: /** @@ -197,8 +217,10 @@ class Tensor { * N,C,H,W for respectively the batch size, the number of * feature maps, the height. */ - - DataLayout layout_ = DataLayout::kNHWC; + // Fix me: here just change the default layout to kNCHW + // it doesn't fix the real issue, i.e. feeder should set up tensor layout + // according to actual input data + DataLayout layout_ = DataLayout::kNCHW; /** * @brief A PlaceHolder may be shared by more than one tensor. @@ -210,15 +232,6 @@ class Tensor { size_t offset_; }; -inline void Tensor::switch_place(platform::Place new_place) { - if (holder_->place() == new_place) { - return; - } - - // TODO(tonyyang-svail): do memcpy here. - PADDLE_THROW("Not Implemented"); -} - } // namespace framework } // namespace paddle diff --git a/paddle/fluid/framework/tensor_impl.h b/paddle/fluid/framework/tensor_impl.h index f49d1a47a3..7f678f869a 100644 --- a/paddle/fluid/framework/tensor_impl.h +++ b/paddle/fluid/framework/tensor_impl.h @@ -13,74 +13,19 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ #pragma once +#include "paddle/fluid/framework/data_type.h" #include "paddle/fluid/memory/memcpy.h" #include "paddle/fluid/platform/enforce.h" #include "paddle/fluid/platform/float16.h" namespace paddle { namespace framework { - -template -struct SizeOfTypeFunctor; - -template -struct SizeOfTypeFunctor { - size_t operator()(std::type_index type) const { - if (typeid(T).hash_code() == type.hash_code()) { - return sizeof(T); - } else { - return 0UL; - } - } -}; - -template <> -struct SizeOfTypeFunctor<> { - size_t operator()(std::type_index type) const { return 0UL; } -}; - -template -struct SizeOfTypeFunctor { - size_t operator()(std::type_index type) const { - SizeOfTypeFunctor head; - size_t head_size = head(type); - if (head_size != 0) { - return head_size; - } - SizeOfTypeFunctor tail; - return tail(type); - } -}; - -static inline size_t SizeOfType(std::type_index type) { - SizeOfTypeFunctor - functor; - size_t size = functor(type); - PADDLE_ENFORCE(size != 0UL, "Cannot get size of type %s", type.name()); - return size; -} - -inline void Tensor::check_memory_size() const { - PADDLE_ENFORCE_NOT_NULL( - holder_, "Tensor holds no memory. Call Tensor::mutable_data first."); - PADDLE_ENFORCE_LE( - numel() * SizeOfType(type()), memory_size(), - "Tensor's dims_ is out of bound. Call Tensor::mutable_data " - "first to re-allocate memory.\n" - "or maybe the required data-type mismatches the data already stored."); -} - -inline size_t Tensor::memory_size() const { - return holder_ == nullptr ? 0UL : holder_->size() - offset_; -} - template inline const T* Tensor::data() const { check_memory_size(); - PADDLE_ENFORCE(std::is_same::value || - holder_->type().hash_code() == typeid(T).hash_code(), - "Tensor holds the wrong type, it holds %s", + bool valid = std::is_same::value || + holder_->type() == std::type_index(typeid(T)); + PADDLE_ENFORCE(valid, "Tensor holds the wrong type, it holds %s", this->holder_->type().name()); return reinterpret_cast( @@ -92,9 +37,9 @@ inline bool Tensor::IsInitialized() const { return holder_ != nullptr; } template inline T* Tensor::data() { check_memory_size(); - PADDLE_ENFORCE(std::is_same::value || - holder_->type().hash_code() == typeid(T).hash_code(), - "Tensor holds the wrong type, it holds %s", + bool valid = std::is_same::value || + holder_->type() == std::type_index(typeid(T)); + PADDLE_ENFORCE(valid, "Tensor holds the wrong type, it holds %s", this->holder_->type().name()); return reinterpret_cast(reinterpret_cast(holder_->ptr()) + offset_); @@ -113,88 +58,6 @@ inline T* Tensor::mutable_data(platform::Place place) { return reinterpret_cast(mutable_data(place, typeid(T))); } -inline void* Tensor::mutable_data(platform::Place place, std::type_index type) { - if (holder_ != nullptr) { - holder_->set_type(type); - } - PADDLE_ENFORCE_GE(numel(), 0, - "When calling this method, the Tensor's numel must be " - "equal or larger than zero. 
" - "Please check Tensor::Resize has been called first."); - int64_t size = numel() * SizeOfType(type); - /* some versions of boost::variant don't have operator!= */ - if (holder_ == nullptr || !(holder_->place() == place) || - holder_->size() < size + offset_) { - if (platform::is_cpu_place(place)) { - holder_.reset(new PlaceholderImpl( - boost::get(place), size, type)); - } else if (platform::is_gpu_place(place) || - platform::is_cuda_pinned_place(place)) { -#ifndef PADDLE_WITH_CUDA - PADDLE_THROW( - "CUDAPlace or CUDAPinnedPlace is not supported in CPU-only mode."); - } -#else - if (platform::is_gpu_place(place)) { - holder_.reset(new PlaceholderImpl( - boost::get(place), size, type)); - } else if (platform::is_cuda_pinned_place(place)) { - holder_.reset(new PlaceholderImpl( - boost::get(place), size, type)); - } - } -#endif - offset_ = 0; - } - return reinterpret_cast(reinterpret_cast(holder_->ptr()) + - offset_); -} - -inline void* Tensor::mutable_data(platform::Place place) { - PADDLE_ENFORCE(this->holder_ != nullptr, - "Cannot invoke mutable data if current hold nothing."); - return mutable_data(place, holder_->type()); -} - -inline Tensor& Tensor::ShareDataWith(const Tensor& src) { - src.check_memory_size(); - *this = src; - return *this; -} - -inline Tensor Tensor::Slice(int begin_idx, int end_idx) const { - check_memory_size(); - PADDLE_ENFORCE_GE(begin_idx, 0, - "The start row index must be greater than 0."); - PADDLE_ENFORCE_LE(end_idx, dims_[0], "The end row index is out of bound."); - PADDLE_ENFORCE_LT( - begin_idx, end_idx, - "The start row index must be lesser than the end row index."); - - if (dims_[0] == 1) { - return *this; - } else { - size_t base = numel() / dims_[0]; - Tensor dst; - dst.holder_ = holder_; - dst.set_layout(layout_); - DDim dst_dims = dims_; - dst_dims[0] = end_idx - begin_idx; - dst.Resize(dst_dims); - dst.offset_ = offset_ + begin_idx * base * SizeOfType(type()); - return dst; - } -} - -inline Tensor& Tensor::Resize(const DDim& dims) { - dims_ = dims; - return *this; -} - -inline const DDim& Tensor::dims() const { return dims_; } - -inline int64_t Tensor::numel() const { return product(dims_); } - inline Tensor ReshapeToMatrix(const Tensor& src, int num_col_dims) { Tensor res; res.ShareDataWith(src); diff --git a/paddle/fluid/framework/tensor_test.cc b/paddle/fluid/framework/tensor_test.cc index e1012de2ec..cb2061c06a 100644 --- a/paddle/fluid/framework/tensor_test.cc +++ b/paddle/fluid/framework/tensor_test.cc @@ -15,6 +15,7 @@ #include "paddle/fluid/framework/tensor.h" #include #include +#include "paddle/fluid/platform/float16.h" namespace framework = paddle::framework; namespace platform = paddle::platform; @@ -209,7 +210,21 @@ TEST(Tensor, ReshapeToMatrix) { TEST(Tensor, Layout) { framework::Tensor src; - ASSERT_EQ(src.layout(), framework::DataLayout::kNHWC); + ASSERT_EQ(src.layout(), framework::DataLayout::kNCHW); src.set_layout(framework::DataLayout::kAnyLayout); ASSERT_EQ(src.layout(), framework::DataLayout::kAnyLayout); } + +TEST(Tensor, FP16) { + using platform::float16; + framework::Tensor src; + float16* src_ptr = src.mutable_data({2, 3}, platform::CPUPlace()); + for (int i = 0; i < 2 * 3; ++i) { + src_ptr[i] = static_cast(i); + } + EXPECT_EQ(src.memory_size(), 2 * 3 * sizeof(float16)); + // EXPECT a human readable error message + // src.data(); + // Tensor holds the wrong type, it holds N6paddle8platform7float16E at + // [/paddle/Paddle/paddle/fluid/framework/tensor_impl.h:43] +} diff --git a/paddle/fluid/framework/tensor_util.cc 
b/paddle/fluid/framework/tensor_util.cc index e5bc74755f..ab693004cf 100644 --- a/paddle/fluid/framework/tensor_util.cc +++ b/paddle/fluid/framework/tensor_util.cc @@ -15,6 +15,7 @@ #include #include #include +#include "paddle/fluid/framework/data_type.h" namespace paddle { namespace framework { @@ -69,7 +70,22 @@ void TensorCopy(const Tensor& src, const platform::Place& dst_place, PADDLE_ENFORCE(platform::is_gpu_place(ctx_place)); auto stream = reinterpret_cast(ctx).stream(); - memory::Copy(dst_gpu_place, dst_ptr, src_gpu_place, src_ptr, size, stream); + if (platform::is_same_place(src_place, dst_place)) { + memory::Copy(dst_gpu_place, dst_ptr, src_gpu_place, src_ptr, size, + stream); + } else { + if (platform::is_same_place(ctx_place, src_place)) { + memory::Copy(dst_gpu_place, dst_ptr, src_gpu_place, src_ptr, size, + stream); + platform::DeviceContextPool::Instance().Get(src.place())->Wait(); + } else if (platform::is_same_place(ctx_place, dst_place)) { + platform::DeviceContextPool::Instance().Get(src.place())->Wait(); + memory::Copy(dst_gpu_place, dst_ptr, src_gpu_place, src_ptr, size, + stream); + } else { + PADDLE_THROW("ctx is not belong to dst_gpu_place or src_gpu_place."); + } + } } #endif } @@ -78,10 +94,10 @@ void TensorCopy(const Tensor& src, const platform::Place& dst_place, Tensor* dst) { platform::DeviceContextPool& pool = platform::DeviceContextPool::Instance(); const platform::DeviceContext* dev_ctx; - if (platform::is_gpu_place(src.place())) { - dev_ctx = pool.Get(src.place()); - } else { + if (platform::is_gpu_place(dst_place)) { dev_ctx = pool.Get(dst_place); + } else { + dev_ctx = pool.Get(src.place()); } TensorCopy(src, dst_place, *dev_ctx, dst); } @@ -246,7 +262,8 @@ void TensorToStream(std::ostream& os, const Tensor& tensor, os.write(out.data(), size); } { // the 3rd field, tensor data - uint64_t size = tensor.memory_size(); + uint64_t size = tensor.numel() * framework::SizeOfType(tensor.type()); + auto* data_ptr = tensor.data(); PADDLE_ENFORCE(size < std::numeric_limits::max(), "Index overflow when writing tensor"); @@ -316,6 +333,9 @@ void TensorFromStream(std::istream& is, Tensor* tensor, tensor->Resize(framework::make_ddim(dims)); void* buf; auto ctx = platform::CPUDeviceContext(); + size_t size = + tensor->numel() * + framework::SizeOfType(framework::ToTypeIndex(desc.data_type())); if (platform::is_gpu_place(dev_ctx.GetPlace())) { #ifdef PADDLE_WITH_CUDA Tensor cpu_tensor; @@ -323,7 +343,7 @@ void TensorFromStream(std::istream& is, Tensor* tensor, framework::VisitDataType( desc.data_type(), DeserializedDataFunctor(&buf, &cpu_tensor, ctx.GetPlace())); - is.read(static_cast(buf), cpu_tensor.memory_size()); + is.read(static_cast(buf), size); auto dst_place = dev_ctx.GetPlace(); framework::TensorCopy(cpu_tensor, dst_place, dev_ctx, tensor); #else @@ -333,7 +353,7 @@ void TensorFromStream(std::istream& is, Tensor* tensor, framework::VisitDataType( desc.data_type(), DeserializedDataFunctor(&buf, tensor, ctx.GetPlace())); - is.read(static_cast(buf), tensor->memory_size()); + is.read(static_cast(buf), size); } } } diff --git a/paddle/fluid/framework/tensor_util.h b/paddle/fluid/framework/tensor_util.h index dca279b693..4457382ade 100644 --- a/paddle/fluid/framework/tensor_util.h +++ b/paddle/fluid/framework/tensor_util.h @@ -23,10 +23,25 @@ limitations under the License. 
*/ namespace paddle { namespace framework { +// NOTE(zcd): Because TensorCopy is an async operation, when the src_place +// and dst_place are two different GPUs, a wait on the src context is needed +// inside TensorCopy to ensure the operation is carried out correctly. +// If ctx_place and src_place are the same, src_ctx.Wait() is added +// after memory::Copy; if ctx_place and dst_place are the same, +// src_ctx.Wait() is added before memory::Copy. void TensorCopy(const Tensor& src, const platform::Place& dst_place, const platform::DeviceContext& ctx, Tensor* dst); + +// NOTE(zcd): If src.place() and dst_place are two different GPUs, the copy +// operation is carried out on dst_place's stream. This is very important, +// because TensorCopy is an async operator: in most cases, once this copy +// returns, dst will be used on dst_place's stream. If the copy were carried +// out on src_place's stream instead, it might not have completed by the +// time dst is used on dst_place's stream. void TensorCopy(const Tensor& src, const platform::Place& dst_place, Tensor* dst); + void TensorCopySync(const Tensor& src, const platform::Place& dst_place, Tensor* dst); diff --git a/paddle/fluid/framework/type_defs.h b/paddle/fluid/framework/type_defs.h index 4879209ece..e099e40f12 100644 --- a/paddle/fluid/framework/type_defs.h +++ b/paddle/fluid/framework/type_defs.h @@ -35,7 +35,8 @@ using VariableNameMap = std::map<std::string, std::vector<std::string>>; using Attribute = boost::variant<boost::blank, int, float, std::string, std::vector<int>, std::vector<float>, std::vector<std::string>, bool, - std::vector<bool>, BlockDesc*, int64_t>; + std::vector<bool>, BlockDesc*, int64_t, + std::vector<BlockDesc*>>; using AttributeMap = std::unordered_map<std::string, Attribute>; diff --git a/paddle/fluid/framework/var_type.h b/paddle/fluid/framework/var_type.h index 2b646d78f0..429997c8b8 100644 --- a/paddle/fluid/framework/var_type.h +++ b/paddle/fluid/framework/var_type.h @@ -24,18 +24,24 @@ limitations under the License. */ namespace paddle { namespace framework { + +template <typename T> +bool IsType(const std::type_index& type_index) { + return type_index == std::type_index(typeid(T)); +} + inline proto::VarType::Type ToVarType(std::type_index type) { - if (type.hash_code() == typeid(LoDTensor).hash_code()) { + if (IsType<LoDTensor>(type)) { return proto::VarType_Type_LOD_TENSOR; - } else if (type.hash_code() == typeid(LoDRankTable).hash_code()) { + } else if (IsType<LoDRankTable>(type)) { return proto::VarType_Type_LOD_RANK_TABLE; - } else if (type.hash_code() == typeid(LoDTensorArray).hash_code()) { + } else if (IsType<LoDTensorArray>(type)) { return proto::VarType_Type_LOD_TENSOR_ARRAY; - } else if (type.hash_code() == typeid(SelectedRows).hash_code()) { + } else if (IsType<SelectedRows>(type)) { return proto::VarType_Type_SELECTED_ROWS; - } else if (type.hash_code() == typeid(ReaderHolder).hash_code()) { + } else if (IsType<ReaderHolder>(type)) { return proto::VarType_Type_READER; - } else if (type.hash_code() == typeid(ChannelHolder).hash_code()) { + } else if (IsType<ChannelHolder>(type)) { return proto::VarType_Type_CHANNEL; } else { PADDLE_THROW("ToVarType:Unsupported type %s", type.name()); diff --git a/paddle/fluid/framework/var_type_inference_test.cc b/paddle/fluid/framework/var_type_inference_test.cc index 9e33003b44..7842168f60 100644 --- a/paddle/fluid/framework/var_type_inference_test.cc +++ b/paddle/fluid/framework/var_type_inference_test.cc @@ -22,10 +22,20 @@ limitations under the License.
*/ namespace paddle { namespace framework { +class NOP : public OperatorBase { + public: + NOP(const std::string &type, const VariableNameMap &inputs, + const VariableNameMap &outputs, const AttributeMap &attrs) + : OperatorBase(type, inputs, outputs, attrs) {} + + private: + void RunImpl(const Scope &scope, + const platform::Place &place) const override {} +}; + class SumOpMaker : public OpProtoAndCheckerMaker { public: - SumOpMaker(OpProto *proto, OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() { AddInput("X", "").AsDuplicable(); AddOutput("Out", ""); AddComment(""); diff --git a/paddle/fluid/inference/CMakeLists.txt b/paddle/fluid/inference/CMakeLists.txt index 50f635a41a..ba7645aa02 100644 --- a/paddle/fluid/inference/CMakeLists.txt +++ b/paddle/fluid/inference/CMakeLists.txt @@ -1,28 +1,59 @@ -set(FLUID_CORE_MODULES proto_desc memory lod_tensor executor init) +# analysis and tensorrt must be added before creating static library, +# otherwise, there would be undefined reference to them in static library. +add_subdirectory(analysis) +if (TENSORRT_FOUND) + add_subdirectory(tensorrt) +endif() +set(FLUID_CORE_MODULES proto_desc memory lod_tensor executor) + +# TODO(panyx0718): Should this be called paddle_fluid_inference_api_internal? cc_library(paddle_fluid_api SRCS io.cc DEPS ${FLUID_CORE_MODULES} ${GLOB_OP_LIB}) -# Create static library get_property(fluid_modules GLOBAL PROPERTY FLUID_MODULES) -cc_library(paddle_fluid DEPS ${fluid_modules}) + +# paddle_fluid_origin exclude inference api interface +cc_library(paddle_fluid_origin DEPS ${fluid_modules} paddle_fluid_api) + +if(NOT APPLE) + add_subdirectory(api) +endif() + +# Create static library +cc_library(paddle_fluid DEPS ${fluid_modules} paddle_fluid_api paddle_inference_api) +if(NOT APPLE) + # TODO(liuyiqu: Temporarily disable the link flag because it is not support on Mac. + set(LINK_FLAGS "-Wl,--retain-symbols-file ${CMAKE_CURRENT_SOURCE_DIR}/paddle_fluid.sym") + set_target_properties(paddle_fluid PROPERTIES LINK_FLAGS "${LINK_FLAGS}") +endif() # Create shared library cc_library(paddle_fluid_shared SHARED - SRCS io.cc - DEPS ${fluid_modules}) + SRCS io.cc ${CMAKE_CURRENT_SOURCE_DIR}/api/api.cc ${CMAKE_CURRENT_SOURCE_DIR}/api/api_impl.cc + DEPS ${fluid_modules} paddle_fluid_api) + set_target_properties(paddle_fluid_shared PROPERTIES OUTPUT_NAME paddle_fluid) if(NOT APPLE) # TODO(liuyiqun): Temporarily disable the link flag because it is not support on Mac. 
set(LINK_FLAGS "-Wl,--version-script ${CMAKE_CURRENT_SOURCE_DIR}/paddle_fluid.map") set_target_properties(paddle_fluid_shared PROPERTIES LINK_FLAGS "${LINK_FLAGS}") + # check symbol hidden + FILE(WRITE ${CMAKE_CURRENT_BINARY_DIR}/check_symbol.cmake + "execute_process(COMMAND bash -c \"${CMAKE_CURRENT_SOURCE_DIR}/check_symbol.sh" + " ${CMAKE_CURRENT_BINARY_DIR}/libpaddle_fluid.so\" RESULT_VARIABLE symbol_res)\n" + "if(NOT \"\${symbol_res}\" STREQUAL \"0\")\n" + " message(FATAL_ERROR \"Check symbol failed.\")\n" + "endif()\n") + add_custom_command( + OUTPUT "${CMAKE_CURRENT_BINARY_DIR}/.check_symbol" + COMMAND ${CMAKE_COMMAND} -P "${CMAKE_CURRENT_BINARY_DIR}/check_symbol.cmake" + DEPENDS paddle_fluid_shared) + add_custom_target(check_symbol ALL DEPENDS "${CMAKE_CURRENT_BINARY_DIR}/.check_symbol") endif() if(WITH_TESTING) + # both tests/book and analysis depends the models that generated by python/paddle/fluid/tests/book add_subdirectory(tests/book) endif() - -if (TENSORRT_FOUND) - add_subdirectory(tensorrt) -endif() diff --git a/paddle/fluid/inference/analysis/CMakeLists.txt b/paddle/fluid/inference/analysis/CMakeLists.txt new file mode 100644 index 0000000000..27fe575cb6 --- /dev/null +++ b/paddle/fluid/inference/analysis/CMakeLists.txt @@ -0,0 +1,45 @@ +cc_library(analysis SRCS pass_manager.cc dot.cc node.cc data_flow_graph.cc graph_traits.cc subgraph_splitter.cc + fluid_to_data_flow_graph_pass.cc + data_flow_graph_to_fluid_pass.cc + dfg_graphviz_draw_pass.cc + tensorrt_subgraph_pass.cc + tensorrt_subgraph_node_mark_pass.cc + analyzer.cc + helper.cc + model_store_pass.cc + DEPS framework_proto proto_desc) +cc_test(test_node SRCS node_tester.cc DEPS analysis) +cc_test(test_dot SRCS dot_tester.cc DEPS analysis) +cc_binary(inference_analyzer SRCS analyzer_main.cc DEPS analysis) + +set(PYTHON_TESTS_DIR ${PADDLE_BINARY_DIR}/python/paddle/fluid/tests) + +function (inference_analysis_test TARGET) + if(WITH_TESTING) + set(options "") + set(oneValueArgs "") + set(multiValueArgs SRCS) + cmake_parse_arguments(analysis_test "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN}) + + set(mem_opt "") + if(WITH_GPU) + set(mem_opt "--fraction_of_gpu_memory_to_use=0.5") + endif() + cc_test(${TARGET} + SRCS "${analysis_test_SRCS}" + DEPS analysis + ARGS --inference_model_dir=${PYTHON_TESTS_DIR}/book/word2vec.inference.model ${mem_opt}) + set_tests_properties(${TARGET} PROPERTIES DEPENDS test_word2vec) + endif(WITH_TESTING) +endfunction(inference_analysis_test) + +inference_analysis_test(test_data_flow_graph SRCS data_flow_graph_tester.cc) +inference_analysis_test(test_data_flow_graph_to_fluid_pass SRCS data_flow_graph_to_fluid_pass_tester.cc) +inference_analysis_test(test_fluid_to_data_flow_graph_pass SRCS fluid_to_data_flow_graph_pass_tester.cc) +inference_analysis_test(test_subgraph_splitter SRCS subgraph_splitter_tester.cc) +inference_analysis_test(test_dfg_graphviz_draw_pass SRCS dfg_graphviz_draw_pass_tester.cc) +inference_analysis_test(test_tensorrt_subgraph_pass SRCS tensorrt_subgraph_pass_tester.cc) +inference_analysis_test(test_pass_manager SRCS pass_manager_tester.cc) +inference_analysis_test(test_tensorrt_subgraph_node_mark_pass SRCS tensorrt_subgraph_node_mark_pass_tester.cc) +inference_analysis_test(test_analyzer SRCS analyzer_tester.cc) +inference_analysis_test(test_model_store_pass SRCS model_store_pass_tester.cc) diff --git a/paddle/fluid/inference/analysis/README.md b/paddle/fluid/inference/analysis/README.md new file mode 100644 index 0000000000..70adb4a974 --- /dev/null +++ 
b/paddle/fluid/inference/analysis/README.md @@ -0,0 +1,58 @@ +# Inference Analysis + +The `inference/analysis` module is used to analyze and optimize the inference program; +it borrows some philosophy from `LLVM/analysis` +and makes the various optimization features pluggable so that they can co-exist in a pipeline. + +We borrowed some concepts from LLVM, such as + +- [Pass](./pass.h)es to implement optimizations that traverse the inference program, +- [DataFlowGraph](./data_flow_graph.h) to represent the data flow graph built from a program, +- [PassManager](./pass_manager.h) to manage a sequence of `Pass`es over a graph. + +There are some other basic concepts here + +- [Node](./node.h), the node in a `DataFlowGraph`, + - `Function`, the Operator in Fluid, + - `Value`, the Variable in Fluid; +- [Argument](./argument.h), the argument that serves as the input and output of all the `Pass`es in the pipeline. + +## How it works + +The `inference/analysis` module arranges all the passes in a pipeline and works as follows: + +1. Build a `DataFlowGraph` from a Fluid inference ProgramDesc, +2. Call the middle passes one by one; the same `DataFlowGraph` is passed across all the passes, +3. Transform the modified `DataFlowGraph` back into a new ProgramDesc. + +New optimization features can be added as independent `Pass`es and controlled by gflags; +each pass will generate unified debug information or a visualization for better debugging. + +## Supported Passes + +### `FluidToDataFlowGraphPass` +Transforms the Fluid `ProgramDesc` into a `DataFlowGraph` to give an abstract representation for all the middle passes; +this should be the first pass of the pipeline. + +### `DataFlowGraphToFluidPass` +Generates the final `ProgramDesc` from a data flow graph; this should be the last pass of the pipeline. + +### `TensorRTSubgraphNodeMarkPass` +Marks the `Node`s that are supported by TensorRT; +this pass will generate a visualization file which can be used for debugging. + +### `TensorRTSubGraphPass` +Splits out the sub-graphs that can be accelerated by TensorRT. + +### `DFG_GraphvizDrawPass` +This pass is just for debugging; it visualizes the `DataFlowGraph` using the [graphviz](http://www.graphviz.org) tool. + +It can be used as a helper class that draws the modified graph after each pass. + +## Utilities + +There are some helper functions/classes for analysis. + +- [dot.h](./dot.h) gives an easy-to-use interface for generating `DOT` code, +- [graph_traits.h](./graph_traits.h) contains the interfaces of the graph traversal algorithms; it uses `iterator`s to make the algorithms easy to share across different passes, +and there are some implementations in [data_flow_graph.cc](./data_flow_graph.cc), such as BFS and DFS. diff --git a/paddle/fluid/inference/analysis/analyzer.cc b/paddle/fluid/inference/analysis/analyzer.cc new file mode 100644 index 0000000000..9318f10897 --- /dev/null +++ b/paddle/fluid/inference/analysis/analyzer.cc @@ -0,0 +1,100 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and +// limitations under the License. + +#include "paddle/fluid/inference/analysis/analyzer.h" +#include +#include "paddle/fluid/inference/analysis/data_flow_graph_to_fluid_pass.h" +#include "paddle/fluid/inference/analysis/dfg_graphviz_draw_pass.h" +#include "paddle/fluid/inference/analysis/fluid_to_data_flow_graph_pass.h" +#include "paddle/fluid/inference/analysis/model_store_pass.h" +#include "paddle/fluid/inference/analysis/pass_manager.h" +#include "paddle/fluid/inference/analysis/tensorrt_subgraph_node_mark_pass.h" +#include "paddle/fluid/inference/analysis/tensorrt_subgraph_pass.h" + +namespace paddle { + +DEFINE_bool(inference_analysis_enable_tensorrt_subgraph_engine, true, + "Enable subgraph to TensorRT engine for acceleration"); + +DEFINE_string(inference_analysis_graphviz_log_root, "./", + "Graphviz debuger for data flow graphs."); + +DEFINE_string(inference_analysis_output_storage_path, "", + "optimized model output path"); + +namespace inference { +namespace analysis { + +class DfgPassManagerImpl final : public DfgPassManager { + public: + DfgPassManagerImpl() { + // TODO(Superjomn) set the key with pass reprs. + AddPass("fluid-to-data-flow-graph", new FluidToDataFlowGraphPass); + if (FLAGS_inference_analysis_enable_tensorrt_subgraph_engine) { + auto trt_teller = [&](const Node* node) { + std::unordered_set teller_set( + {"elementwise_add", "mul", "conv2d", "pool2d", "relu", "softmax"}); + if (!node->IsFunction()) return false; + + const auto* func = static_cast(node); + if (teller_set.count(func->func_type())) { + return true; + } else { + return false; + } + }; + + AddPass("tensorrt-subgraph-marker", + new TensorRTSubgraphNodeMarkPass(trt_teller)); + AddPass("tensorrt-subgraph", new TensorRTSubGraphPass(trt_teller)); + } + AddPass("data-flow-graph-to-fluid", new DataFlowGraphToFluidPass); + if (!FLAGS_inference_analysis_output_storage_path.empty()) { + AddPass("model-store-pass", new ModelStorePass); + } + } + + std::string repr() const override { return "dfg-pass-manager"; } + std::string description() const override { return "DFG pass manager."; } + + private: + void AddPass(const std::string& name, Pass* pass) { + LOG(INFO) << "Adding pass " << name; + Register(name, pass); + AddGraphvizDebugerPass(pass); + } + + // Add the graphviz debuger pass if the parent pass has one. + void AddGraphvizDebugerPass(Pass* pass) { + auto* debuger_pass = pass->CreateGraphvizDebugerPass(); + if (debuger_pass) { + LOG(INFO) << " - register debug pass [" << debuger_pass->repr() << "]"; + Register(debuger_pass->repr(), debuger_pass); + } + } +}; + +Analyzer::Analyzer() { Register("manager1", new DfgPassManagerImpl); } + +void Analyzer::Run(Argument* argument) { + for (auto& x : data_) { + PADDLE_ENFORCE(x->Initialize(argument)); + x->RunAll(); + PADDLE_ENFORCE(x->Finalize()); + } +} + +} // namespace analysis +} // namespace inference +} // namespace paddle diff --git a/paddle/fluid/inference/analysis/analyzer.h b/paddle/fluid/inference/analysis/analyzer.h new file mode 100644 index 0000000000..c82fdfff86 --- /dev/null +++ b/paddle/fluid/inference/analysis/analyzer.h @@ -0,0 +1,65 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#pragma once + +/* + * This file contains Analyzer, a class exposed as a library that analyzes and + * optimizes a Fluid ProgramDesc for inference. Similar to LLVM, it has + * multiple flags to control whether a process is applied to the program. + * + * The processes are called Passes in analysis. The Passes are placed in a + * pipeline: the first Pass is FluidToDataFlowGraphPass, which transforms a + * Fluid ProgramDesc into a data flow graph; the last Pass is + * DataFlowGraphToFluidPass, which transforms a data flow graph back into a + * Fluid ProgramDesc. The passes in the middle of the pipeline can be any + * Passes that take a node or a data flow graph as input. + * + * The Analyzer can be used in two ways. The first is as an executable, which + * pre-processes the inference model and is controlled by passing different + * command-line flags; the other is to compose it inside the inference API as + * a runtime pre-processing phase of the inference service. + */ + +#include <gflags/gflags.h> +#include "paddle/fluid/inference/analysis/pass.h" +#include "paddle/fluid/inference/analysis/pass_manager.h" + +namespace paddle { + +// TODO(Superjomn) add a definition flag like PADDLE_WITH_TENSORRT and hide this +// flag if not available. +DECLARE_bool(inference_analysis_enable_tensorrt_subgraph_engine); +DECLARE_string(inference_analysis_graphviz_log_root); +DECLARE_string(inference_analysis_output_storage_path); + +namespace inference { +namespace analysis { + +class Analyzer : public OrderedRegistry<PassManager> { + public: + // Register all the pass-managers. + Analyzer(); + + void Run(Argument* argument); + + DISABLE_COPY_AND_ASSIGN(Analyzer); +}; + +} // namespace analysis +} // namespace inference +} // namespace paddle diff --git a/paddle/fluid/inference/analysis/analyzer_main.cc b/paddle/fluid/inference/analysis/analyzer_main.cc new file mode 100644 index 0000000000..5e1fe3eb79 --- /dev/null +++ b/paddle/fluid/inference/analysis/analyzer_main.cc @@ -0,0 +1,33 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/* + * This file implements the analyzer -- an executable that helps to analyze + * and optimize a trained model.
+ */ +#include "paddle/fluid/inference/analysis/analyzer.h" +#include +#include + +int main(int argc, char** argv) { + google::ParseCommandLineFlags(&argc, &argv, true); + using paddle::inference::analysis::Analyzer; + using paddle::inference::analysis::Argument; + + Argument argument; + Analyzer analyzer; + analyzer.Run(&argument); + + return 0; +} diff --git a/paddle/fluid/inference/analysis/analyzer_tester.cc b/paddle/fluid/inference/analysis/analyzer_tester.cc new file mode 100644 index 0000000000..24bfb3993c --- /dev/null +++ b/paddle/fluid/inference/analysis/analyzer_tester.cc @@ -0,0 +1,41 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "paddle/fluid/inference/analysis/analyzer.h" +#include +#include "paddle/fluid/inference/analysis/ut_helper.h" + +namespace paddle { +namespace inference { +namespace analysis { + +TEST(Analyzer, analysis_without_tensorrt) { + FLAGS_inference_analysis_enable_tensorrt_subgraph_engine = false; + Argument argument; + argument.fluid_model_dir.reset(new std::string(FLAGS_inference_model_dir)); + Analyzer analyser; + analyser.Run(&argument); +} + +TEST(Analyzer, analysis_with_tensorrt) { + FLAGS_inference_analysis_enable_tensorrt_subgraph_engine = true; + Argument argument; + argument.fluid_model_dir.reset(new std::string(FLAGS_inference_model_dir)); + Analyzer analyser; + analyser.Run(&argument); +} + +} // namespace analysis +} // namespace inference +} // namespace paddle diff --git a/paddle/fluid/inference/analysis/argument.cc b/paddle/fluid/inference/analysis/argument.cc new file mode 100644 index 0000000000..cb0263d5d9 --- /dev/null +++ b/paddle/fluid/inference/analysis/argument.cc @@ -0,0 +1,15 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "paddle/fluid/inference/analysis/argument.h" diff --git a/paddle/fluid/inference/analysis/argument.h b/paddle/fluid/inference/analysis/argument.h new file mode 100644 index 0000000000..a17d6281a2 --- /dev/null +++ b/paddle/fluid/inference/analysis/argument.h @@ -0,0 +1,72 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/* + * This file defines the class Argument, which is the input and output of the + * analysis module. All the fields needed either by Passes or PassManagers + * are contained in Argument. + * + * TODO(Superjomn) Find some better way to contain the fields when it grows + * too big. + */ + +#pragma once + +#include +#include "paddle/fluid/framework/program_desc.h" +#include "paddle/fluid/inference/analysis/data_flow_graph.h" + +namespace paddle { +namespace inference { +namespace analysis { + +/* + * The argument definition of both Pass and PassManagers. + * + * All the fields should be registered here for clarity. + */ +struct Argument { + Argument() = default; + explicit Argument(const std::string& fluid_model_dir) + : fluid_model_dir(new std::string(fluid_model_dir)) {} + // The directory of the trained model. + std::unique_ptr<std::string> fluid_model_dir; + // The paths of `__model__` and `param`; these are used when the file names + // of the model and parameters are changed. + std::unique_ptr<std::string> fluid_model_program_path; + std::unique_ptr<std::string> fluid_model_param_path; + + // The graph processed by the Passes or PassManagers. + std::unique_ptr<DataFlowGraph> main_dfg; + + // The original program desc. + std::unique_ptr<framework::proto::ProgramDesc> origin_program_desc; + + // The processed program desc. + std::unique_ptr<framework::proto::ProgramDesc> transformed_program_desc; + + // The output storage path of ModelStorePass. + std::unique_ptr<std::string> model_output_store_path; +}; + +#define UNLIKELY(condition) __builtin_expect(static_cast<bool>(condition), 0) +#define ANALYSIS_ARGUMENT_CHECK_FIELD(field__) \ + if (UNLIKELY(!(field__))) { \ + LOG(ERROR) << "field " << #field__ << " should be set."; \ + return false; \ + } + +} // namespace analysis +} // namespace inference +} // namespace paddle diff --git a/paddle/fluid/inference/analysis/data_flow_graph.cc b/paddle/fluid/inference/analysis/data_flow_graph.cc new file mode 100644 index 0000000000..7f64bc75ae --- /dev/null +++ b/paddle/fluid/inference/analysis/data_flow_graph.cc @@ -0,0 +1,370 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include "paddle/fluid/inference/analysis/data_flow_graph.h" +#include "paddle/fluid/inference/analysis/dot.h" +#include "paddle/fluid/inference/analysis/node.h" + +namespace paddle { +namespace inference { +namespace analysis { + +// Ideally, the inputs and outputs of this graph would be set manually +// beforehand, but there must be a Pass that helps to prune the unnecessary ops +// that do not contribute to the given targets, so analyzing the graph and +// extracting the inputs and outputs in this pass is acceptable.
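The comment above describes the set-difference derivation that `Build()` (next hunk) implements: collect every node that appears as the source of an edge and every node that appears as a destination, then diff the two sets. Below is a minimal standalone sketch of that logic using only standard containers; `MiniNode` and `DeriveIO` are illustrative stand-ins, not the module's real `Node` API.

```cpp
#include <unordered_set>
#include <vector>

// Illustrative stand-in for the analysis module's Node: only the link
// lists matter for this derivation.
struct MiniNode {
  std::vector<MiniNode*> inlinks;   // nodes this node reads from
  std::vector<MiniNode*> outlinks;  // nodes this node writes to
};

// A graph input is read by some node but never written; a graph output is
// written by some node but never read.
void DeriveIO(const std::vector<MiniNode*>& nodes,
              std::vector<MiniNode*>* inputs,
              std::vector<MiniNode*>* outputs) {
  std::unordered_set<MiniNode*> read, written;
  for (auto* n : nodes) {
    for (auto* in : n->inlinks) read.insert(in);
    for (auto* out : n->outlinks) written.insert(out);
  }
  for (auto* n : read) {
    if (!written.count(n)) inputs->push_back(n);  // read, never written
  }
  for (auto* n : written) {
    if (!read.count(n)) outputs->push_back(n);  // tested against the read set
  }
}
```

Note that the output test must check membership in the *read* set (`!read.count(n)`); testing the written set against itself can never match, which is the condition `if (!ins.count(out))` in `Build()` below.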
+void DataFlowGraph::Build() { + inputs.clear(); + outputs.clear(); + std::unordered_set<Node *> ins; + std::unordered_set<Node *> outs; + for (auto &node : nodes.nodes()) { + for (auto *in : node->inlinks) { + ins.insert(in); + } + for (auto *out : node->outlinks) { + outs.insert(out); + } + } + + // The nodes that are in ins but not in outs are the graph's inputs; + // similarly, the nodes that are in outs but not in ins are the graph's outputs. + for (auto *in : ins) { + if (!outs.count(in)) { + inputs.push_back(in); + } + } + for (auto *out : outs) { + if (!ins.count(out)) { + outputs.push_back(out); + } + } + + Clean(); +} + +void DataFlowGraph::Clean() { + for (auto &node : nodes.nodes()) { + std::unordered_set<Node *> inlinks_set(node->inlinks.begin(), + node->inlinks.end()); + std::unordered_set<Node *> outlinks_set(node->outlinks.begin(), + node->outlinks.end()); + if (inlinks_set.size() < node->inlinks.size()) { + LOG(INFO) << "Clean: node " << node->repr() << " prune duplicate inputs"; + node->inlinks.assign(inlinks_set.begin(), inlinks_set.end()); + } + if (outlinks_set.size() < node->outlinks.size()) { + LOG(INFO) << "Clean: node " << node->repr() << " prune duplicate outputs"; + node->outlinks.assign(outlinks_set.begin(), outlinks_set.end()); + } + } +} + +std::string DataFlowGraph::DotString() const { + Dot dot; + + // Add nodes + for (size_t i = 0; i < nodes.size(); i++) { + const Node &node = nodes.Get(i); + dot.AddNode(node.repr(), node.dot_attrs()); + } + + // Add edges + for (size_t i = 0; i < nodes.size(); i++) { + const Node &node = nodes.Get(i); + for (auto &in : node.inlinks) { + dot.AddEdge(in->repr(), node.repr(), {}); + } + } + return dot.Build(); +} + +std::string DataFlowGraph::HumanReadableInfo(bool show_values, + bool show_functions) const { + std::stringstream values, functions; + for (auto &n : nodes.nodes()) { + if (show_values && n->IsValue()) { + values << n->repr() << "\n"; + } + if (show_functions && n->IsFunction()) { + functions << n->repr() << "\n"; + } + } + return "Values:\n" + values.str() + "\n\n" + "Functions:\n" + functions.str(); +} + +// +// NodesBFSIterator +// + +GraphTraits<DataFlowGraph>::NodesBFSIterator::NodesBFSIterator( + const std::vector<Node *> &source) + : queue_(source.begin(), source.end()) {} + +// GraphTraits<DataFlowGraph>::NodesBFSIterator::NodesBFSIterator( +// GraphTraits<DataFlowGraph>::NodesBFSIterator &&other) noexcept +// : queue_(std::move(other.queue_)), +// visited_(std::move(other.visited_)) {} + +GraphTraits<DataFlowGraph>::NodesBFSIterator::NodesBFSIterator( + const GraphTraits<DataFlowGraph>::NodesBFSIterator &other) + : queue_(other.queue_), visited_(other.visited_) {} + +Node &GraphTraits<DataFlowGraph>::NodesBFSIterator::operator*() { + PADDLE_ENFORCE(!queue_.empty()); + return *queue_.front(); +} + +Node *GraphTraits<DataFlowGraph>::NodesBFSIterator::operator->() { + PADDLE_ENFORCE(!queue_.empty()); + return queue_.front(); +} + +GraphTraits<DataFlowGraph>::NodesBFSIterator & +GraphTraits<DataFlowGraph>::NodesBFSIterator::operator=( + const GraphTraits<DataFlowGraph>::NodesBFSIterator &other) { + queue_ = other.queue_; + visited_ = other.visited_; + return *this; +} + +GraphTraits<DataFlowGraph>::NodesBFSIterator + &GraphTraits<DataFlowGraph>::NodesBFSIterator::operator++() { + PADDLE_ENFORCE(!queue_.empty()); + auto *cur = queue_.front(); + visited_.insert(cur); + queue_.pop_front(); + for (auto *output : cur->outlinks) { + if (!visited_.count(output)) { + queue_.push_back(output); + visited_.insert(output); + } + } + return *this; +} + +bool GraphTraits<DataFlowGraph>::NodesBFSIterator::operator==( + const GraphTraits<DataFlowGraph>::NodesBFSIterator &other) { + if (queue_.empty()) return other.queue_.empty(); + if ((!queue_.empty()) && (!other.queue_.empty()))
{ + return queue_.front() == other.queue_.front() && + visited_.size() == other.visited_.size(); // here need to check the + // equality of queue and + // visited. Just a light but week implementation. + } + return false; +} + +// +// NodesDFSIterator +// +GraphTraits::NodesDFSIterator::NodesDFSIterator( + const std::vector &source) { + for (auto *x : source) stack_.push(x); +} + +// GraphTraits::NodesDFSIterator::NodesDFSIterator( +// GraphTraits::NodesDFSIterator &&other) noexcept +// : stack_(std::move(other.stack_)), +// visited_(std::move(other.visited_)) {} + +GraphTraits::NodesDFSIterator::NodesDFSIterator( + const GraphTraits::NodesDFSIterator &other) + : stack_(other.stack_), visited_(other.visited_) {} + +Node &GraphTraits::NodesDFSIterator::operator*() { + PADDLE_ENFORCE(!stack_.empty()); + return *stack_.top(); +} + +GraphTraits::NodesDFSIterator + &GraphTraits::NodesDFSIterator::operator++() { + if (stack_.empty()) return *this; + visited_.insert(stack_.top()); + auto *cur = stack_.top(); + stack_.pop(); + for (auto *x : cur->outlinks) { + if (!visited_.count(x)) { + stack_.push(x); + visited_.insert(x); + } + } + return *this; +} +bool GraphTraits::NodesDFSIterator::operator==( + const GraphTraits::NodesDFSIterator &other) { + if (stack_.empty()) return other.stack_.empty(); + if ((!stack_.empty()) && (!other.stack_.empty())) { + return stack_.top() == other.stack_.top(); + } + return false; +} + +GraphTraits::NodesDFSIterator & +GraphTraits::NodesDFSIterator::operator=( + const GraphTraits::NodesDFSIterator &other) { + stack_ = other.stack_; + visited_ = other.visited_; + return *this; +} +Node *GraphTraits::NodesDFSIterator::operator->() { + return stack_.top(); +} + +inline bool CheckNodeIndegreeEquals(const Node &node, size_t n) { + return node.inlinks.size() == n; +} + +GraphTraits::NodesTSIterator::NodesTSIterator( + const std::vector &source) { + PADDLE_ENFORCE(!source.empty(), + "Start points of topological sorting should not be empty!"); + // CHECK all the inputs' in-degree is 0 + for (auto *node : source) { + PADDLE_ENFORCE(CheckNodeIndegreeEquals(*node, 0)); + } + + std::unordered_set visited; + std::unordered_set to_visit{source.begin(), source.end()}; + + std::vector inlink_visited; + while (!to_visit.empty()) { + std::vector queue(to_visit.begin(), to_visit.end()); + for (auto *p : queue) { + if (p->deleted()) { + visited.insert(p); + to_visit.erase(p); + continue; + } + inlink_visited.clear(); + + std::copy_if(p->inlinks.begin(), p->inlinks.end(), + std::back_inserter(inlink_visited), + [&](Node *x) { return visited.count(x); }); + + if (inlink_visited.size() == p->inlinks.size()) { + sorted_.push_back(p); + for (auto *_ : p->outlinks) { + if (!visited.count(_)) { + to_visit.insert(_); + } + } + + to_visit.erase(p); + visited.insert(p); + } + } + } +} + +GraphTraits::NodesTSIterator::NodesTSIterator( + const paddle::inference::analysis::GraphTraits< + DataFlowGraph>::NodesTSIterator &other) + : sorted_(other.sorted_), cursor_(other.cursor_) {} + +Node &GraphTraits::NodesTSIterator::operator*() { + PADDLE_ENFORCE_LT(cursor_, sorted_.size()); + return *sorted_[cursor_]; +} + +paddle::inference::analysis::GraphTraits::NodesTSIterator + &GraphTraits::NodesTSIterator::operator++() { + if (++cursor_ >= sorted_.size()) { + sorted_.clear(); + cursor_ = 0; + } + return *this; +} +paddle::inference::analysis::GraphTraits::NodesTSIterator & +GraphTraits::NodesTSIterator::operator=( + const paddle::inference::analysis::GraphTraits< + DataFlowGraph>::NodesTSIterator &other) 
{ + cursor_ = other.cursor_; + sorted_ = other.sorted_; + return *this; +} + +bool GraphTraits::NodesTSIterator::operator==( + const paddle::inference::analysis::GraphTraits< + DataFlowGraph>::NodesTSIterator &other) { + return sorted_ == other.sorted_ && cursor_ == other.cursor_; +} + +Node *GraphTraits::NodesTSIterator::operator->() { + PADDLE_ENFORCE_LT(cursor_, sorted_.size()); + return sorted_[cursor_]; +} + +std::pair, std::vector> +ExtractInputAndOutputOfSubGraph(std::vector &graph) { // NOLINT + std::unordered_set nodes(graph.begin(), graph.end()); + std::unordered_set inputs; + std::unordered_set outputs; + // Input a Value, check whether its inlink is in the subgraph. + auto inlink_in_subgraph = [&](Node *n) { + for (auto *in : n->inlinks) { + if (nodes.count(in)) return true; + } + return false; + }; + for (auto &node : graph) { + for (auto *in : node->inlinks) { + // The Value that is written by nodes inside a sub-graph shouldn't be the + // input of the sub-graph. + if (!nodes.count(in) && in->type() == Node::Type::kValue && + !inlink_in_subgraph(in)) { + inputs.insert(in); + } + } + for (auto *out : node->outlinks) { + if (!nodes.count(out) && out->type() == Node::Type::kValue) { + outputs.insert(out); + } + } + } + return std::make_pair(std::vector(inputs.begin(), inputs.end()), + std::vector(outputs.begin(), outputs.end())); +} + +void FilterRedundantOutputOfSubGraph(DataFlowGraph *graph) { + std::vector op_nodes; + for (auto &node : GraphTraits(graph).nodes_in_TS()) { + if (node.type() == Node::Type::kValue || node.deleted()) { + continue; + } + op_nodes.push_back(&node); + } + size_t op_num = op_nodes.size(); + for (size_t i = 0; i < op_num; i++) { + if (op_nodes[i]->type() == Node::Type::kFunction) continue; + std::unordered_set follow_up_input_names; + for (size_t j = i + 1; j < op_num; j++) { + for (auto *in : op_nodes[j]->inlinks) { + follow_up_input_names.insert(in->name()); + } + } + std::vector filtered_subgraph_outlinks; + for (auto *out : op_nodes[i]->outlinks) { + if (follow_up_input_names.count(out->name())) { + filtered_subgraph_outlinks.push_back(out); + } + } + PADDLE_ENFORCE_GE(filtered_subgraph_outlinks.size(), 1UL); + op_nodes[i]->outlinks = filtered_subgraph_outlinks; + } +} + +} // namespace analysis +} // namespace inference +} // namespace paddle diff --git a/paddle/fluid/inference/analysis/data_flow_graph.h b/paddle/fluid/inference/analysis/data_flow_graph.h new file mode 100644 index 0000000000..bb3ec6bbc1 --- /dev/null +++ b/paddle/fluid/inference/analysis/data_flow_graph.h @@ -0,0 +1,184 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +/* + * Data flow graph is an pass that build the basic graph. It contains a graph + * and the iterators that enable the iteration over the graph. 
+ */ + +#pragma once + +#include +#include +#include +#include +#include +#include + +#include "paddle/fluid/inference/analysis/graph_traits.h" +#include "paddle/fluid/inference/analysis/node.h" +#include "paddle/fluid/platform/enforce.h" + +namespace paddle { +namespace inference { +namespace analysis { + +/* + * DataFlowGraph - A container of Value and Function Nodes. + * + * This is the base graph for any other type of graphs, such as SSA or CFG. + */ +struct DataFlowGraph { + NodeMap nodes; + std::vector inputs; + std::vector outputs; + + // Extract inputs and outputs of the graph. + void Build(); + + // Output a DOT graph file for debug. + std::string DotString() const; + + std::string HumanReadableInfo(bool show_values = true, + bool show_functions = true) const; + + private: + // Remove duplicate edges and so on. + void Clean(); +}; + +/* + * An graph trait help to traverse the graph using BFS. + * The BFS start from a graph's inputs, the graph should be fully-connected, so + * that the iterator can reach the end. + */ +template <> +struct GraphTraits { + // BFS iterator on nodes. + struct NodesBFSIterator + : public std::iterator { + NodesBFSIterator() = default; + explicit NodesBFSIterator(const std::vector &source); + // NodesBFSIterator(NodesBFSIterator &&other) noexcept; + // NOTE Heavy to use. + NodesBFSIterator(const NodesBFSIterator &other); + + Node &operator*(); + NodesBFSIterator &operator++(); + Node *operator->(); + // TODO(Superjomn) current implementation just compare the first + // element, need to compare the graph and all the elements in the queue and + // set. + NodesBFSIterator &operator=(const NodesBFSIterator &other); + bool operator==(const NodesBFSIterator &other); + bool operator!=(const NodesBFSIterator &other) { return !(*this == other); } + + private: + std::deque queue_; + std::unordered_set visited_; + }; + + // DFS iterator on nodes. + struct NodesDFSIterator + : public std::iterator { + NodesDFSIterator() = default; + explicit NodesDFSIterator(const std::vector &source); + // NodesDFSIterator(NodesDFSIterator &&other) noexcept; + NodesDFSIterator(const NodesDFSIterator &other); + + Node &operator*(); + NodesDFSIterator &operator++(); + // TODO(Superjomn) current implementation just compare the first + // element, need to compare the graph and all the elements in the queue and + // set. + NodesDFSIterator &operator=(const NodesDFSIterator &other); + bool operator==(const NodesDFSIterator &other); + bool operator!=(const NodesDFSIterator &other) { return !(*this == other); } + Node *operator->(); + + private: + std::stack stack_; + std::unordered_set visited_; + }; + + // Topological sorting iterator on nodes. + struct NodesTSIterator + : public std::iterator { + NodesTSIterator() = default; + explicit NodesTSIterator(const std::vector &source); + NodesTSIterator(NodesTSIterator &&other) + : sorted_(std::move(other.sorted_)), cursor_(other.cursor_) { + other.cursor_ = 0; + } + NodesTSIterator(const NodesTSIterator &other); + + Node &operator*(); + NodesTSIterator &operator++(); + // TODO(Superjomn) current implementation just compare the first + // element, need to compare the graph and all the elements in the queue and + // set. 
+ NodesTSIterator &operator=(const NodesTSIterator &other); + bool operator==(const NodesTSIterator &other); + bool operator!=(const NodesTSIterator &other) { return !(*this == other); } + Node *operator->(); + + private: + std::vector sorted_; + size_t cursor_{0}; + }; + + explicit GraphTraits(DataFlowGraph *graph) : graph_(graph) {} + + // default use BFS to visit the nodes. + iterator_range nodes() { + return iterator_range(nodes_bfs_begin(), nodes_bfs_end()); + } + iterator_range nodes_in_BFS() { + return iterator_range(nodes_bfs_begin(), nodes_bfs_end()); + } + iterator_range nodes_in_DFS() { + return iterator_range(nodes_dfs_begin(), nodes_dfs_end()); + } + iterator_range nodes_in_TS() { + return iterator_range(nodes_ts_begin(), nodes_ts_end()); + } + + private: + NodesBFSIterator nodes_bfs_begin() { + return NodesBFSIterator(graph_->inputs); + } + NodesBFSIterator nodes_bfs_end() { return NodesBFSIterator(); } + + NodesDFSIterator nodes_dfs_begin() { + return NodesDFSIterator(graph_->inputs); + } + NodesDFSIterator nodes_dfs_end() { return NodesDFSIterator(); } + + NodesTSIterator nodes_ts_begin() { return NodesTSIterator(graph_->inputs); } + NodesTSIterator nodes_ts_end() { return NodesTSIterator(); } + + private: + DataFlowGraph *graph_; +}; + +// Extract the inputs and outputs of a graph. The inputs and outputs of a +// sub-graph is the inputs nodes and output nodes that doesn't inside the +// sub-graph. +std::pair, std::vector> +ExtractInputAndOutputOfSubGraph(std::vector &graph); // NOLINT + +void FilterRedundantOutputOfSubGraph(DataFlowGraph *graph); +} // namespace analysis +} // namespace inference +} // namespace paddle diff --git a/paddle/fluid/inference/analysis/data_flow_graph_tester.cc b/paddle/fluid/inference/analysis/data_flow_graph_tester.cc new file mode 100644 index 0000000000..a881262665 --- /dev/null +++ b/paddle/fluid/inference/analysis/data_flow_graph_tester.cc @@ -0,0 +1,127 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ + +#include "paddle/fluid/inference/analysis/data_flow_graph.h" +#include "paddle/fluid/inference/analysis/ut_helper.h" + +namespace paddle { +namespace inference { +namespace analysis { + +TEST(DataFlowGraph, BFS) { + auto desc = LoadProgramDesc(FLAGS_inference_model_dir + "/__model__"); + auto dfg = ProgramDescToDFG(desc); + dfg.Build(); + + for (auto *in : dfg.inputs) { + LOG(INFO) << "inputs: " << in->name() << " " + << static_cast(in->type()); + } + for (auto *out : dfg.outputs) { + LOG(INFO) << "outputs: " << out->name() << " " + << static_cast(out->type()); + } + + GraphTraits trait(&dfg); + auto nodes = trait.nodes(); + size_t count = 0; + for (auto it = nodes.begin(); it != nodes.end(); ++it) { + LOG(INFO) << "visiting " << it->name(); + ++count; + } + ASSERT_EQ(count, dfg.nodes.size()); +} + +TEST(DataFlowGraph, DFS) { + auto desc = LoadProgramDesc(FLAGS_inference_model_dir + "/__model__"); + auto dfg = ProgramDescToDFG(desc); + dfg.Build(); + GraphTraits trait(&dfg); + auto nodes = trait.nodes_in_DFS(); + size_t count = 0; + for (auto it = nodes.begin(); it != nodes.end(); ++it) { + LOG(INFO) << "visiting " << it->name(); + ++count; + } + ASSERT_EQ(count, dfg.nodes.size()); +} + +// Topological sorting. +/* + * Graph topology + * inputs: 0, 1, 2 + * 0 -> 4 + * 0 -> 5 + * 1 -> 6 + * 2 -> 7 + * 4 -> 5 + * 4 -> 7 + * 4 -> 3 + * 7 -> 3 + */ +TEST(DataFlowGraph, TS) { + DataFlowGraph graph; + + for (int i = 0; i < 8; i++) { + auto *node = graph.nodes.Create(Node::Type::kValue); + node->SetName("node-" + std::to_string(i)); + } + + auto add_link = [&](int i, int j) { + Node *source = graph.nodes.GetMutable(i); + Node *target = graph.nodes.GetMutable(j); + target->inlinks.push_back(source); + source->outlinks.push_back(target); + }; + + graph.inputs.push_back(graph.nodes.GetMutable(0)); + graph.inputs.push_back(graph.nodes.GetMutable(1)); + graph.inputs.push_back(graph.nodes.GetMutable(2)); + + add_link(0, 4); + add_link(0, 5); + add_link(1, 6); + add_link(2, 7); + add_link(4, 5); + add_link(4, 7); + add_link(4, 3); + add_link(7, 3); + + auto its = GraphTraits(&graph).nodes_in_TS(); + std::vector sorted_ids; + for (auto it = its.begin(); it != its.end(); ++it) { + LOG(INFO) << it->name(); + sorted_ids.push_back(it->id()); + } + + // Assert a occurs prior to b in the sorted_ids. + auto assert_positive_sequence_pair = [&](int a, int b) { + auto a_offset = std::find(sorted_ids.begin(), sorted_ids.end(), a); + auto b_offset = std::find(sorted_ids.begin(), sorted_ids.end(), b); + ASSERT_LT(a_offset, b_offset); + }; + + assert_positive_sequence_pair(2, 7); + assert_positive_sequence_pair(7, 3); + assert_positive_sequence_pair(4, 3); + assert_positive_sequence_pair(0, 4); + assert_positive_sequence_pair(0, 5); + assert_positive_sequence_pair(1, 6); + assert_positive_sequence_pair(4, 5); + assert_positive_sequence_pair(4, 7); +} + +} // namespace analysis +} // namespace inference +} // namespace paddle diff --git a/paddle/fluid/inference/analysis/data_flow_graph_to_fluid_pass.cc b/paddle/fluid/inference/analysis/data_flow_graph_to_fluid_pass.cc new file mode 100644 index 0000000000..18c32fa091 --- /dev/null +++ b/paddle/fluid/inference/analysis/data_flow_graph_to_fluid_pass.cc @@ -0,0 +1,267 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "paddle/fluid/inference/analysis/data_flow_graph_to_fluid_pass.h"
+#include <vector>
+#include "paddle/fluid/framework/block_desc.h"
+#include "paddle/fluid/framework/op_desc.h"
+#include "paddle/fluid/framework/proto_desc.h"
+#include "paddle/fluid/inference/analysis/analyzer.h"
+#include "paddle/fluid/inference/analysis/dfg_graphviz_draw_pass.h"
+
+namespace paddle {
+namespace inference {
+
+DEFINE_int32(tensorrt_max_batchsize, 3, "TensorRT maximum batch size");
+DEFINE_int32(tensorrt_workspace_size, 2048, "TensorRT workspace size");
+
+namespace analysis {
+
+using framework::proto::ProgramDesc;
+
+std::vector<std::string> ExtractParameters(
+    const std::vector<std::unique_ptr<Node>> &nodes);
+
+bool DataFlowGraphToFluidPass::Initialize(Argument *argument) {
+  ANALYSIS_ARGUMENT_CHECK_FIELD(argument)
+  ANALYSIS_ARGUMENT_CHECK_FIELD(argument->origin_program_desc)
+  PADDLE_ENFORCE(!argument->transformed_program_desc);
+  // The transformed_program_desc should inherit all the VarDesc and BlockDesc
+  // from the original program desc. The operators of the main block (the
+  // first block) should be rewritten by the data flow graph.
+  argument->transformed_program_desc.reset(
+      new ProgramDesc(*argument->origin_program_desc));
+  argument->transformed_program_desc->mutable_blocks(framework::kRootBlockIndex)
+      ->clear_ops();
+  desc_ = argument->transformed_program_desc.get();
+  argument_ = argument;
+  return true;
+}
+
+bool DataFlowGraphToFluidPass::Finalize() { return true; }
+
+void DataFlowGraphToFluidPass::Run(DataFlowGraph *graph) {
+  FilterRedundantOutputOfSubGraph(graph);
+  LOG(INFO) << "graph.inputs " << graph->inputs.size();
+  for (auto &node : GraphTraits<DataFlowGraph>(graph).nodes_in_TS()) {
+    if (node.deleted()) continue;
+
+    switch (node.type()) {
+      case Node::Type::kFunction: {
+        LOG(INFO) << "add function " << node.repr();
+        AddFluidOp(&node);
+      } break;
+      case Node::Type::kFunctionBlock: {
+        LOG(INFO) << "add engine op " << node.repr() << " , "
+                  << static_cast<FunctionBlock *>(&node)->subgraph.size();
+        AddEngineOp(&node);
+      } break;
+      default:
+        continue;
+    }
+  }
+
+  PADDLE_ENFORCE(argument_->transformed_program_desc.get());
+}
+
+void DataFlowGraphToFluidPass::AddFluidOp(Node *node) {
+  auto *ori_op = static_cast<framework::proto::OpDesc *>(node->pb_desc());
+  // Currently, only the main block is analyzed.
+  auto *main_block = desc_->mutable_blocks(framework::kRootBlockIndex);
+  auto *op = main_block->add_ops();
+  *op = *ori_op;  // Copy the attributes; by default, these will not be
+                  // changed by the analysis phase.
+  // The inputs and outputs of the existing ops are not changed by the
+  // tensorrt subgraph pass.
+  // NOTE It might be changed by other passes in the long run.
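+  // A hypothetical illustration of the effect (names are made up): an
+  // original op proto such as
+  //   {type: "mul", inputs: {X: ["x"], Y: ["w"]}, outputs: {Out: ["y"]}}
+  // is appended to the transformed main block verbatim.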
+}
+
+void CreateTrtEngineOp(Node *node, const DataFlowGraph &graph,
+                       framework::proto::BlockDesc *block) {
+  static int counter{0};
+  PADDLE_ENFORCE(node->IsFunctionBlock());
+  framework::OpDesc desc;
+  auto *func = static_cast<FunctionBlock *>(node);
+
+  // Collect inputs.
+  std::unordered_set<std::string> input_names;
+  for (auto *x : func->inlinks) {
+    input_names.insert(x->name());
+  }
+  desc.SetInput(
+      "Xs", std::vector<std::string>(input_names.begin(), input_names.end()));
+
+  std::unordered_set<std::string> output_names;
+  for (auto *x : func->outlinks) {
+    output_names.insert(x->name());
+  }
+
+  std::vector<std::string> output_temp(output_names.begin(),
+                                       output_names.end());
+  desc.SetOutput("Ys", output_temp);
+  desc.SetType("tensorrt_engine");
+
+  std::unordered_map<std::string, std::string> output_name_map;
+
+  // The following procedure is used to rename all the intermediate
+  // variables and the output variables of the subgraph.
+  // Why do we do this?
+  // During the transition from fluid OP to tensorrt OP, we map
+  // the input and output Tensor (fluid data structure) of a fluid OP
+  // to the corresponding ITensor (trt data structure) through the
+  // Tensor name. When we set up an ITensor for a variable, we must
+  // ensure that it has not been set up before.
+  // If a variable in the fluid graph is not only the input of one OP
+  // but also the output of another OP, there will be problems.
+  // So we have to rename the variables in the subgraph to make sure
+  // each one is either an OP's input or an OP's output.
+
+  auto subgraph_nodes = func->subgraph;
+  for (int index = 0; index < block->ops_size(); index++) {
+    framework::proto::OpDesc *op = block->mutable_ops(index);
+    auto correspond_node = subgraph_nodes[index];
+    PADDLE_ENFORCE_EQ(correspond_node->name(), op->type());
+
+    std::unordered_map<std::string, int> var2id;
+    for (auto *in_var : correspond_node->inlinks) {
+      var2id[in_var->name()] = in_var->id();
+    }
+    // Rename the input variables of the op inside the subgraph.
+    for (int i = 0; i < op->inputs_size(); i++) {
+      framework::proto::OpDesc_Var *in_var = op->mutable_inputs(i);
+      std::vector<std::string> replaced_names;
+      for (int k = 0; k < in_var->arguments_size(); k++) {
+        std::string arg_value = in_var->arguments(k);
+        if (input_names.count(arg_value)) {
+          replaced_names.push_back(arg_value);
+        } else {
+          replaced_names.push_back(arg_value +
+                                   std::to_string(var2id[arg_value]));
+        }
+      }
+      in_var->clear_arguments();
+      for (size_t k = 0; k < replaced_names.size(); k++) {
+        in_var->add_arguments(replaced_names[k]);
+      }
+    }
+    var2id.clear();
+    for (auto out_var : correspond_node->outlinks) {
+      var2id[out_var->name()] = out_var->id();
+    }
+
+    // Rename the output variables of the op inside the subgraph.
+    for (int i = 0; i < op->outputs_size(); i++) {
+      framework::proto::OpDesc_Var *out_var = op->mutable_outputs(i);
+      std::vector<std::string> replaced_names;
+      for (int k = 0; k < out_var->arguments_size(); k++) {
+        std::string arg_value = out_var->arguments(k);
+        if (output_names.count(arg_value)) {
+          output_name_map[arg_value] =
+              arg_value + std::to_string(var2id[arg_value]);
+        }
+        replaced_names.push_back(arg_value + std::to_string(var2id[arg_value]));
+      }
+      out_var->clear_arguments();
+      for (size_t k = 0; k < replaced_names.size(); k++) {
+        out_var->add_arguments(replaced_names[k]);
+      }
+    }
+  }
+  // When the tensorrt engine runs at the end of the operation,
+  // output_mapping helps us copy the data from the renamed ITensor
+  // to the Tensor.
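+  // For example (hypothetical names): if an engine output "fc_out" maps to a
+  // node with id 7, it is renamed to "fc_out7" inside the subgraph, and
+  // output_name_map["fc_out"] == "fc_out7", so the engine knows which renamed
+  // ITensor feeds the original fluid Tensor "fc_out".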
+  std::vector<std::string> output_mapping;
+  for (auto name : output_names) {
+    PADDLE_ENFORCE(output_name_map.count(name) != 0);
+    output_mapping.push_back(output_name_map[name]);
+  }
+
+  PADDLE_ENFORCE(!block->vars().empty(), "the block has no var-desc");
+  // Set attrs.
+  SetAttr(desc.Proto(), "subgraph", block->SerializeAsString());
+  SetAttr(desc.Proto(), "engine_uniq_key", "trt-" + std::to_string(counter++));
+  SetAttr(desc.Proto(), "max_batch", FLAGS_tensorrt_max_batchsize);
+  SetAttr(desc.Proto(), "max_workspace", FLAGS_tensorrt_workspace_size);
+  SetAttr(desc.Proto(), "parameters", ExtractParameters(graph.nodes.nodes()));
+  SetAttr(desc.Proto(), "output_name_mapping", output_mapping);
+  node->SetPbMsg(desc.Proto()->SerializeAsString());
+}
+
+std::vector<std::string> ExtractParameters(
+    const std::vector<std::unique_ptr<Node>> &nodes) {
+  std::vector<std::string> parameters;
+  for (const auto &node : nodes) {
+    if (!node->IsValue()) continue;
+    PADDLE_ENFORCE(!node->pb_msg().empty(), "pb_msg should be set first");
+    framework::proto::VarDesc var;
+    var.ParseFromString(node->pb_msg());
+    if (var.persistable()) {
+      parameters.push_back(var.name());
+    }
+  }
+  return parameters;
+}
+
+void DataFlowGraphToFluidPass::AddEngineOp(Node *node) {
+  // TODO(Superjomn) Some arguments need to be exposed here for default
+  // settings.
+  PADDLE_ENFORCE(node->IsFunctionBlock());
+  auto *block_node = static_cast<FunctionBlock *>(node);
+  framework::proto::BlockDesc proto;
+  framework::BlockDesc block_desc(nullptr, &proto);
+  block_desc.Proto()->set_parent_idx(-1);
+  block_desc.Proto()->set_idx(0);
+  LOG(INFO) << "origin variable size: "
+            << argument_->origin_program_desc->blocks(0).vars().size();
+  LOG(INFO) << "transformed variable size: "
+            << block_desc.Proto()->vars().size();
+  // Copy ops.
+
+  for (auto *node : block_node->subgraph) {
+    auto *op = block_desc.AppendOp();
+    PADDLE_ENFORCE(!node->pb_msg().empty());
+    op->Proto()->ParseFromString(node->pb_msg());
+  }
+
+  *block_desc.Proto()->mutable_vars() =
+      argument_->origin_program_desc->blocks(0).vars();
+  PADDLE_ENFORCE(!block_desc.Proto()->vars().empty());
+  CreateTrtEngineOp(node, *argument_->main_dfg, block_desc.Proto());
+  auto *main_block = desc_->mutable_blocks(framework::kRootBlockIndex);
+  auto *op = main_block->add_ops();
+  PADDLE_ENFORCE(!node->pb_msg().empty(), "failed to set desc for block");
+  op->ParseFromString(node->pb_msg());
+}
+
+namespace {
+class DFG_DebuggerPass : public DFG_GraphvizDrawPass {
+ public:
+  using Config = DFG_GraphvizDrawPass::Config;
+  explicit DFG_DebuggerPass(const Config &config)
+      : DFG_GraphvizDrawPass(config) {}
+
+  std::string repr() const override { return "dfg-to-fluid-debuger-pass"; }
+
+  bool Finalize() override { return true; }
+};
+}  // namespace
+
+Pass *DataFlowGraphToFluidPass::CreateGraphvizDebugerPass() const {
+  return new DFG_DebuggerPass(DFG_GraphvizDrawPass::Config(
+      FLAGS_inference_analysis_graphviz_log_root,
+      "data_flow_graph_to_fluid_graphviz_debugger"));
+}
+
+}  // namespace analysis
+}  // namespace inference
+}  // namespace paddle
diff --git a/paddle/fluid/inference/analysis/data_flow_graph_to_fluid_pass.h b/paddle/fluid/inference/analysis/data_flow_graph_to_fluid_pass.h
new file mode 100644
index 0000000000..59c47365aa
--- /dev/null
+++ b/paddle/fluid/inference/analysis/data_flow_graph_to_fluid_pass.h
@@ -0,0 +1,62 @@
+/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License. */
+
+/*
+ * This file implements the transformation from data flow graph to fluid
+ * ProgramDesc.
+ */
+
+#pragma once
+
+#include <string>
+#include "paddle/fluid/framework/program_desc.h"
+#include "paddle/fluid/inference/analysis/data_flow_graph.h"
+#include "paddle/fluid/inference/analysis/pass.h"
+
+namespace paddle {
+namespace inference {
+
+DECLARE_int32(tensorrt_max_batchsize);
+DECLARE_int32(tensorrt_workspace_size);
+
+namespace analysis {
+class DataFlowGraphToFluidPass final : public DataFlowGraphPass {
+ public:
+  DataFlowGraphToFluidPass() = default;
+
+  bool Initialize(Argument *argument) override;
+  bool Finalize() override;
+
+  void Run(DataFlowGraph *graph) override;
+
+  std::string repr() const override { return "DFG to fluid"; }
+  std::string description() const override {
+    return "Transform a DFG to a Fluid ProgramDesc";
+  }
+
+  Pass *CreateGraphvizDebugerPass() const override;
+
+ protected:
+  // Add a Fluid Op into the ProgramDesc.
+  void AddFluidOp(Node *node);
+  // Add an EngineOp into the ProgramDesc.
+  void AddEngineOp(Node *node);
+
+ private:
+  framework::proto::ProgramDesc *desc_;
+  Argument *argument_;
+};
+}  // namespace analysis
+}  // namespace inference
+}  // namespace paddle
diff --git a/paddle/fluid/inference/analysis/data_flow_graph_to_fluid_pass_tester.cc b/paddle/fluid/inference/analysis/data_flow_graph_to_fluid_pass_tester.cc
new file mode 100644
index 0000000000..4ef381db29
--- /dev/null
+++ b/paddle/fluid/inference/analysis/data_flow_graph_to_fluid_pass_tester.cc
@@ -0,0 +1,48 @@
+// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
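+
+// This tester round-trips a model: FluidToDataFlowGraphPass builds the DFG
+// from a ProgramDesc, then DataFlowGraphToFluidPass writes it back.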
+
+#include "paddle/fluid/inference/analysis/data_flow_graph_to_fluid_pass.h"
+
+#include <gtest/gtest.h>
+#include <fstream>
+#include <iostream>
+#include "paddle/fluid/framework/executor.h"
+#include "paddle/fluid/inference/analysis/fluid_to_data_flow_graph_pass.h"
+#include "paddle/fluid/inference/analysis/ut_helper.h"
+#include "paddle/fluid/inference/io.h"
+
+namespace paddle {
+namespace inference {
+namespace analysis {
+
+TEST(DataFlowGraph, Test) {
+  Argument argument(FLAGS_inference_model_dir);
+
+  FluidToDataFlowGraphPass pass0;
+  DataFlowGraphToFluidPass pass1;
+  ASSERT_TRUE(pass0.Initialize(&argument));
+  ASSERT_TRUE(pass1.Initialize(&argument));
+
+  pass0.Run(argument.main_dfg.get());
+  pass1.Run(argument.main_dfg.get());
+
+  pass0.Finalize();
+  pass1.Finalize();
+
+  LOG(INFO) << argument.main_dfg->nodes.size();
+}
+
+}  // namespace analysis
+}  // namespace inference
+}  // namespace paddle
diff --git a/paddle/fluid/inference/analysis/device.h b/paddle/fluid/inference/analysis/device.h
new file mode 100644
index 0000000000..585c992329
--- /dev/null
+++ b/paddle/fluid/inference/analysis/device.h
@@ -0,0 +1,24 @@
+/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+#pragma once
+
+namespace paddle {
+namespace inference {
+namespace analysis {
+
+enum class Device { CPU, GPU };
+
+}  // namespace analysis
+}  // namespace inference
+}  // namespace paddle
diff --git a/paddle/fluid/inference/analysis/dfg_graphviz_draw_pass.cc b/paddle/fluid/inference/analysis/dfg_graphviz_draw_pass.cc
new file mode 100644
index 0000000000..c05b0e5d46
--- /dev/null
+++ b/paddle/fluid/inference/analysis/dfg_graphviz_draw_pass.cc
@@ -0,0 +1,59 @@
+/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
*/
+
+#include "paddle/fluid/inference/analysis/dfg_graphviz_draw_pass.h"
+
+namespace paddle {
+namespace inference {
+namespace analysis {
+
+int DFG_GraphvizDrawPass::counter_{0};
+
+void DFG_GraphvizDrawPass::Run(DataFlowGraph *graph) {
+  auto content = Draw(graph);
+  auto dot_path = GenDotPath();
+  std::ofstream file(dot_path);
+  file.write(content.c_str(), content.size());
+  file.close();
+
+  auto png_path = dot_path.substr(0, dot_path.size() - 4) + ".png";
+  std::string message;
+  LOG(INFO) << "draw to " << png_path;
+  ExecShellCommand("dot -Tpng " + dot_path + " -o " + png_path, &message);
+}
+
+std::string DFG_GraphvizDrawPass::Draw(DataFlowGraph *graph) {
+  Dot dot;
+  // Add nodes.
+  for (size_t i = 0; i < graph->nodes.size(); i++) {
+    const Node &node = graph->nodes.Get(i);
+    if (config_.display_deleted_node || !node.deleted()) {
+      dot.AddNode(node.repr(), node.dot_attrs());
+    }
+  }
+  // Add edges.
+  for (size_t i = 0; i < graph->nodes.size(); i++) {
+    const Node &node = graph->nodes.Get(i);
+    if (!config_.display_deleted_node && node.deleted()) continue;
+    for (auto &out : node.outlinks) {
+      if (!config_.display_deleted_node && out->deleted()) continue;
+      dot.AddEdge(node.repr(), out->repr(), {});
+    }
+  }
+  return dot.Build();
+}
+
+}  // namespace analysis
+}  // namespace inference
+}  // namespace paddle
diff --git a/paddle/fluid/inference/analysis/dfg_graphviz_draw_pass.h b/paddle/fluid/inference/analysis/dfg_graphviz_draw_pass.h
new file mode 100644
index 0000000000..17445ab440
--- /dev/null
+++ b/paddle/fluid/inference/analysis/dfg_graphviz_draw_pass.h
@@ -0,0 +1,78 @@
+/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+/*
+ * This file creates a DFG_GraphvizDrawPass, which helps to draw a data flow
+ * graph's structure using graphviz.
+ */
+
+#pragma once
+
+#include <fstream>
+#include <string>
+#include "paddle/fluid/inference/analysis/dot.h"
+#include "paddle/fluid/inference/analysis/pass.h"
+
+namespace paddle {
+namespace inference {
+namespace analysis {
+
+/*
+ * Output a DOT file and write it to some place.
+ */
+class DFG_GraphvizDrawPass : public DataFlowGraphPass {
+ public:
+  struct Config {
+    Config(const std::string &dir, const std::string &id,
+           bool display_deleted_node = false)
+        : dir(dir), id(id), display_deleted_node(display_deleted_node) {}
+
+    // The directory to store the .dot or .png files.
+    const std::string dir;
+    // The identifier for this dot file.
+    const std::string id;
+    // Whether to display deleted nodes; default false.
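+    // A hypothetical construction for illustration:
+    //   Config("/tmp/dfg", "fc_fuse", /*display_deleted_node=*/true)
+    // writes /tmp/dfg/0-graph_fc_fuse.dot and draws deleted nodes as well.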
+    const bool display_deleted_node;
+  };
+
+  explicit DFG_GraphvizDrawPass(const Config &config) : config_(config) {}
+
+  bool Initialize(Argument *argument) override { return true; }
+  void Run(DataFlowGraph *graph) override;
+  bool Finalize() override { return true; }
+
+  std::string repr() const override { return "DFG graphviz drawer"; }
+  std::string description() const override {
+    return "Debug a DFG by drawing it with graphviz";
+  }
+
+ protected:
+  // A counter to add a number prefix to the debugger image output so that
+  // the images sort in the order they were triggered.
+  static int counter_;
+
+  // Path of the .dot file to output.
+  std::string GenDotPath() const {
+    return config_.dir + "/" + std::to_string(counter_++) + "-graph_" +
+           config_.id + ".dot";
+  }
+
+  virtual std::string Draw(DataFlowGraph *graph);
+
+  Config config_;
+};
+
+}  // namespace analysis
+}  // namespace inference
+}  // namespace paddle
diff --git a/paddle/fluid/inference/analysis/dfg_graphviz_draw_pass_tester.cc b/paddle/fluid/inference/analysis/dfg_graphviz_draw_pass_tester.cc
new file mode 100644
index 0000000000..928be79170
--- /dev/null
+++ b/paddle/fluid/inference/analysis/dfg_graphviz_draw_pass_tester.cc
@@ -0,0 +1,54 @@
+/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+#include "paddle/fluid/inference/analysis/dfg_graphviz_draw_pass.h"
+
+#include <gtest/gtest.h>
+#include <fstream>
+#include <string>
+#include "paddle/fluid/inference/analysis/ut_helper.h"
+
+namespace paddle {
+namespace inference {
+namespace analysis {
+
+TEST(DFG_GraphvizDrawPass, dfg_graphviz_draw_pass_tester) {
+  Argument argument(FLAGS_inference_model_dir);
+  FluidToDataFlowGraphPass pass0;
+  ASSERT_TRUE(pass0.Initialize(&argument));
+  pass0.Run(argument.main_dfg.get());
+
+  // auto dfg = ProgramDescToDFG(*argument.origin_program_desc);
+
+  DFG_GraphvizDrawPass::Config config("./", "test");
+  DFG_GraphvizDrawPass pass(config);
+  pass.Initialize(&argument);
+  pass.Run(argument.main_dfg.get());
+
+  // Check the output file's content.
+  std::ifstream file("./0-graph_test.dot");
+  ASSERT_TRUE(file.is_open());
+
+  std::string line;
+  int no{0};
+  while (std::getline(file, line)) {
+    no++;
+  }
+  // The DFG is sensitive to the ProgramDesc; be careful when changing the
+  // existing models.
+  ASSERT_EQ(no, 83);
+}
+
+}  // namespace analysis
+}  // namespace inference
+}  // namespace paddle
diff --git a/paddle/fluid/inference/analysis/dot.cc b/paddle/fluid/inference/analysis/dot.cc
new file mode 100644
index 0000000000..d5471ffcb5
--- /dev/null
+++ b/paddle/fluid/inference/analysis/dot.cc
@@ -0,0 +1,23 @@
+// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "paddle/fluid/inference/analysis/dot.h"
+
+namespace paddle {
+namespace inference {
+namespace analysis {
+size_t Dot::counter = 0;
+}  // namespace analysis
+}  // namespace inference
+}  // namespace paddle
diff --git a/paddle/fluid/inference/analysis/dot.h b/paddle/fluid/inference/analysis/dot.h
new file mode 100644
index 0000000000..4bf1840fdd
--- /dev/null
+++ b/paddle/fluid/inference/analysis/dot.h
@@ -0,0 +1,155 @@
+// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+/*
+ * This file implements some helper classes and methods for DOT programming
+ * support. It gives a visualization of the graph, which helps to debug the
+ * logic of each Pass.
+ */
+#pragma once
+
+#include <glog/logging.h>
+#include <sstream>
+#include <string>
+#include <unordered_map>
+#include <vector>
+
+namespace paddle {
+namespace inference {
+namespace analysis {
+
+/*
+ * A Dot template that helps to build a DOT graph definition.
+ */
+class Dot {
+ public:
+  static size_t counter;
+
+  struct Attr {
+    std::string key;
+    std::string value;
+
+    Attr(const std::string& key, const std::string& value)
+        : key(key), value(value) {}
+
+    std::string repr() const {
+      std::stringstream ss;
+      ss << key << "=" << '"' << value << '"';
+      return ss.str();
+    }
+  };
+
+  struct Node {
+    std::string name;
+    std::vector<Attr> attrs;
+
+    Node(const std::string& name, const std::vector<Attr>& attrs)
+        : name(name),
+          attrs(attrs),
+          id_("node_" + std::to_string(Dot::counter++)) {}
+
+    std::string id() const { return id_; }
+
+    std::string repr() const {
+      std::stringstream ss;
+      CHECK(!name.empty());
+      ss << id_;
+      for (size_t i = 0; i < attrs.size(); i++) {
+        if (i == 0) {
+          ss << "[label=" << '"' << name << '"' << " ";
+        }
+        ss << attrs[i].repr();
+        ss << ((i < attrs.size() - 1) ? " " : "]");
+      }
+      return ss.str();
+    }
+
+   private:
+    std::string id_;
+  };
+
+  struct Edge {
+    std::string source;
+    std::string target;
+    std::vector<Attr> attrs;
+
+    Edge(const std::string& source, const std::string& target,
+         const std::vector<Attr>& attrs)
+        : source(source), target(target), attrs(attrs) {}
+
+    std::string repr() const {
+      std::stringstream ss;
+      CHECK(!source.empty());
+      CHECK(!target.empty());
+      ss << source << "->" << target;
+      for (size_t i = 0; i < attrs.size(); i++) {
+        if (i == 0) {
+          ss << "[";
+        }
+        ss << attrs[i].repr();
+        ss << ((i < attrs.size() - 1) ? " " : "]");
+      }
+      return ss.str();
+    }
+  };
+
+  Dot() = default;
+
+  explicit Dot(const std::vector<Attr>& attrs) : attrs_(attrs) {}
+
+  void AddNode(const std::string& name, const std::vector<Attr>& attrs) {
+    CHECK(!nodes_.count(name)) << "duplicate Node '" << name << "'";
+    nodes_.emplace(name, Node{name, attrs});
+  }
+
+  void AddEdge(const std::string& source, const std::string& target,
+               const std::vector<Attr>& attrs) {
+    CHECK(!source.empty());
+    CHECK(!target.empty());
+    auto sid = nodes_.at(source).id();
+    auto tid = nodes_.at(target).id();
+    edges_.emplace_back(sid, tid, attrs);
+  }
+
+  // Compile to DOT language codes.
+  std::string Build() const {
+    std::stringstream ss;
+    const std::string indent = " ";
+    ss << "digraph G {" << '\n';
+
+    // Add graph attrs.
+    for (const auto& attr : attrs_) {
+      ss << indent << attr.repr() << '\n';
+    }
+    // Add nodes.
+    for (auto& item : nodes_) {
+      ss << indent << item.second.repr() << '\n';
+    }
+    // Add edges.
+    for (auto& edge : edges_) {
+      ss << indent << edge.repr() << '\n';
+    }
+    ss << "} // end G";
+    return ss.str();
+  }
+
+ private:
+  std::unordered_map<std::string, Node> nodes_;
+  std::vector<Edge> edges_;
+  std::vector<Attr> attrs_;
+};
+
+}  // namespace analysis
+}  // namespace inference
+}  // namespace paddle
diff --git a/paddle/fluid/inference/analysis/dot_tester.cc b/paddle/fluid/inference/analysis/dot_tester.cc
new file mode 100644
index 0000000000..56ceb9bd5d
--- /dev/null
+++ b/paddle/fluid/inference/analysis/dot_tester.cc
@@ -0,0 +1,62 @@
+// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "paddle/fluid/inference/analysis/dot.h"
+
+#include <gtest/gtest.h>
+#include <memory>
+#include "paddle/fluid/inference/analysis/data_flow_graph.h"
+
+namespace paddle {
+namespace inference {
+namespace analysis {
+
+class DotTester : public ::testing::Test {
+ protected:
+  void SetUp() override {
+    std::vector<Dot::Attr> attrs({{"title", "hello"}});
+    dot.reset(new Dot(attrs));
+    dot->AddNode("a", {Dot::Attr{"shape", "box"}, Dot::Attr("color", "blue")});
+    dot->AddNode("b", {});
+    dot->AddNode("c", {});
+    dot->AddEdge("a", "b", {});
+    dot->AddEdge("b", "c", {});
+    dot->AddEdge("a", "c", {});
+  }
+
+  std::unique_ptr<Dot> dot;
+};
+
+TEST_F(DotTester, Build) {
+  auto codes = dot->Build();
+  // Output the DOT language code; the generated code is too long to compare
+  // against a string.
+  //
+  // The output is
+  //
+  // digraph G {
+  //   title="hello"
+  //   node_1
+  //   node_2
+  //   node_0[label="a" shape="box" color="blue"]
+  //   node_0->node_1
+  //   node_1->node_2
+  //   node_0->node_2
+  // } // end G
+  LOG(INFO) << '\n' << codes;
+}
+
+}  // namespace analysis
+}  // namespace inference
+}  // namespace paddle
diff --git a/paddle/fluid/inference/analysis/fluid_to_data_flow_graph_pass.cc b/paddle/fluid/inference/analysis/fluid_to_data_flow_graph_pass.cc
new file mode 100644
index 0000000000..511631d3e0
--- /dev/null
+++ b/paddle/fluid/inference/analysis/fluid_to_data_flow_graph_pass.cc
@@ -0,0 +1,141 @@
+/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+#include <string>
+#include <unordered_set>
+#include <vector>
+
+#include "paddle/fluid/inference/analysis/analyzer.h"
+#include "paddle/fluid/inference/analysis/dfg_graphviz_draw_pass.h"
+#include "paddle/fluid/inference/analysis/fluid_to_data_flow_graph_pass.h"
+
+namespace paddle {
+namespace inference {
+namespace analysis {
+
+bool FluidToDataFlowGraphPass::Initialize(Argument *argument) {
+  ANALYSIS_ARGUMENT_CHECK_FIELD(argument);
+  if (argument->origin_program_desc) {
+    LOG(WARNING) << "argument's origin_program_desc is already set, this "
+                    "pass might be called more than once";
+  }
+  if (!argument->fluid_model_program_path) {
+    ANALYSIS_ARGUMENT_CHECK_FIELD(argument->fluid_model_dir);
+    argument->fluid_model_program_path.reset(
+        new std::string(*argument->fluid_model_dir + "/__model__"));
+  }
+  ANALYSIS_ARGUMENT_CHECK_FIELD(argument->fluid_model_program_path);
+  auto program = LoadProgramDesc(*argument->fluid_model_program_path);
+  argument->origin_program_desc.reset(
+      new framework::proto::ProgramDesc(program));
+
+  if (!argument->main_dfg) {
+    argument->main_dfg.reset(new DataFlowGraph);
+  }
+  desc_ = argument->origin_program_desc.get();
+  return true;
+}
+
+bool FluidToDataFlowGraphPass::Finalize() { return true; }
+
+void FluidToDataFlowGraphPass::Run(DataFlowGraph *graph) {
+  PADDLE_ENFORCE(graph);
+  PADDLE_ENFORCE(desc_);
+  // Insert vars.
+  // The `var2id` map keeps each variable's name mapped to its latest Node-id;
+  // the mapping keeps updating to the variable's latest alias while the graph
+  // is being built.
+  std::unordered_map<std::string, int> var2id;
+  auto &main_block = desc_->blocks(framework::kRootBlockIndex);
+  for (int i = 0; i < main_block.vars_size(); i++) {
+    const auto &var = main_block.vars(i);
+    auto *v = graph->nodes.Create(Node::Type::kValue);
+    v->SetName(var.name());
+    v->SetPbDesc(const_cast<void *>(static_cast<const void *>(&var)));
+    v->SetPbMsg(var.SerializeAsString());
+    var2id[var.name()] = v->id();
+  }
+
+  // A variable in SSA form can only be written once, so if a variable is
+  // written multiple times (quite common in our ProgramDesc design), multiple
+  // alias Nodes of this variable are created, and each one is written just
+  // once.
+
+  // A set that keeps all the value nodes (the originals, not the aliases)
+  // that have been written (as outputs). Once an Op's output variable hits
+  // the set, a new alias is created and the global alias for this variable
+  // is updated. That is what makes the data flow graph an SSA.
+  std::unordered_set<Node *> unique_written_vars;
+  for (int i = 0; i < main_block.ops_size(); i++) {
+    const auto &op = main_block.ops(i);
+    auto *o = graph->nodes.Create(Node::Type::kFunction);
+    o->SetName(op.type());
+    static_cast<Function *>(o)->SetFuncType(op.type());
+    // Link to the original protobuf message's memory; this makes it easier
+    // to generate a fluid ProgramDesc back from the data flow graph.
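+    // The wiring below connects this op node to its input and output value
+    // nodes; when an output variable has been written before, a fresh alias
+    // node keeps the graph in SSA form (see the loop-handling branch below).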
+    o->SetPbDesc(const_cast<void *>(static_cast<const void *>(&op)));
+    o->SetPbMsg(op.SerializeAsString());
+
+    // Set inputs and outputs.
+    for (int j = 0; j < op.inputs_size(); j++) {
+      auto &in_var = op.inputs(j);
+      for (int k = 0; k < in_var.arguments_size(); k++) {
+        auto *in = graph->nodes.GetMutable(var2id.at(in_var.arguments(k)));
+        in->outlinks.push_back(o);
+        o->inlinks.push_back(in);
+      }
+    }
+    for (int j = 0; j < op.outputs_size(); j++) {
+      auto &out_var = op.outputs(j);
+      for (int k = 0; k < out_var.arguments_size(); k++) {
+        auto *out = graph->nodes.GetMutable(var2id[out_var.arguments(k)]);
+        if (unique_written_vars.count(out)) {
+          // A loop is found, for example a = op(a); with SSA, change it to
+          // a1 = op(a).
+          auto *out_alias = graph->nodes.Create(Node::Type::kValue);
+          out_alias->SetName(out->name());
+          out_alias->SetPbDesc(out->pb_desc());
+          out_alias->SetPbMsg(out->pb_msg());
+          var2id[out_alias->name()] =
+              out_alias->id();  // Update the variable's alias Node.
+          LOG(INFO) << "loop found in graph, create SSA alias node ["
+                    << out_alias->repr() << "] for [" << out->repr() << "]";
+          out = out_alias;
+        }
+        out->inlinks.push_back(o);
+        o->outlinks.push_back(out);
+        unique_written_vars.insert(out);
+      }
+    }
+  }
+  // Analyze and extract the inputs and outputs of this graph.
+  graph->Build();
+}
+
+namespace {
+class DFG_DebuggerPass : public DFG_GraphvizDrawPass {
+ public:
+  using Config = DFG_GraphvizDrawPass::Config;
+  explicit DFG_DebuggerPass(const Config &config)
+      : DFG_GraphvizDrawPass(config) {}
+  std::string repr() const override { return "fluid-to-dfg-debuger-pass"; }
+  bool Finalize() override { return true; }
+};
+}  // namespace
+
+Pass *FluidToDataFlowGraphPass::CreateGraphvizDebugerPass() const {
+  return new DFG_DebuggerPass(DFG_GraphvizDrawPass::Config(
+      FLAGS_inference_analysis_graphviz_log_root, "fluid-to-dfg-debuger"));
+}
+
+}  // namespace analysis
+}  // namespace inference
+}  // namespace paddle
diff --git a/paddle/fluid/inference/analysis/fluid_to_data_flow_graph_pass.h b/paddle/fluid/inference/analysis/fluid_to_data_flow_graph_pass.h
new file mode 100644
index 0000000000..fb948bf224
--- /dev/null
+++ b/paddle/fluid/inference/analysis/fluid_to_data_flow_graph_pass.h
@@ -0,0 +1,57 @@
+/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License. */
+
+/*
+ * This file implements the transformation from fluid ProgramDesc to data
+ * flow graph.
+ */
+
+#pragma once
+
+#include <string>
+
+#include "paddle/fluid/framework/program_desc.h"
+#include "paddle/fluid/inference/analysis/data_flow_graph.h"
+#include "paddle/fluid/inference/analysis/pass.h"
+
+namespace paddle {
+namespace inference {
+namespace analysis {
+
+/*
+ * Transform a FluidDesc into SSA form.
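+ *
+ * After this pass, every value node is written by exactly one Function node;
+ * a variable that is re-written gets a fresh alias node per extra write.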
+ */
+class FluidToDataFlowGraphPass final : public DataFlowGraphPass {
+ public:
+  FluidToDataFlowGraphPass() = default;
+
+  bool Initialize(Argument *argument) override;
+  bool Finalize() override;
+
+  void Run(DataFlowGraph *graph) override;
+
+  std::string repr() const override { return "fluid-to-data-flow-graph"; }
+  std::string description() const override {
+    return "transform a fluid ProgramDesc to a data flow graph.";
+  }
+
+  Pass *CreateGraphvizDebugerPass() const override;
+
+ private:
+  framework::proto::ProgramDesc const *desc_;
+};
+
+}  // namespace analysis
+}  // namespace inference
+}  // namespace paddle
diff --git a/paddle/fluid/inference/analysis/fluid_to_data_flow_graph_pass_tester.cc b/paddle/fluid/inference/analysis/fluid_to_data_flow_graph_pass_tester.cc
new file mode 100644
index 0000000000..d218dcd050
--- /dev/null
+++ b/paddle/fluid/inference/analysis/fluid_to_data_flow_graph_pass_tester.cc
@@ -0,0 +1,38 @@
+// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "paddle/fluid/inference/analysis/fluid_to_data_flow_graph_pass.h"
+
+#include <gtest/gtest.h>
+#include "paddle/fluid/inference/analysis/ut_helper.h"
+
+namespace paddle {
+namespace inference {
+namespace analysis {
+
+TEST(FluidToDataFlowGraphPass, Test) {
+  FluidToDataFlowGraphPass pass;
+  Argument argument(FLAGS_inference_model_dir);
+  pass.Initialize(&argument);
+  pass.Run(argument.main_dfg.get());
+  // The analysis is sensitive to the ProgramDesc; be careful when changing
+  // the original model.
+  ASSERT_EQ(argument.main_dfg->nodes.size(), 38UL);
+  pass.Finalize();
+  ASSERT_FALSE(argument.main_dfg->DotString().empty());
+  EXPECT_FALSE(argument.main_dfg->inputs.empty());
+}
+
+}  // namespace analysis
+}  // namespace inference
+}  // namespace paddle
diff --git a/paddle/fluid/inference/analysis/graph_traits.cc b/paddle/fluid/inference/analysis/graph_traits.cc
new file mode 100644
index 0000000000..2ea70a1d20
--- /dev/null
+++ b/paddle/fluid/inference/analysis/graph_traits.cc
@@ -0,0 +1,15 @@
+/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+#include "paddle/fluid/inference/analysis/graph_traits.h"
diff --git a/paddle/fluid/inference/analysis/graph_traits.h b/paddle/fluid/inference/analysis/graph_traits.h
new file mode 100644
index 0000000000..aed2b1e8e2
--- /dev/null
+++ b/paddle/fluid/inference/analysis/graph_traits.h
@@ -0,0 +1,63 @@
+/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+/*
+ * This file defines the GraphTraits template class that should be
+ * specialized by classes that want to be iterable by generic graph
+ * iterators.
+ *
+ * This file also defines the marker class Inverse, which is used to iterate
+ * over a graph in an inverse ordering defined by the graph.
+ */
+
+#pragma once
+
+#include "paddle/fluid/inference/analysis/helper.h"
+
+namespace paddle {
+namespace inference {
+namespace analysis {
+
+/*
+ * This class should be specialized by different graph types...
+ * That's why the base class is empty.
+ */
+template <typename GraphType>
+struct GraphTraits {
+  // using NodesBFSIterator = xxx
+
+  // NodesBFSIterator nodes_begin();
+  // NodesBFSIterator nodes_end();
+};
+
+/*
+ * Inverse - This class is used as a marker class to tell the graph iterator
+ * to iterate over the graph in an Inverse order defined by the graph.
+ */
+template <typename GraphType>
+struct Inverse {
+  const GraphType &graph;
+
+  explicit Inverse(const GraphType &graph) : graph(graph) {}
+};
+
+/*
+ * Provide a partial specialization of GraphTraits so that the inverse of an
+ * inverse turns into the original graph.
+ */
+template <typename GraphType>
+struct GraphTraits<Inverse<Inverse<GraphType>>> : GraphTraits<GraphType> {};
+
+}  // namespace analysis
+}  // namespace inference
+}  // namespace paddle
diff --git a/paddle/fluid/inference/analysis/helper.cc b/paddle/fluid/inference/analysis/helper.cc
new file mode 100644
index 0000000000..ca40c01fc5
--- /dev/null
+++ b/paddle/fluid/inference/analysis/helper.cc
@@ -0,0 +1,60 @@
+// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
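+
+// Explicit SetAttr specializations for each attribute type the analysis
+// passes store into an OpDesc proto (string, int, int64 and string lists).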
+
+#include "paddle/fluid/inference/analysis/helper.h"
+#include "paddle/fluid/framework/framework.pb.h"
+
+namespace paddle {
+namespace inference {
+namespace analysis {
+
+template <>
+void SetAttr<std::string>(framework::proto::OpDesc *op,
+                          const std::string &name, const std::string &data) {
+  auto *attr = op->add_attrs();
+  attr->set_name(name);
+  attr->set_type(paddle::framework::proto::AttrType::STRING);
+  attr->set_s(data);
+}
+template <>
+void SetAttr<int>(framework::proto::OpDesc *op, const std::string &name,
+                  const int &data) {
+  auto *attr = op->add_attrs();
+  attr->set_name(name);
+  attr->set_type(paddle::framework::proto::AttrType::INT);
+  attr->set_i(data);
+}
+template <>
+void SetAttr<int64_t>(framework::proto::OpDesc *op, const std::string &name,
+                      const int64_t &data) {
+  auto *attr = op->add_attrs();
+  attr->set_name(name);
+  attr->set_type(paddle::framework::proto::AttrType::LONG);
+  attr->set_l(data);
+}
+template <>
+void SetAttr<std::vector<std::string>>(framework::proto::OpDesc *op,
+                                       const std::string &name,
+                                       const std::vector<std::string> &data) {
+  auto *attr = op->add_attrs();
+  attr->set_name(name);
+  attr->set_type(paddle::framework::proto::AttrType::STRINGS);
+  for (const auto &s : data) {
+    attr->add_strings(s.c_str());
+  }
+}
+
+}  // namespace analysis
+}  // namespace inference
+}  // namespace paddle
diff --git a/paddle/fluid/inference/analysis/helper.h b/paddle/fluid/inference/analysis/helper.h
new file mode 100644
index 0000000000..a0f912b251
--- /dev/null
+++ b/paddle/fluid/inference/analysis/helper.h
@@ -0,0 +1,160 @@
+/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+#pragma once
+
+#include <cstdio>
+#include <fstream>
+#include <string>
+#include <typeindex>
+#include <unordered_map>
+#include <vector>
+
+#include "paddle/fluid/framework/framework.pb.h"
+#include "paddle/fluid/framework/scope.h"
+#include "paddle/fluid/framework/variable.h"
+#include "paddle/fluid/platform/enforce.h"
+
+namespace paddle {
+namespace inference {
+namespace analysis {
+
+template <typename T>
+void SetAttr(framework::proto::OpDesc *op, const std::string &name,
+             const T &data);
+
+template <typename Vec>
+int AccuDims(Vec &&vec, int size) {
+  int res = 1;
+  for (int i = 0; i < size; i++) {
+    res *= std::forward<Vec>(vec)[i];
+  }
+  return res;
+}
+
+#define SET_TYPE(type__) dic_[std::type_index(typeid(type__))] = #type__;
+/*
+ * Map a typeid to its string representation.
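+ *
+ * A minimal usage sketch:
+ *   DataTypeNamer::Global().repr<int>();  // returns "int"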
+ */
+struct DataTypeNamer {
+  static const DataTypeNamer &Global() {
+    static auto *x = new DataTypeNamer();
+    return *x;
+  }
+
+  template <typename T>
+  const std::string &repr() const {
+    auto x = std::type_index(typeid(T));
+    PADDLE_ENFORCE(dic_.count(x), "unknown type for representation");
+    return dic_.at(x);
+  }
+
+  const std::string &repr(const std::type_index &type) const {  // NOLINT
+    PADDLE_ENFORCE(dic_.count(type), "unknown type for representation");
+    return dic_.at(type);
+  }
+
+ private:
+  DataTypeNamer() {
+    SET_TYPE(int);
+    SET_TYPE(bool);
+    SET_TYPE(float);
+    SET_TYPE(void *);
+  }
+
+  std::unordered_map<std::type_index, std::string> dic_;
+};
+#undef SET_TYPE
+
+template <typename IteratorT>
+class iterator_range {
+  IteratorT begin_, end_;
+
+ public:
+  template <typename Container>
+  explicit iterator_range(Container &&c) : begin_(c.begin()), end_(c.end()) {}
+
+  iterator_range(const IteratorT &begin, const IteratorT &end)
+      : begin_(begin), end_(end) {}
+
+  const IteratorT &begin() const { return begin_; }
+  const IteratorT &end() const { return end_; }
+};
+
+/*
+ * A registry helper class whose records keep the order in which they were
+ * registered.
+ */
+template <typename T>
+class OrderedRegistry {
+ public:
+  T *Register(const std::string &name, T *x) {
+    PADDLE_ENFORCE(!dic_.count(name), "duplicate key [%s]", name);
+    dic_[name] = data_.size();
+    data_.emplace_back(std::unique_ptr<T>(x));
+    return data_.back().get();
+  }
+
+  T *Lookup(const std::string &name) {
+    auto it = dic_.find(name);
+    if (it == dic_.end()) return nullptr;
+    return data_[it->second].get();
+  }
+
+ protected:
+  std::unordered_map<std::string, int> dic_;
+  std::vector<std::unique_ptr<T>> data_;
+};
+
+template <typename T>
+T &GetFromScope(const framework::Scope &scope, const std::string &name) {
+  framework::Variable *var = scope.FindVar(name);
+  PADDLE_ENFORCE(var != nullptr);
+  return *var->GetMutable<T>();
+}
+
+static void ExecShellCommand(const std::string &cmd, std::string *message) {
+  char buffer[128];
+  std::shared_ptr<FILE> pipe(popen(cmd.c_str(), "r"), pclose);
+  if (!pipe) {
+    LOG(ERROR) << "error running command: " << cmd;
+    return;
+  }
+  while (!feof(pipe.get())) {
+    if (fgets(buffer, 128, pipe.get()) != nullptr) {
+      *message += buffer;
+    }
+  }
+}
+
+static framework::proto::ProgramDesc LoadProgramDesc(
+    const std::string &model_path) {
+  std::ifstream fin(model_path, std::ios::in | std::ios::binary);
+  PADDLE_ENFORCE(fin.is_open(), "Cannot open file %s", model_path);
+  fin.seekg(0, std::ios::end);
+  std::string buffer(fin.tellg(), ' ');
+  fin.seekg(0, std::ios::beg);
+  fin.read(&buffer[0], buffer.size());
+  fin.close();
+  framework::proto::ProgramDesc program_desc;
+  program_desc.ParseFromString(buffer);
+  return program_desc;
+}
+
+}  // namespace analysis
+}  // namespace inference
+}  // namespace paddle
+
+#define PADDLE_DISALLOW_COPY_AND_ASSIGN(type__) \
+  type__(const type__ &) = delete;              \
+  void operator=(const type__ &) = delete;
diff --git a/paddle/fluid/inference/analysis/model_store_pass.cc b/paddle/fluid/inference/analysis/model_store_pass.cc
new file mode 100644
index 0000000000..1c42917642
--- /dev/null
+++ b/paddle/fluid/inference/analysis/model_store_pass.cc
@@ -0,0 +1,63 @@
+// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <fstream>
+#include <sstream>
+#include <string>
+
+#include "paddle/fluid/inference/analysis/analyzer.h"
+#include "paddle/fluid/inference/analysis/argument.h"
+#include "paddle/fluid/inference/analysis/model_store_pass.h"
+
+namespace paddle {
+namespace inference {
+namespace analysis {
+
+void ModelStorePass::Run(DataFlowGraph *x) {
+  if (!argument_->fluid_model_param_path) {
+    PADDLE_ENFORCE_NOT_NULL(argument_->fluid_model_dir);
+    argument_->fluid_model_param_path.reset(
+        new std::string(*argument_->fluid_model_dir + "param"));
+  }
+  PADDLE_ENFORCE_NOT_NULL(argument_->model_output_store_path);
+  // Directly copy the param file to the destination.
+  std::stringstream ss;
+  // NOTE These commands only work on Linux.
+  ss << "mkdir -p " << *argument_->model_output_store_path;
+  LOG(INFO) << "run command: " << ss.str();
+  PADDLE_ENFORCE_EQ(system(ss.str().c_str()), 0);
+  ss.str("");
+
+  ss << "cp " << *argument_->fluid_model_dir << "/*"
+     << " " << *argument_->model_output_store_path;
+  LOG(INFO) << "run command: " << ss.str();
+  PADDLE_ENFORCE_EQ(system(ss.str().c_str()), 0);
+
+  // Store the program.
+  PADDLE_ENFORCE_NOT_NULL(argument_->transformed_program_desc,
+                          "program desc is not transformed, should call "
+                          "DataFlowGraphToFluidPass first.");
+  const std::string program_output_path =
+      *argument_->model_output_store_path + "/__model__";
+  std::ofstream file(program_output_path, std::ios::binary);
+  PADDLE_ENFORCE(file.is_open(), "failed to open %s to write.",
+                 program_output_path);
+  const std::string serialized_message =
+      argument_->transformed_program_desc->SerializeAsString();
+  file.write(serialized_message.c_str(), serialized_message.size());
+}
+
+}  // namespace analysis
+}  // namespace inference
+}  // namespace paddle
diff --git a/paddle/fluid/inference/analysis/model_store_pass.h b/paddle/fluid/inference/analysis/model_store_pass.h
new file mode 100644
index 0000000000..fac7083925
--- /dev/null
+++ b/paddle/fluid/inference/analysis/model_store_pass.h
@@ -0,0 +1,53 @@
+// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+/*
+ * This file defines ModelStorePass, which stores the runtime DFG as a Paddle
+ * model on disk; that model can be reloaded for prediction.
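+ *
+ * The stored model reuses the original parameter files (copied verbatim) and
+ * only rewrites __model__ with the transformed ProgramDesc.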
+ */
+
+#pragma once
+#include <string>
+#include "paddle/fluid/inference/analysis/pass.h"
+
+namespace paddle {
+namespace inference {
+namespace analysis {
+
+class ModelStorePass : public DataFlowGraphPass {
+ public:
+  bool Initialize(Argument* argument) override {
+    if (!argument) {
+      LOG(ERROR) << "invalid argument";
+      return false;
+    }
+    argument_ = argument;
+    return true;
+  }
+
+  void Run(DataFlowGraph* x) override;
+
+  std::string repr() const override { return "DFG-store-pass"; }
+  std::string description() const override {
+    return R"DD(This pass stores the runtime DFG as a Paddle model on disk;
+that model can be reloaded for prediction again.)DD";
+  }
+
+ private:
+  Argument* argument_{nullptr};
+};
+
+}  // namespace analysis
+}  // namespace inference
+}  // namespace paddle
diff --git a/paddle/fluid/inference/analysis/model_store_pass_tester.cc b/paddle/fluid/inference/analysis/model_store_pass_tester.cc
new file mode 100644
index 0000000000..5f3526dd50
--- /dev/null
+++ b/paddle/fluid/inference/analysis/model_store_pass_tester.cc
@@ -0,0 +1,43 @@
+// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "paddle/fluid/inference/analysis/model_store_pass.h"
+
+#include <gflags/gflags.h>
+#include <gtest/gtest.h>
+#include "paddle/fluid/inference/analysis/analyzer.h"
+
+namespace paddle {
+namespace inference {
+namespace analysis {
+
+DEFINE_string(inference_model_dir, "", "Model path");
+
+TEST(DFG_StorePass, test) {
+  Analyzer analyzer;
+  Argument argument(FLAGS_inference_model_dir);
+  argument.model_output_store_path.reset(
+      new std::string("./_dfg_store_pass_tmp"));
+  // Disable storage in the analyzer.
+  FLAGS_inference_analysis_output_storage_path = "";
+  analyzer.Run(&argument);
+
+  ModelStorePass pass;
+  pass.Initialize(&argument);
+  pass.Run(argument.main_dfg.get());
+}
+
+}  // namespace analysis
+}  // namespace inference
+}  // namespace paddle
diff --git a/paddle/fluid/inference/analysis/node.cc b/paddle/fluid/inference/analysis/node.cc
new file mode 100644
index 0000000000..f2e918f3ff
--- /dev/null
+++ b/paddle/fluid/inference/analysis/node.cc
@@ -0,0 +1,81 @@
+/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
*/
+
+#include "paddle/fluid/inference/analysis/node.h"
+#include "glog/logging.h"
+#include "paddle/fluid/platform/enforce.h"
+
+namespace paddle {
+namespace inference {
+namespace analysis {
+
+template <>
+std::string &NodeAttr::As<std::string>() {
+  if (data_.empty()) {
+    type_index_ = std::type_index(typeid(std::string));
+  }
+  PADDLE_ENFORCE_EQ(type_index_, std::type_index(typeid(std::string)));
+  return data_;
+}
+
+std::string &NodeAttr::String() { return As<std::string>(); }
+
+std::vector<Dot::Attr> Value::dot_attrs() const {
+  return std::vector<Dot::Attr>({Dot::Attr("style", "filled,rounded"),
+                                 Dot::Attr("shape", "box"),
+                                 Dot::Attr("fillcolor", "red")});
+}
+
+std::vector<Dot::Attr> Function::dot_attrs() const {
+  return std::vector<Dot::Attr>({Dot::Attr("style", "filled,rounded"),
+                                 Dot::Attr("shape", "diamond"),
+                                 Dot::Attr("fillcolor", "yellow")});
+}
+
+Node *NodeMap::Create(Node::Type type) {
+  switch (type) {
+    case Node::Type::kFunction:
+      nodes_.emplace_back(new Function);
+      break;
+    case Node::Type::kValue:
+      nodes_.emplace_back(new Value);
+      break;
+    case Node::Type::kFunctionBlock:
+      nodes_.emplace_back(new FunctionBlock);
+      break;
+    default:
+      PADDLE_THROW("Not supported node type.");
+  }
+  nodes_.back()->id_ = size() - 1;
+  return nodes_.back().get();
+}
+
+Node *NodeMap::GetMutable(size_t id) {
+  PADDLE_ENFORCE_GT(size(), id);
+  return nodes_[id].get();
+}
+
+const Node &NodeMap::Get(size_t id) const {
+  PADDLE_ENFORCE_GT(size(), id);
+  return *nodes_[id].get();
+}
+
+void NodeMap::Delete(size_t id) {
+  PADDLE_ENFORCE_LT(id, size());
+  nodes_[id]->SetDeleted();
+}
+
+}  // namespace analysis
+}  // namespace inference
+}  // namespace paddle
diff --git a/paddle/fluid/inference/analysis/node.h b/paddle/fluid/inference/analysis/node.h
new file mode 100644
index 0000000000..47e524bc5c
--- /dev/null
+++ b/paddle/fluid/inference/analysis/node.h
@@ -0,0 +1,248 @@
+/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+/*
+ * This file defines the Node class and its subclasses. A Node is the basic
+ * analysis element in a computation graph.
+ * There are basically two kinds of nodes: function nodes and value nodes.
+ */
+#pragma once
+
+#include <memory>
+#include <string>
+#include <typeindex>
+#include <unordered_map>
+#include <vector>
+
+#include "paddle/fluid/framework/var_type.h"
+#include "paddle/fluid/inference/analysis/device.h"
+#include "paddle/fluid/inference/analysis/dot.h"
+#include "paddle/fluid/inference/analysis/helper.h"
+
+namespace paddle {
+namespace inference {
+namespace analysis {
+
+class NodeMap;
+
+// A helper class to maintain the status from Pass.
+struct NodeAttr {
+  // NOTE T should be a primitive type or a struct composed of several
+  // primitive types.
+  // NOTE STL containers should not be used here.
+  // Some usages:
+  //   NodeAttr attr;
+  //   attr.Bool() = true;
+
+  bool &Bool() { return As<bool>(); }
+  float &Float() { return As<float>(); }
+  int32_t &Int32() { return As<int32_t>(); }
+  int64_t &Int64() { return As<int64_t>(); }
+  void *&Pointer() { return As<void *>(); }
+  std::string &String();
+
+ private:
+  template <typename T>
+  T &As() {
+    // init storage on the first usage.
+    if (data_.empty()) {
+      VLOG(4) << "resize data to " << sizeof(T);
+      type_index_ = std::type_index(typeid(T));
+      data_.resize(sizeof(T));
+    }
+    PADDLE_ENFORCE(framework::IsType<T>(type_index_),
+                   "type not matched, origin is %s, want %s",
+                   DataTypeNamer::Global().repr(type_index_),
+                   DataTypeNamer::Global().repr<T>());
+    PADDLE_ENFORCE_EQ(data_.size(), sizeof(T), "Node attr type recast error");
+    return *reinterpret_cast<T *>(&data_[0]);
+  }
+
+ private:
+  std::string data_;
+  std::type_index type_index_{typeid(NodeAttr)};
+};
+
+/*
+ * Node Representation.
+ *
+ * This is a very important class for analysis. It is the base class of all
+ * nodes computed by a program that may be used as operands to other nodes.
+ * Node is the super class of other important classes such as Function and
+ * Value; a node may carry a name.
+ */
+class Node {
+ public:
+  // Node type. NOTE new node types should be added here.
+  enum class Type { kNone = -1, kFunction, kValue, kFunctionBlock };
+
+  Node() = default;
+
+  // Cast to a subclass type, Function for example.
+  template <typename Subclass>
+  Subclass &As() {
+    return *dynamic_cast<Subclass *>(this);
+  }
+
+  // Formatted representation of this Node.
+  virtual std::string repr() const {
+    return name() + "(" + std::to_string(id()) + ")";
+  }
+
+  // DOT node representation. Each Node type can customize its own
+  // representation.
+  virtual std::vector<Dot::Attr> dot_attrs() const {
+    return std::vector<Dot::Attr>({Dot::Attr("style", "filled")});
+  }
+
+  // Get an additional attribute and convert it to T data type. NOTE this will
+  // silently create a new attribute if it does not exist.
+  NodeAttr &attr(const std::string &name) const { return attrs_[name]; }
+
+  int id() const { return id_; }
+
+  // The Protobuf description is set/get with a void* to decouple the Node
+  // interface from any specific kind of Protobuf message.
+  void SetPbDesc(void *pb) { attr("pb_desc").Pointer() = pb; }
+  void *pb_desc() const { return attr("pb_desc").Pointer(); }
+
+  void SetPbMsg(const std::string &s) { attr("pb_msg").String() = s; }
+  const std::string &pb_msg() const { return attr("pb_msg").String(); }
+
+  void SetDeleted() { deleted_ = true; }
+  bool deleted() const { return deleted_; }
+
+  void SetName(const std::string &name) { name_ = name; }
+  const std::string &name() const { return name_; }
+
+  void SetType(Type type) { type_ = type; }
+  Type type() const { return type_; }
+
+  // Input links.
+  std::vector<Node *> inlinks;
+  // Output links.
+  std::vector<Node *> outlinks;
+
+  // Type checks.
+  bool IsFunction() const { return type_ == Node::Type::kFunction; }
+  bool IsValue() const { return type_ == Node::Type::kValue; }
+  bool IsFunctionBlock() const { return type_ == Node::Type::kFunctionBlock; }
+
+  virtual ~Node() {}
+
+  friend class NodeMap;
+
+  PADDLE_DISALLOW_COPY_AND_ASSIGN(Node);
+
+ protected:
+  // The id number, not the name, is a node's unique identifier in the
+  // computation graph.
+  int id_{-1};
+  std::string name_;
+  Type type_{Type::kNone};
+  // Mark whether this node was deleted by some pass.
+  bool deleted_{false};
+  mutable std::unordered_map<std::string, NodeAttr> attrs_;
+};
+
+class Function;
+/*
+ * Value represents a value node; it has attributes such as dims, data type,
+ * and so on.
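+ *
+ * A minimal usage sketch (illustrative only), given a NodeMap `nodes`:
+ *
+ *   auto *n = nodes.Create(Node::Type::kValue);
+ *   auto &v = n->As<Value>();
+ *   v.SetDataType(Value::DataType::kFloat32);
+ *   v.SetDims({1, 3, 224, 224});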
+ */
+class Value : public Node {
+ public:
+  enum class DataType { kInt32, kInt64, kFloat32, kFloat64 };
+  using Dims = std::vector<int>;
+
+  void SetDataType(DataType data_type) { data_type_ = data_type; }
+  DataType data_type() const { return data_type_; }
+
+  void SetDims(const Dims &dims) { dims_ = dims; }
+  const Dims &dims() const { return dims_; }
+
+  Device device() const { return device_; }
+  void SetDevice(Device device) { device_ = device; }
+
+  std::vector<Dot::Attr> dot_attrs() const override;
+
+  PADDLE_DISALLOW_COPY_AND_ASSIGN(Value);
+
+ protected:
+  Value() { SetType(Node::Type::kValue); }
+  friend class NodeMap;
+
+ private:
+  DataType data_type_;
+  Dims dims_;
+  Device device_;
+};
+
+/*
+ * Function represents any kind of executable concept that takes several
+ * Values as input and outputs several Values.
+ */
+class Function : public Node {
+ public:
+  std::vector<Dot::Attr> dot_attrs() const override;
+
+  // Get the operator's type from Desc.
+  const std::string &func_type() const { return func_type_; }
+  // Set the operator's type.
+  void SetFuncType(const std::string &func_type) { func_type_ = func_type; }
+
+  PADDLE_DISALLOW_COPY_AND_ASSIGN(Function);
+
+ protected:
+  std::string func_type_;
+  Function() { SetType(Node::Type::kFunction); }
+  friend class NodeMap;
+};
+
+/*
+ * FunctionBlock is a Node that contains a sub-graph of multiple Nodes.
+ */
+struct FunctionBlock : public Node {
+  std::string repr() const override { return "block-" + std::to_string(id()); }
+  std::vector<Node *> subgraph;
+
+ protected:
+  FunctionBlock() { SetType(Node::Type::kFunctionBlock); }
+  friend class NodeMap;
+};
+
+class NodeMap {
+ public:
+  // Create a new node with the given type.
+  Node *Create(Node::Type type);
+
+  // Get a node by its id.
+  Node *GetMutable(size_t id);
+
+  const Node &Get(size_t id) const;
+
+  void Delete(size_t id);
+
+  const std::vector<std::unique_ptr<Node>> &nodes() const { return nodes_; }
+
+  size_t size() const { return nodes_.size(); }
+
+ private:
+  std::vector<std::unique_ptr<Node>> nodes_;
+  std::unordered_map<std::string, size_t> map_;
+};
+
+}  // namespace analysis
+}  // namespace inference
+}  // namespace paddle
diff --git a/paddle/fluid/inference/analysis/node_attr_flags.h b/paddle/fluid/inference/analysis/node_attr_flags.h
new file mode 100644
index 0000000000..a3f70e5419
--- /dev/null
+++ b/paddle/fluid/inference/analysis/node_attr_flags.h
@@ -0,0 +1,32 @@
+// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+/*
+ * This file contains all the flags declared in Node::Attr.
+ *
+ * Node::Attr is designed to share information between different passes; one
+ * pass can read the attributes another pass left on a Node via the flags in
+ * this file.
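+ *
+ * For example (a sketch): a mark pass sets
+ *   node->attr(ATTR_supported_by_tensorrt).Bool() = true;
+ * and a later fuse pass reads the same flag back.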
+ */
+#pragma once
+namespace paddle {
+namespace inference {
+namespace analysis {
+
+#define DECLARE_NODE_ATTR(flag__) const char ATTR_##flag__[] = #flag__;
+
+DECLARE_NODE_ATTR(supported_by_tensorrt)  // bool
+
+}  // namespace analysis
+}  // namespace inference
+}  // namespace paddle
diff --git a/paddle/fluid/inference/analysis/node_tester.cc b/paddle/fluid/inference/analysis/node_tester.cc
new file mode 100644
index 0000000000..ea832a3a7e
--- /dev/null
+++ b/paddle/fluid/inference/analysis/node_tester.cc
@@ -0,0 +1,34 @@
+/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+#include "paddle/fluid/inference/analysis/node.h"
+
+#include <gtest/gtest.h>
+
+namespace paddle {
+namespace inference {
+namespace analysis {
+
+TEST(Node, Attr) {
+  // Node is an abstract class; use Value instead, since it shares the same
+  // Attr logic.
+  NodeMap nodes;
+  auto* node = nodes.Create(Node::Type::kValue);
+  node->attr("v0").Int32() = 2008;
+  ASSERT_EQ(node->attr("v0").Int32(), 2008);
+}
+
+}  // namespace analysis
+}  // namespace inference
+}  // namespace paddle
diff --git a/paddle/fluid/framework/details/ssa_graph.cc b/paddle/fluid/inference/analysis/pass.cc
similarity index 92%
rename from paddle/fluid/framework/details/ssa_graph.cc
rename to paddle/fluid/inference/analysis/pass.cc
index 1b8c889449..121b72c0a0 100644
--- a/paddle/fluid/framework/details/ssa_graph.cc
+++ b/paddle/fluid/inference/analysis/pass.cc
@@ -12,4 +12,4 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-#include "paddle/fluid/framework/details/ssa_graph.h"
+#include "paddle/fluid/inference/analysis/pass.h"
diff --git a/paddle/fluid/inference/analysis/pass.h b/paddle/fluid/inference/analysis/pass.h
new file mode 100644
index 0000000000..6806f9ff7d
--- /dev/null
+++ b/paddle/fluid/inference/analysis/pass.h
@@ -0,0 +1,95 @@
+/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+#pragma once
+
+#include <glog/logging.h>
+#include <iosfwd>
+#include <string>
+
+#include "paddle/fluid/framework/framework.pb.h"
+#include "paddle/fluid/inference/analysis/argument.h"
+#include "paddle/fluid/inference/analysis/data_flow_graph.h"
+#include "paddle/fluid/inference/analysis/helper.h"
+#include "paddle/fluid/inference/analysis/node.h"
+
+namespace paddle {
+namespace inference {
+namespace analysis {
+
+class Pass {
+ public:
+  Pass() = default;
+  virtual ~Pass() = default;
+  // Mutable Pass.
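+  // A pass that needs to mutate the shared Argument overrides this variant;
+  // a read-only pass overrides the const variant below. Both default to
+  // false, meaning "not supported by this pass".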
+  virtual bool Initialize(Argument *argument) { return false; }
+  // Readonly Pass.
+  virtual bool Initialize(const Argument &argument) { return false; }
+
+  // Virtual method overridden by subclasses to do any necessary clean up
+  // after all passes have run.
+  virtual bool Finalize() { return false; }
+
+  // Get a Pass appropriate to print the Node this pass operates on.
+  virtual Pass *CreatePrinterPass(std::ostream &os,
+                                  const std::string &banner) const {
+    return nullptr;
+  }
+
+  // Create a debugger Pass that draws the DFG with the graphviz toolkit.
+  virtual Pass *CreateGraphvizDebugerPass() const { return nullptr; }
+
+  virtual void Run() { LOG(FATAL) << "not valid"; }
+  // Run on a single Node.
+  virtual void Run(Node *x) { LOG(FATAL) << "not valid"; }
+  // Run on a single Function.
+  virtual void Run(Function *x) { LOG(FATAL) << "not valid"; }
+  // Run on a single FunctionBlock.
+  virtual void Run(FunctionBlock *x) { LOG(FATAL) << "not valid"; }
+  // Run on a single DataFlowGraph.
+  virtual void Run(DataFlowGraph *x) { LOG(FATAL) << "not valid"; }
+
+  // Human-readable short representation.
+  virtual std::string repr() const = 0;
+  // Human-readable long description.
+  virtual std::string description() const = 0;
+};
+
+// NodePass processes any Node type.
+class NodePass : public Pass {
+ public:
+  virtual void Run(Node *node) = 0;
+};
+
+// FunctionPass processes Function nodes.
+class FunctionPass : public Pass {
+ public:
+  virtual void Run(Function *node) = 0;
+};
+
+// FunctionBlockPass processes FunctionBlock nodes.
+class FunctionBlockPass : public Pass {
+ public:
+  virtual void Run(FunctionBlock *node) = 0;
+};
+
+// DataFlowGraphPass processes a whole DataFlowGraph.
+class DataFlowGraphPass : public Pass {
+ public:
+  virtual void Run(DataFlowGraph *graph) = 0;
+};
+
+}  // namespace analysis
+}  // namespace inference
+}  // namespace paddle
diff --git a/paddle/fluid/inference/analysis/pass_manager.cc b/paddle/fluid/inference/analysis/pass_manager.cc
new file mode 100644
index 0000000000..b428bb22b1
--- /dev/null
+++ b/paddle/fluid/inference/analysis/pass_manager.cc
@@ -0,0 +1,56 @@
+/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+#include "paddle/fluid/inference/analysis/pass_manager.h"
+#include "paddle/fluid/inference/analysis/fluid_to_data_flow_graph_pass.h"
+
+namespace paddle {
+namespace inference {
+namespace analysis {
+
+bool PassManager::Initialize(Argument* argument) {
+  argument_ = argument;
+  for (auto& pass : data_) {
+    LOG(INFO) << "Initializing pass " << pass->repr();
+    if (!pass->Initialize(argument)) {
+      LOG(ERROR) << "Failed to initialize pass [" << pass->repr() << "]";
+      return false;
+    }
+  }
+  return true;
+}
+
+void DfgPassManager::RunAll() {
+  PADDLE_ENFORCE(argument_);
+  for (auto& pass : data_) {
+    VLOG(4) << "Running pass [" << pass->repr() << "]";
+    pass->Run(argument_->main_dfg.get());
+  }
+}
+
+void NodePassManager::RunAll() {
+  PADDLE_ENFORCE(argument_);
+  PADDLE_ENFORCE(argument_->main_dfg.get());
+  auto trait =
+      GraphTraits<DataFlowGraph>(argument_->main_dfg.get()).nodes_in_DFS();
+  for (auto& node : trait) {
+    for (auto& pass : data_) {
+      pass->Run(&node);
+    }
+  }
+}
+
+}  // namespace analysis
+}  // namespace inference
+}  // namespace paddle
diff --git a/paddle/fluid/inference/analysis/pass_manager.h b/paddle/fluid/inference/analysis/pass_manager.h
new file mode 100644
index 0000000000..81a17e0287
--- /dev/null
+++ b/paddle/fluid/inference/analysis/pass_manager.h
@@ -0,0 +1,106 @@
+/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+/*
+ * This file defines the logic of pass management. The analysis for inference
+ * is a pipeline of Passes, and a PassManager is an agency that helps to
+ * manage the execution of the Passes.
+ *
+ * There are two modes of Passes: the first is the NodePass, which takes a
+ * Node as input and output; the second is the DfgPass, which takes a
+ * DFG (Data Flow Graph) as input and output. Since it is hard to put all the
+ * passes in one pipeline, there are two kinds of PassManagers; both take a
+ * DFG as input and output a DFG, but the Passes inside differ:
+ *
+ * 1. NodePassManager: the passes inside are all NodePasses; it can apply
+ *    different graph traversal algorithms, for example, DFS_NodePassManager
+ *    will trigger the passes in depth-first order;
+ * 2. DfgPassManager: the passes inside are all DfgPasses.
+ */
+
+#pragma once
+
+#include <string>
+#include "paddle/fluid/framework/program_desc.h"
+#include "paddle/fluid/inference/analysis/pass.h"
+
+namespace paddle {
+namespace inference {
+namespace analysis {
+
+/*
+ * PassManager is the base class for all pass managers; a pass manager has
+ * several Passes registered and executes them in linear order.
+ */
+class PassManager : public OrderedRegistry<Pass> {
+ public:
+  PassManager() = default;
+  // Call all the passes' Initialize methods. The desc and data_flow_graph are
+  // globally shared, so pass them as the arguments for all the pass managers.
+  virtual bool Initialize(const Argument& argument) { return false; }
+
+  virtual bool Initialize(Argument* argument);
+
+  // Call all the passes' Finalize methods.
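+  // (A sketch of the expected call order: Register(...) each pass, then
+  //  Initialize(&argument), RunAll(), and finally Finalize(); see the
+  //  pass_manager_tester for a concrete example.)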
+  virtual bool Finalize() {
+    for (auto& pass : data_) {
+      if (!pass->Finalize()) {
+        LOG(ERROR) << "Failed to finalize pass [" << pass->repr() << "]";
+        return false;
+      }
+    }
+    return true;
+  }
+
+  // Run all the passes.
+  virtual void RunAll() = 0;
+
+  // Short identifier.
+  virtual std::string repr() const = 0;
+  // Long description.
+  virtual std::string description() const = 0;
+
+  virtual ~PassManager() = default;
+
+ protected:
+  Argument* argument_{nullptr};
+};
+
+/*
+ * A pass manager that processes a DFG.
+ */
+class DfgPassManager : public PassManager {
+ public:
+  DfgPassManager() = default;
+
+  void RunAll() override;
+
+  virtual ~DfgPassManager() = default;
+};
+
+/*
+ * A pass manager that processes one Node at a time.
+ */
+class NodePassManager : public PassManager {
+ public:
+  NodePassManager() = default;
+
+  void RunAll() override;
+
+  virtual ~NodePassManager() = default;
+};
+
+}  // namespace analysis
+}  // namespace inference
+}  // namespace paddle
diff --git a/paddle/fluid/inference/analysis/pass_manager_tester.cc b/paddle/fluid/inference/analysis/pass_manager_tester.cc
new file mode 100644
index 0000000000..13423e4837
--- /dev/null
+++ b/paddle/fluid/inference/analysis/pass_manager_tester.cc
@@ -0,0 +1,89 @@
+/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+#include <gtest/gtest.h>
+
+#include "paddle/fluid/inference/analysis/data_flow_graph_to_fluid_pass.h"
+#include "paddle/fluid/inference/analysis/dfg_graphviz_draw_pass.h"
+#include "paddle/fluid/inference/analysis/fluid_to_data_flow_graph_pass.h"
+#include "paddle/fluid/inference/analysis/pass_manager.h"
+#include "paddle/fluid/inference/analysis/ut_helper.h"
+
+namespace paddle {
+namespace inference {
+namespace analysis {
+
+class TestDfgPassManager final : public DfgPassManager {
+ public:
+  TestDfgPassManager() = default;
+  virtual ~TestDfgPassManager() = default;
+  // Short identifier.
+  std::string repr() const override { return "test-pass-manager"; }
+  // Long description.
+  std::string description() const override { return "test doc"; }
+};
+
+class TestNodePassManager final : public NodePassManager {
+ public:
+  virtual ~TestNodePassManager() = default;
+
+  std::string repr() const override { return "test-node-pass-manager"; }
+  std::string description() const override { return "test doc"; }
+};
+
+class TestNodePass final : public NodePass {
+ public:
+  virtual ~TestNodePass() = default;
+
+  bool Initialize(Argument* argument) override { return true; }
+
+  void Run(Node* node) override {
+    LOG(INFO) << "- Processing node " << node->repr();
+  }
+
+  std::string repr() const override { return "test-node"; }
+  std::string description() const override { return "some doc"; }
+};
+
+TEST(PassManager, DFG_pass_manager) {
+  TestDfgPassManager manager;
+  DFG_GraphvizDrawPass::Config config("./", "dfg.dot");
+
+  manager.Register("fluid-to-flow-graph", new FluidToDataFlowGraphPass);
+  manager.Register("graphviz", new DFG_GraphvizDrawPass(config));
+  manager.Register("dfg-to-fluid", new DataFlowGraphToFluidPass);
+
+  Argument argument(FLAGS_inference_model_dir);
+
+  ASSERT_TRUE(manager.Initialize(&argument));
+  manager.RunAll();
+}
+
+TEST(PassManager, Node_pass_manager) {
+  Argument argument(FLAGS_inference_model_dir);
+  // Pre-process: initialize the DFG with the ProgramDesc first.
+  FluidToDataFlowGraphPass pass0;
+  pass0.Initialize(&argument);
+  pass0.Run(argument.main_dfg.get());
+
+  TestNodePassManager manager;
+  manager.Register("test-node-pass", new TestNodePass);
+  ASSERT_TRUE(manager.Initialize(&argument));
+  manager.RunAll();
+}
+
+}  // namespace analysis
+}  // namespace inference
+}  // namespace paddle
diff --git a/paddle/fluid/inference/analysis/subgraph_splitter.cc b/paddle/fluid/inference/analysis/subgraph_splitter.cc
new file mode 100644
index 0000000000..80809d4c43
--- /dev/null
+++ b/paddle/fluid/inference/analysis/subgraph_splitter.cc
@@ -0,0 +1,160 @@
+/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+#include "paddle/fluid/inference/analysis/subgraph_splitter.h"
+
+namespace paddle {
+namespace inference {
+namespace analysis {
+
+const char *SubGraphSplitter::kMarkerAttrName =
+    "_sub_graph_splitter_inside_sub_graph";
+
+std::vector<std::vector<Node *>> SubGraphSplitter::operator()() {
+  MarkNodesInsideSubGraph();
+  return ExtractSubGraphs();
+}
+
+// Mark the output variables of a function node that is inside a sub-graph.
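+// (For example, with A_function(marked) -> var -> B_function(marked),
+// marking var as well makes {A_function, var, B_function} one connected
+// marked region, so the splitter can emit it as a single sub-graph.)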
+inline void MarkOutLinksInSubGraph(const Function *func) {
+  for (auto *var : func->outlinks) {
+    var->attr(SubGraphSplitter::kMarkerAttrName).Bool() = true;
+  }
+}
+
+void SubGraphSplitter::MarkNodesInsideSubGraph() {
+  for (auto &node : GraphTraits<DataFlowGraph>(graph_).nodes()) {
+    if (node_inside_subgraph_teller_(&node)) {
+      node.attr(kMarkerAttrName).Bool() = true;
+      if (node.type() == Node::Type::kFunction) {
+        // If a function is inside the sub-graph, mark all its output
+        // variables as inside too, so that two marked functions end up in
+        // the same sub-graph. Take an example: A_function->var->B_function.
+        // If A_function is marked, var should also be marked, so that
+        // B_function will be in the same sub-graph as A_function if
+        // B_function is marked.
+        MarkOutLinksInSubGraph(static_cast<const Function *>(&node));
+      }
+    }
+  }
+}
+
+const char *kUnionFindParent = "_sub_graph_splitter_union_find_parent_";
+
+// Use the Union-Find (UF) algorithm to find fully connected sub-graphs: if
+// node a's output is node b, then a and b are in the same sub-graph. The UF
+// algorithm groups them into the same cluster.
+using node_map_t = std::unordered_map<int, Node *>;
+// Find the ancestor id of a node.
+int UnionFindGetAncestor(const node_map_t &node_map, size_t id) {
+  int tmp = id;
+  do {
+    tmp = node_map.at(tmp)->attr(kUnionFindParent).Int32();
+  } while (node_map.at(tmp)->attr(kUnionFindParent).Int32() != tmp);
+  return tmp;
+}
+// Make these two nodes share the same ancestor.
+// TODO(Superjom) bad performance; make a balanced tree later.
+void UnionFindCombine(const node_map_t &node_map, size_t a, size_t b) {
+  int a_ancestor = UnionFindGetAncestor(node_map, a);
+  int b_ancestor = UnionFindGetAncestor(node_map, b);
+  node_map.at(b_ancestor)->attr(kUnionFindParent).Int32() = a_ancestor;
+  node_map.at(a)->attr(kUnionFindParent).Int32() = a_ancestor;
+  node_map.at(b)->attr(kUnionFindParent).Int32() = a_ancestor;
+}
+
+std::vector<std::vector<Node *>> SubGraphSplitter::ExtractSubGraphs() {
+  std::vector<Node *> marked_nodes;
+  for (auto &node : GraphTraits<DataFlowGraph>(graph_).nodes_in_TS()) {
+    if (node.attr(kMarkerAttrName).Bool()) {
+      marked_nodes.push_back(&node);
+    }
+  }
+  // Extract sub-graphs from the marked node set with the Union-Find
+  // algorithm.
+  node_map_t node_map;  // id to ptr
+  for (auto *n : marked_nodes) {
+    // n's parent == n.id means it is the ancestor
+    n->attr(kUnionFindParent).Int32() = n->id();
+    node_map[n->id()] = n;
+  }
+  std::unordered_set<int> visited;
+  for (auto *n : marked_nodes) {
+    for (auto *out : n->outlinks) {
+      if (node_map.count(out->id())) {
+        UnionFindCombine(node_map, n->id(), out->id());
+      }
+    }
+  }
+
+  std::unordered_map<int, std::vector<Node *>> clusters;
+  for (auto *n : marked_nodes) {
+    if (n->type() == Node::Type::kFunction) {
+      clusters[UnionFindGetAncestor(node_map,
+                                    n->attr(kUnionFindParent).Int32())]
+          .push_back(n);
+    }
+  }
+  std::vector<std::vector<Node *>> result;
+  std::for_each(clusters.begin(), clusters.end(),
+                [&](const decltype(clusters)::value_type &it) {
+                  result.push_back(it.second);
+                });
+
+  return result;
+}
+
+void SubGraphFuse::operator()() { ReplaceNodesWithSubGraphs(); }
+
+void SubGraphFuse::ReplaceNodesWithSubGraphs() {
+  auto subgraphs = SubGraphSplitter(graph_, node_inside_subgraph_teller_)();
+  for (auto &subgraph : subgraphs) {
+    std::unordered_set<Node *> subgraph_uniq(subgraph.begin(), subgraph.end());
+    // Replace this sub-graph with a single node. Three steps: 1. Create a
+    // FunctionBlock node that contains this sub-graph. 2. Mark the nodes
+    // inside the sub-graph as deleted. 3. Replace the deleted nodes with the
+    // new FunctionBlock node.
+    auto *block_node = static_cast<FunctionBlock *>(
+        graph_->nodes.Create(Node::Type::kFunctionBlock));
+    auto io = ExtractInputAndOutputOfSubGraph(subgraph);
+    block_node->inlinks = std::move(io.first);
+    block_node->outlinks = std::move(io.second);
+    for (auto *node : subgraph) {
+      // TODO(Superjomn) need a unified mechanism to treat deleted nodes in
+      // each pass.
+      node->SetDeleted();
+      block_node->subgraph.push_back(node);
+    }
+
+    // Rewire the inlinks and outlinks of the sub-graph's inputs and outputs
+    // so they point at this new sub-graph node.
+    auto inlink_or_outlink_cleaner = [&](std::vector<Node *> &nodes) {
+      for (auto *&n : nodes) {
+        if (subgraph_uniq.count(n)) {
+          n = block_node;
+        }
+      }
+      std::unordered_set<Node *> uniq(nodes.begin(), nodes.end());
+      nodes.assign(uniq.begin(), uniq.end());
+    };
+    for (auto *i : block_node->inlinks) {
+      inlink_or_outlink_cleaner(i->outlinks);
+    }
+    for (auto *&o : block_node->outlinks) {
+      inlink_or_outlink_cleaner(o->inlinks);
+    }
+  }
+}
+
+}  // namespace analysis
+}  // namespace inference
+}  // namespace paddle
diff --git a/paddle/fluid/inference/analysis/subgraph_splitter.h b/paddle/fluid/inference/analysis/subgraph_splitter.h
new file mode 100644
index 0000000000..a31afbe693
--- /dev/null
+++ b/paddle/fluid/inference/analysis/subgraph_splitter.h
@@ -0,0 +1,83 @@
+/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+/*
+ * This file defines the class to partition a graph.
+ */
+
+#pragma once
+
+#include <vector>
+
+#include "paddle/fluid/inference/analysis/data_flow_graph.h"
+#include "paddle/fluid/inference/analysis/node.h"
+
+namespace paddle {
+namespace inference {
+namespace analysis {
+
+/*
+ * Detect the nodes in a sub-graph that meet some conditions. This class
+ * doesn't modify the graph.
+ */
+class SubGraphSplitter {
+ public:
+  static const char *kMarkerAttrName;
+  // Tell whether a node is inside a sub-graph.
+  using NodeInsideSubgraphTeller = std::function<bool(const Node *)>;
+
+  SubGraphSplitter(DataFlowGraph *graph,
+                   const NodeInsideSubgraphTeller &teller)
+      : graph_(graph), node_inside_subgraph_teller_(teller) {}
+
+  std::vector<std::vector<Node *>> operator()();
+
+ protected:
+  // Mark the nodes inside the accepted sub-graph using
+  // node_inside_subgraph_teller.
+  void MarkNodesInsideSubGraph();
+
+  // Merge the marked nodes into sub-graphs and return them.
+  std::vector<std::vector<Node *>> ExtractSubGraphs();
+
+ private:
+  DataFlowGraph *graph_;
+  NodeInsideSubgraphTeller node_inside_subgraph_teller_;
+};
+
+/*
+ * SubGraphFuse - Replace some nodes with the sub-graph node they belong to.
+ * To some extent, the TensorRT engine is just a fusion op for a model.
+ */
+class SubGraphFuse {
+ public:
+  using NodeInsideSubgraphTeller = SubGraphSplitter::NodeInsideSubgraphTeller;
+
+  SubGraphFuse(DataFlowGraph *graph, const NodeInsideSubgraphTeller &teller)
+      : graph_(graph), node_inside_subgraph_teller_(teller) {}
+
+  // The main method that runs all the logic.
+  void operator()();
+
+ protected:
+  // Remove the nodes inside sub-graphs and replace them with the sub-graph
+  // node (a FunctionBlock).
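+  // (After replacement, the new FunctionBlock owns the sub-graph's external
+  //  in/out links; interior nodes are only marked deleted, not freed.)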
+  void ReplaceNodesWithSubGraphs();
+
+ private:
+  DataFlowGraph *graph_;
+  NodeInsideSubgraphTeller node_inside_subgraph_teller_;
+};
+
+}  // namespace analysis
+}  // namespace inference
+}  // namespace paddle
diff --git a/paddle/fluid/inference/analysis/subgraph_splitter_tester.cc b/paddle/fluid/inference/analysis/subgraph_splitter_tester.cc
new file mode 100644
index 0000000000..39cc433b40
--- /dev/null
+++ b/paddle/fluid/inference/analysis/subgraph_splitter_tester.cc
@@ -0,0 +1,90 @@
+/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+#include "paddle/fluid/inference/analysis/subgraph_splitter.h"
+#include "paddle/fluid/inference/analysis/ut_helper.h"
+
+namespace paddle {
+namespace inference {
+namespace analysis {
+
+SubGraphSplitter::NodeInsideSubgraphTeller teller = [](const Node* node) {
+  if (node->type() != Node::Type::kFunction) return false;
+  const auto* func = static_cast<const Function*>(node);
+  if (func->func_type() == "elementwise_add" || func->func_type() == "relu" ||
+      func->func_type() == "conv2d" || func->func_type() == "mul" ||
+      func->func_type() == "sigmoid" || func->func_type() == "softmax") {
+    LOG(INFO) << "sub-graph marked " << node->repr();
+    return true;
+  }
+  return false;
+};
+
+TEST(SubGraphSplitter, Split) {
+  auto desc = LoadProgramDesc(FLAGS_inference_model_dir + "/__model__");
+  auto dfg = ProgramDescToDFG(desc);
+  LOG(INFO) << "splitter\n" << dfg.DotString();
+
+  ASSERT_GT(dfg.nodes.size(), 5UL);
+
+  auto subgraphs = SubGraphSplitter(&dfg, teller)();
+
+  // Check the number of the marked nodes.
+  int marked_nodes = 0;
+  for (auto& node : dfg.nodes.nodes()) {
+    if (node->IsFunction() &&
+        node->attr(SubGraphSplitter::kMarkerAttrName).Bool()) {
+      ++marked_nodes;
+    }
+  }
+  EXPECT_EQ(marked_nodes, 6);
+
+  // For human debugging.
+  for (auto& subgraph : subgraphs) {
+    LOG(INFO) << "subgraph size " << subgraph.size();
+    for (auto* node : subgraph) {
+      LOG(INFO) << "node " << node->repr();
+    }
+  }
+
+  ASSERT_EQ(subgraphs.size(), 1UL);
+  // The last sub-graph has 6 Functions.
+  ASSERT_EQ(subgraphs.back().size(), 6UL);
+}
+
+TEST(SubGraphSplitter, Fuse) {
+  auto desc = LoadProgramDesc(FLAGS_inference_model_dir + "/__model__");
+  auto dfg = ProgramDescToDFG(desc);
+
+  size_t count0 = dfg.nodes.size();
+
+  SubGraphFuse fuse(&dfg, teller);
+  fuse();
+
+  int count1 = 0;
+  for (auto& node : dfg.nodes.nodes()) {
+    if (node->deleted()) {
+      LOG(INFO) << "deleted " << node->repr();
+    }
+    count1 += node->deleted();
+  }
+
+  // At least one node should be deleted.
+  ASSERT_EQ(dfg.nodes.size(), count0 + 1);  // added a new FunctionBlock
+  ASSERT_EQ(6, count1);
+}
+
+}  // namespace analysis
+}  // namespace inference
+}  // namespace paddle
diff --git a/paddle/fluid/inference/analysis/tensorrt_subgraph_node_mark_pass.cc b/paddle/fluid/inference/analysis/tensorrt_subgraph_node_mark_pass.cc
new file mode 100644
index 0000000000..f736e385c1
--- /dev/null
+++ b/paddle/fluid/inference/analysis/tensorrt_subgraph_node_mark_pass.cc
@@ -0,0 +1,80 @@
+// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <string>
+
+#include "paddle/fluid/inference/analysis/analyzer.h"
+#include "paddle/fluid/inference/analysis/dfg_graphviz_draw_pass.h"
+#include "paddle/fluid/inference/analysis/node_attr_flags.h"
+#include "paddle/fluid/inference/analysis/tensorrt_subgraph_node_mark_pass.h"
+
+namespace paddle {
+namespace inference {
+namespace analysis {
+
+void TensorRTSubgraphNodeMarkPass::Run(DataFlowGraph *graph) {
+  for (auto &node : graph->nodes.nodes()) {
+    node->attr(ATTR_supported_by_tensorrt).Bool() = teller_(node.get());
+  }
+}
+
+class DfgDebuggerPass : public DFG_GraphvizDrawPass {
+ public:
+  explicit DfgDebuggerPass(const DFG_GraphvizDrawPass::Config &config)
+      : DFG_GraphvizDrawPass(config) {}
+
+  std::string repr() const override {
+    return "tensorrt-subgraph-node-mark-debugger";
+  }
+
+  bool Finalize() override { return true; }
+
+ protected:
+  std::string Draw(DataFlowGraph *graph) override {
+    Dot dot;
+    // Add nodes
+    for (size_t i = 0; i < graph->nodes.size(); i++) {
+      const Node &node = graph->nodes.Get(i);
+      if (config_.display_deleted_node || !node.deleted()) {
+        auto dot_attr = node.dot_attrs();
+        if (node.attr(ATTR_supported_by_tensorrt).Bool()) {
+          dot_attr.assign(
+              {Dot::Attr{"color", "green"}, Dot::Attr{"style", "filled"}});
+        }
+        dot.AddNode(node.repr(), dot_attr);
+      }
+    }
+    // Add edges
+    for (size_t i = 0; i < graph->nodes.size(); i++) {
+      const Node &node = graph->nodes.Get(i);
+      if (!config_.display_deleted_node && node.deleted()) continue;
+      for (auto &in : node.inlinks) {
+        if (!config_.display_deleted_node && in->deleted()) continue;
+        dot.AddEdge(in->repr(), node.repr(), {});
+      }
+    }
+    return dot.Build();
+  }
+};
+
+Pass *TensorRTSubgraphNodeMarkPass::CreateGraphvizDebugerPass() const {
+  DFG_GraphvizDrawPass::Config config(
+      FLAGS_inference_analysis_graphviz_log_root, "tensorrt_marked_node");
+  return new DfgDebuggerPass(config);
+}
+
+bool TensorRTSubgraphNodeMarkPass::Finalize() { return true; }
+
+}  // namespace analysis
+}  // namespace inference
+}  // namespace paddle
diff --git a/paddle/fluid/inference/analysis/tensorrt_subgraph_node_mark_pass.h b/paddle/fluid/inference/analysis/tensorrt_subgraph_node_mark_pass.h
new file mode 100644
index 0000000000..c558a6ebbd
--- /dev/null
+++ b/paddle/fluid/inference/analysis/tensorrt_subgraph_node_mark_pass.h
@@ -0,0 +1,60 @@
+// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+/*
+ * This file defines TensorRTSubgraphNodeMarkPass, which helps to mark the ops
+ * that are supported by the TensorRT engine.
+ */
+
+#pragma once
+
+#include <string>
+#include "paddle/fluid/inference/analysis/pass.h"
+#include "paddle/fluid/inference/analysis/subgraph_splitter.h"
+
+namespace paddle {
+namespace inference {
+namespace analysis {
+
+/*
+ * Mark the operators that the TensorRT engine supports.
+ */
+class TensorRTSubgraphNodeMarkPass : public DataFlowGraphPass {
+ public:
+  using teller_t = SubGraphSplitter::NodeInsideSubgraphTeller;
+
+  explicit TensorRTSubgraphNodeMarkPass(const teller_t& teller)
+      : teller_(teller) {}
+
+  bool Initialize(Argument* argument) override { return true; }
+
+  // Scan the whole graph and mark, node by node, whether TensorRT supports
+  // it.
+  void Run(DataFlowGraph* graph) override;
+
+  std::string repr() const override { return "tensorrt-sub-subgraph-mark"; }
+  std::string description() const override {
+    return "tensorrt sub-graph mark pass";
+  }
+
+  Pass* CreateGraphvizDebugerPass() const override;
+  bool Finalize() override;
+
+ private:
+  teller_t teller_;
+};
+
+}  // namespace analysis
+}  // namespace inference
+}  // namespace paddle
diff --git a/paddle/fluid/inference/analysis/tensorrt_subgraph_node_mark_pass_tester.cc b/paddle/fluid/inference/analysis/tensorrt_subgraph_node_mark_pass_tester.cc
new file mode 100644
index 0000000000..c1d932878e
--- /dev/null
+++ b/paddle/fluid/inference/analysis/tensorrt_subgraph_node_mark_pass_tester.cc
@@ -0,0 +1,50 @@
+// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "paddle/fluid/inference/analysis/tensorrt_subgraph_node_mark_pass.h"
+
+#include <gtest/gtest.h>
+#include "paddle/fluid/inference/analysis/node_attr_flags.h"
+#include "paddle/fluid/inference/analysis/ut_helper.h"
+
+namespace paddle {
+namespace inference {
+namespace analysis {
+
+TEST(TensorRTSubgraphNodeMarkPass, test) {
+  // init
+  FluidToDataFlowGraphPass pass;
+  Argument argument(FLAGS_inference_model_dir);
+  ASSERT_TRUE(pass.Initialize(&argument));
+  pass.Run(argument.main_dfg.get());
+
+  TensorRTSubgraphNodeMarkPass::teller_t teller = [](const Node* node) {
+    return node->IsFunction() &&
+           static_cast<const Function*>(node)->func_type() == "mul";
+  };
+  TensorRTSubgraphNodeMarkPass pass1(teller);
+  ASSERT_TRUE(pass1.Initialize(&argument));
+  pass1.Run(argument.main_dfg.get());
+
+  int counter{0};
+  for (auto& node : argument.main_dfg->nodes.nodes()) {
+    counter += node->attr(ATTR_supported_by_tensorrt).Bool();
+  }
+  ASSERT_EQ(counter, 2);
+  LOG(INFO) << counter << " nodes marked";
+}
+
+}  // namespace analysis
+}  // namespace inference
+}  // namespace paddle
diff --git a/paddle/fluid/inference/analysis/tensorrt_subgraph_pass.cc b/paddle/fluid/inference/analysis/tensorrt_subgraph_pass.cc
new file mode 100644
index 0000000000..faf876de6d
--- /dev/null
+++ b/paddle/fluid/inference/analysis/tensorrt_subgraph_pass.cc
@@ -0,0 +1,36 @@
+// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "paddle/fluid/inference/analysis/tensorrt_subgraph_pass.h"
+#include "paddle/fluid/inference/analysis/subgraph_splitter.h"
+
+namespace paddle {
+namespace inference {
+namespace analysis {
+
+TensorRTSubGraphPass::TensorRTSubGraphPass(
+    const TensorRTSubGraphPass::NodeInsideSubgraphTeller &teller)
+    : node_inside_subgraph_teller_(teller) {}
+
+void TensorRTSubGraphPass::Run(DataFlowGraph *graph) {
+  SubGraphFuse(graph, node_inside_subgraph_teller_)();
+  VLOG(4) << "debug info "
+          << graph->HumanReadableInfo(false /*show_values*/,
+                                      true /*show_functions*/);
+}
+
+}  // namespace analysis
+}  // namespace inference
+
+}  // namespace paddle
diff --git a/paddle/fluid/inference/analysis/tensorrt_subgraph_pass.h b/paddle/fluid/inference/analysis/tensorrt_subgraph_pass.h
new file mode 100644
index 0000000000..c6741a9209
--- /dev/null
+++ b/paddle/fluid/inference/analysis/tensorrt_subgraph_pass.h
@@ -0,0 +1,53 @@
+/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+#pragma once
+
+#include <string>
+#include "paddle/fluid/inference/analysis/node.h"
+#include "paddle/fluid/inference/analysis/pass.h"
+#include "paddle/fluid/inference/analysis/subgraph_splitter.h"
+
+namespace paddle {
+namespace inference {
+namespace analysis {
+
+/*
+ * Parse the graph and replace TensorRT-supported nodes with a SubGraphNode.
+ */
+class TensorRTSubGraphPass : public DataFlowGraphPass {
+ public:
+  // Tell whether to transform a sub-graph into TensorRT.
+  using NodeInsideSubgraphTeller = SubGraphFuse::NodeInsideSubgraphTeller;
+
+  explicit TensorRTSubGraphPass(const NodeInsideSubgraphTeller& teller);
+
+  bool Initialize(Argument* argument) override { return true; }
+
+  // This pass takes a sub-graph as input and determines whether to transform
+  // it into TensorRT.
+  void Run(DataFlowGraph* graph) override;
+
+  bool Finalize() override { return true; }
+
+  std::string repr() const override { return "tensorrt-sub-graph"; }
+  std::string description() const override {
+    return "tensorrt sub graph pass";
+  }
+
+ private:
+  NodeInsideSubgraphTeller node_inside_subgraph_teller_;
+};
+
+}  // namespace analysis
+}  // namespace inference
+}  // namespace paddle
diff --git a/paddle/fluid/inference/analysis/tensorrt_subgraph_pass_tester.cc b/paddle/fluid/inference/analysis/tensorrt_subgraph_pass_tester.cc
new file mode 100644
index 0000000000..67a5af83d8
--- /dev/null
+++ b/paddle/fluid/inference/analysis/tensorrt_subgraph_pass_tester.cc
@@ -0,0 +1,69 @@
+/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+#include "paddle/fluid/inference/analysis/tensorrt_subgraph_pass.h"
+
+#include <gtest/gtest.h>
+#include <unordered_set>
+#include "paddle/fluid/inference/analysis/dfg_graphviz_draw_pass.h"
+#include "paddle/fluid/inference/analysis/ut_helper.h"
+
+namespace paddle {
+namespace inference {
+namespace analysis {
+
+DEFINE_string(dot_dir, "./", "");
+
+TEST(TensorRTSubGraphPass, main) {
+  std::unordered_set<std::string> teller_set(
+      {"elementwise_add", "mul", "sigmoid"});
+  SubGraphSplitter::NodeInsideSubgraphTeller teller = [&](const Node* node) {
+    if (node->type() != Node::Type::kFunction) return false;
+    const auto* func = static_cast<const Function*>(node);
+    if (teller_set.count(func->func_type())) return true;
+    return false;
+  };
+
+  Argument argument(FLAGS_inference_model_dir);
+
+  DFG_GraphvizDrawPass::Config config{FLAGS_dot_dir, "origin"};
+  DFG_GraphvizDrawPass::Config config1{FLAGS_dot_dir, "fusion"};
+
+  DFG_GraphvizDrawPass dfg_pass(config);
+  DFG_GraphvizDrawPass dfg_pass1(config1);
+  FluidToDataFlowGraphPass pass0;
+  TensorRTSubGraphPass trt_pass(std::move(teller));
+
+  dfg_pass.Initialize(&argument);
+  dfg_pass1.Initialize(&argument);
+  pass0.Initialize(&argument);
+  trt_pass.Initialize(&argument);
+
+  argument.main_dfg.reset(new DataFlowGraph);
+  pass0.Run(argument.main_dfg.get());
+  dfg_pass.Run(argument.main_dfg.get());
+  trt_pass.Run(argument.main_dfg.get());
+  dfg_pass1.Run(argument.main_dfg.get());
+
+  // Check the TRT op's block desc
+  for (auto& node : argument.main_dfg->nodes.nodes()) {
+    if (node->IsFunctionBlock()) {
+      LOG(INFO) << "get function block";
+    }
+  }
+}
+
+}  // namespace analysis
+}  // namespace inference
+}  // namespace paddle
diff --git a/paddle/fluid/inference/analysis/ut_helper.h b/paddle/fluid/inference/analysis/ut_helper.h
new file mode 100644
index 0000000000..1073a6f686
--- /dev/null
+++ b/paddle/fluid/inference/analysis/ut_helper.h
@@ -0,0 +1,60 @@
+/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+#pragma once
+#include <gflags/gflags.h>
+#include <glog/logging.h>
+#include <gtest/gtest.h>
+#include <string>
+#include "paddle/fluid/framework/executor.h"
+#include "paddle/fluid/inference/analysis/data_flow_graph.h"
+#include "paddle/fluid/inference/analysis/fluid_to_data_flow_graph_pass.h"
+#include "paddle/fluid/inference/analysis/helper.h"
+
+namespace paddle {
+namespace inference {
+
+// Read ProgramDesc from a __model__ file, defined in io.cc.
+extern void ReadBinaryFile(const std::string& filename, std::string* contents);
+
+namespace analysis {
+
+DEFINE_string(inference_model_dir, "", "inference test model dir");
+
+static DataFlowGraph ProgramDescToDFG(
+    const framework::proto::ProgramDesc& desc) {
+  DataFlowGraph graph;
+  FluidToDataFlowGraphPass pass;
+  Argument argument;
+  argument.fluid_model_dir.reset(new std::string(FLAGS_inference_model_dir));
+  argument.origin_program_desc.reset(new framework::proto::ProgramDesc(desc));
+  pass.Initialize(&argument);
+  pass.Run(&graph);
+  pass.Finalize();
+  return graph;
+}
+
+class DFG_Tester : public ::testing::Test {
+ protected:
+  void SetUp() override {
+    auto desc = LoadProgramDesc(FLAGS_inference_model_dir + "/__model__");
+    argument.origin_program_desc.reset(new framework::proto::ProgramDesc(desc));
+  }
+
+  Argument argument;
+};
+
+}  // namespace analysis
+}  // namespace inference
+}  // namespace paddle
diff --git a/paddle/fluid/inference/api/CMakeLists.txt b/paddle/fluid/inference/api/CMakeLists.txt
new file mode 100644
index 0000000000..83867e0a2c
--- /dev/null
+++ b/paddle/fluid/inference/api/CMakeLists.txt
@@ -0,0 +1,78 @@
+# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+if(APPLE)
+  set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-error=pessimizing-move")
+endif(APPLE)
+
+
+set(inference_deps paddle_inference_api paddle_fluid_api)
+
+if(WITH_GPU AND TENSORRT_FOUND)
+    set(inference_deps ${inference_deps} paddle_inference_tensorrt_subgraph_engine)
+endif()
+
+function(inference_api_test TARGET_NAME)
+  if (WITH_TESTING)
+    set(options "")
+    set(oneValueArgs SRC)
+    set(multiValueArgs ARGS)
+    cmake_parse_arguments(inference_test "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN})
+
+    set(PYTHON_TESTS_DIR ${PADDLE_BINARY_DIR}/python/paddle/fluid/tests)
+    cc_test(${TARGET_NAME}
+            SRCS ${inference_test_SRC}
+            DEPS "${inference_deps}"
+            ARGS --dirname=${PYTHON_TESTS_DIR}/book/)
+    if(inference_test_ARGS)
+      set_tests_properties(${TARGET_NAME}
+              PROPERTIES DEPENDS "${inference_test_ARGS}")
+    endif()
+  endif(WITH_TESTING)
+endfunction(inference_api_test)
+
+cc_library(paddle_inference_api SRCS api.cc api_impl.cc DEPS lod_tensor)
+
+cc_test(test_paddle_inference_api
+        SRCS api_tester.cc
+        DEPS paddle_inference_api)
+
+inference_api_test(test_api_impl SRC api_impl_tester.cc
+                   ARGS test_word2vec test_image_classification)
+
+if(WITH_GPU AND TENSORRT_FOUND)
+cc_library(paddle_inference_tensorrt_subgraph_engine
+    SRCS api_tensorrt_subgraph_engine.cc
+    DEPS paddle_inference_api analysis tensorrt_engine paddle_fluid_api tensorrt_converter)
+
+inference_api_test(test_api_tensorrt_subgraph_engine SRC api_tensorrt_subgraph_engine_tester.cc ARGS test_word2vec)
+endif()
+
+if (WITH_ANAKIN) # only needed in CI
+  # compile the libinference_anakin_api.a and anakin.so.
+  nv_library(inference_anakin_api SRCS api.cc api_anakin_engine.cc DEPS anakin_shared anakin_saber)
+  #nv_library(inference_anakin_api_shared SHARED SRCS api.cc api_anakin_engine.cc DEPS anakin)
+  function(anakin_target target_name)
+    target_compile_options(${target_name} BEFORE PUBLIC ${ANAKIN_COMPILE_EXTRA_FLAGS})
+  endfunction()
+  anakin_target(inference_anakin_api)
+  #anakin_target(inference_anakin_api_shared)
+  if (WITH_TESTING)
+    cc_test(inference_anakin_test SRCS api_anakin_engine_tester.cc
+            ARGS --model=${ANAKIN_SOURCE_DIR}/mobilenet_v2.anakin.bin
+            DEPS inference_anakin_api dynload_cuda SERIAL)
+    target_compile_options(inference_anakin_test BEFORE PUBLIC ${ANAKIN_COMPILE_EXTRA_FLAGS})
+  endif(WITH_TESTING)
+endif()
diff --git a/paddle/fluid/inference/api/README.md b/paddle/fluid/inference/api/README.md
new file mode 100644
index 0000000000..20969fac6c
--- /dev/null
+++ b/paddle/fluid/inference/api/README.md
@@ -0,0 +1,27 @@
+# Embed Paddle Inference in Your Application
+
+Paddle inference offers APIs in the `C` and `C++` languages.
+
+One can easily deploy a model trained by Paddle by following the steps below:
+
+1. Optimize the native model;
+2. Write some code for deployment.
+
+
+Let's explain the steps in detail.
+
+## Optimize the native Fluid Model
+
+The native model obtained from the training phase needs to be optimized for inference:
+
+- Clean the noise, such as cost operators that are not needed for inference;
+- Prune unnecessary computation forks that have nothing to do with the output;
+- Remove extraneous variables;
+- Reuse memory in the native Fluid executor;
+- Translate the model storage format to a third-party engine's, so that the inference API can use that engine for acceleration;
+
+We have an official tool for the optimization; call `paddle_inference_optimize --help` for more information.
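+
+With an optimized model in hand, the deployment code itself is only a few
+lines. A minimal sketch follows (illustrative only: the exact config fields
+and the default engine kind of `CreatePaddlePredictor` may differ, so check
+`paddle_inference_api.h` for the authoritative API):
+
+```c++
+#include <vector>
+#include "paddle/fluid/inference/api/paddle_inference_api.h"
+
+int main() {
+  paddle::NativeConfig config;
+  config.model_dir = "./my_model";  // directory of the optimized model
+  config.use_gpu = false;
+
+  auto predictor = paddle::CreatePaddlePredictor<paddle::NativeConfig>(config);
+
+  std::vector<paddle::PaddleTensor> inputs, outputs;
+  // ... fill `inputs` (name, shape, data, dtype) ...
+  predictor->Run(inputs, &outputs);
+  return 0;
+}
+```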
+
+## Write some code
+
+Read `paddle_inference_api.h` for more information.
diff --git a/paddle/fluid/inference/api/api.cc b/paddle/fluid/inference/api/api.cc
new file mode 100644
index 0000000000..63c3f0d7b3
--- /dev/null
+++ b/paddle/fluid/inference/api/api.cc
@@ -0,0 +1,95 @@
+/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+#include <cassert>
+#include <cstring>
+#include "paddle/fluid/inference/api/paddle_inference_api.h"
+
+namespace paddle {
+
+int PaddleDtypeSize(PaddleDType dtype) {
+  switch (dtype) {
+    case PaddleDType::FLOAT32:
+      return sizeof(float);
+    case PaddleDType::INT64:
+      return sizeof(int64_t);
+    default:
+      assert(false);
+      return -1;
+  }
+}
+
+PaddleBuf::PaddleBuf(PaddleBuf&& other)
+    : data_(other.data_),
+      length_(other.length_),
+      memory_owned_(other.memory_owned_) {
+  other.memory_owned_ = false;
+  other.data_ = nullptr;
+  other.length_ = 0;
+}
+
+PaddleBuf::PaddleBuf(const PaddleBuf& other) { *this = other; }
+
+PaddleBuf& PaddleBuf::operator=(const PaddleBuf& other) {
+  if (!other.memory_owned_) {
+    data_ = other.data_;
+    length_ = other.length_;
+    memory_owned_ = other.memory_owned_;
+  } else {
+    Resize(other.length());
+    memcpy(data_, other.data(), other.length());
+    length_ = other.length();
+    memory_owned_ = true;
+  }
+  return *this;
+}
+
+PaddleBuf& PaddleBuf::operator=(PaddleBuf&& other) {
+  // Steal the buffer from other; the moved-from object no longer owns it.
+  data_ = other.data_;
+  length_ = other.length_;
+  memory_owned_ = other.memory_owned_;
+  other.data_ = nullptr;
+  other.length_ = 0;
+  other.memory_owned_ = false;
+  return *this;
+}
+
+void PaddleBuf::Resize(size_t length) {
+  // Only the owned memory can be reset; external memory can't be changed.
+  if (length_ == length) return;
+  if (memory_owned_) {
+    Free();
+  }
+  data_ = new char[length];
+  length_ = length;
+  memory_owned_ = true;
+}
+
+void PaddleBuf::Reset(void* data, size_t length) {
+  Free();
+  memory_owned_ = false;
+  data_ = data;
+  length_ = length;
+}
+
+void PaddleBuf::Free() {
+  if (memory_owned_ && data_) {
+    assert(length_ > 0);
+    delete[] static_cast<char*>(data_);
+    data_ = nullptr;
+    length_ = 0;
+  }
+}
+
+}  // namespace paddle
diff --git a/paddle/fluid/inference/api/api_anakin_engine.cc b/paddle/fluid/inference/api/api_anakin_engine.cc
new file mode 100644
index 0000000000..6b374ceefb
--- /dev/null
+++ b/paddle/fluid/inference/api/api_anakin_engine.cc
@@ -0,0 +1,167 @@
+// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "paddle/fluid/inference/api/api_anakin_engine.h"
+#include <cuda.h>
+#include <vector>
+
+namespace paddle {
+
+template <typename Target>
+PaddleInferenceAnakinPredictor<Target>::PaddleInferenceAnakinPredictor(
+    const AnakinConfig &config) {
+  CHECK(Init(config));
+}
+
+template <typename Target>
+bool PaddleInferenceAnakinPredictor<Target>::Init(const AnakinConfig &config) {
+  if (!(graph_.load(config.model_file))) {
+    LOG(FATAL) << "fail to load graph from " << config.model_file;
+    return false;
+  }
+  auto inputs = graph_.get_ins();
+  for (auto &input_str : inputs) {
+    graph_.ResetBatchSize(input_str, config.max_batch_size);
+  }
+  // optimization for the graph
+  if (!(graph_.Optimize())) {
+    return false;
+  }
+  // construct the executor
+  if (executor_p_ == nullptr) {
+    executor_p_ = new anakin::Net<Target, anakin::saber::AK_FLOAT,
+                                  anakin::Precision::FP32>(graph_, true);
+  }
+  return true;
+}
+
+template <typename Target>
+bool PaddleInferenceAnakinPredictor<Target>::Run(
+    const std::vector<PaddleTensor> &inputs,
+    std::vector<PaddleTensor> *output_data, int batch_size) {
+  for (const auto &input : inputs) {
+    if (input.dtype != PaddleDType::FLOAT32) {
+      LOG(ERROR) << "Only float inputs are supported. " << input.name
+                 << "'s type is not float";
+      return false;
+    }
+    auto d_tensor_in_p = executor_p_->get_in(input.name);
+    auto net_shape = d_tensor_in_p->valid_shape();
+    if (net_shape.size() != input.shape.size()) {
+      LOG(ERROR) << " input " << input.name
+                 << "'s shape size should be equal to that of net";
+      return false;
+    }
+    int sum = 1;
+    for_each(input.shape.begin(), input.shape.end(), [&](int n) { sum *= n; });
+    if (sum > net_shape.count()) {
+      graph_.Reshape(input.name, input.shape);
+      delete executor_p_;
+      executor_p_ = new anakin::Net<Target, anakin::saber::AK_FLOAT,
+                                    anakin::Precision::FP32>(graph_, true);
+      d_tensor_in_p = executor_p_->get_in(input.name);
+    }
+
+    anakin::saber::Shape tmp_shape;
+    for (auto s : input.shape) {
+      tmp_shape.push_back(s);
+    }
+    d_tensor_in_p->reshape(tmp_shape);
+
+    float *d_data_p = d_tensor_in_p->mutable_data();
+    if (cudaMemcpy(d_data_p, static_cast<float *>(input.data.data()),
+                   d_tensor_in_p->valid_size() * sizeof(float),
+                   cudaMemcpyHostToDevice) != 0) {
+      LOG(ERROR) << "copy data from CPU to GPU error";
+      return false;
+    }
+    cudaStreamSynchronize(NULL);
+  }
+  cudaDeviceSynchronize();
+  executor_p_->prediction();
+  cudaDeviceSynchronize();
+
+  if (output_data->empty()) {
+    LOG(ERROR) << "At least one output should be set with tensors' names.";
+    return false;
+  }
+  for (auto &output : *output_data) {
+    auto *tensor = executor_p_->get_out(output.name);
+    output.shape = tensor->valid_shape();
+    if (output.data.length() < tensor->valid_size() * sizeof(float)) {
+      output.data.Resize(tensor->valid_size() * sizeof(float));
+    }
+    // Copy data from GPU -> CPU
+    if (cudaMemcpy(output.data.data(), tensor->mutable_data(),
+                   tensor->valid_size() * sizeof(float),
+                   cudaMemcpyDeviceToHost) != 0) {
+      LOG(ERROR) << "copy data from GPU to CPU error";
+      return false;
+    }
+    cudaStreamSynchronize(NULL);
+  }
+  return true;
+}
+
+template <typename Target>
+anakin::Net<Target, anakin::saber::AK_FLOAT, anakin::Precision::FP32>
+    &PaddleInferenceAnakinPredictor<Target>::get_executer() {
+  return *executor_p_;
+}
+
+// The cloned Predictor shares the same net weights as the original
+// Predictor.
+template <typename Target>
+std::unique_ptr<PaddlePredictor>
+PaddleInferenceAnakinPredictor<Target>::Clone() {
+  VLOG(3) << "Anakin Predictor::clone";
+  std::unique_ptr<PaddlePredictor> cls(
+      new PaddleInferenceAnakinPredictor<Target>());
+  // construct the executor from the other graph
+  auto anakin_predictor_p =
+      dynamic_cast<PaddleInferenceAnakinPredictor<Target> *>(cls.get());
+  if (!anakin_predictor_p) {
+    LOG(ERROR) << "fail to call Init";
+    return nullptr;
+  }
+
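+  // init() builds the cloned executor on this predictor's graph_, so the
+  // network weights are shared rather than copied.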
anakin_predictor_p->get_executer().init(graph_); + + return std::move(cls); +} + +template class PaddleInferenceAnakinPredictor; +template class PaddleInferenceAnakinPredictor; + +// A factory to help create difference predictor. +template <> +std::unique_ptr CreatePaddlePredictor< + AnakinConfig, PaddleEngineKind::kAnakin>(const AnakinConfig &config) { + VLOG(3) << "Anakin Predictor create."; + if (config.target_type == AnakinConfig::NVGPU) { + VLOG(3) << "Anakin Predictor create on [ NVIDIA GPU ]."; + std::unique_ptr x( + new PaddleInferenceAnakinPredictor(config)); + return x; + } else if (config.target_type == AnakinConfig::X86) { + VLOG(3) << "Anakin Predictor create on [ Intel X86 ]."; + std::unique_ptr x( + new PaddleInferenceAnakinPredictor(config)); + return x; + } else { + VLOG(3) << "Anakin Predictor create on unknown platform."; + return nullptr; + } +}; + +} // namespace paddle diff --git a/paddle/fluid/inference/api/api_anakin_engine.h b/paddle/fluid/inference/api/api_anakin_engine.h new file mode 100644 index 0000000000..836badd979 --- /dev/null +++ b/paddle/fluid/inference/api/api_anakin_engine.h @@ -0,0 +1,65 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +/* + * This file contains the implementation of inference API with Anakin engine + * embeded, this API can only support Anakin models. + */ + +#pragma once + +#include + +#include "framework/core/net/net.h" +#include "framework/graph/graph.h" +#include "paddle/fluid/inference/api/paddle_inference_api.h" +#include "saber/core/shape.h" +#include "saber/saber_types.h" + +namespace paddle { + +template +class PaddleInferenceAnakinPredictor : public PaddlePredictor { + public: + PaddleInferenceAnakinPredictor() {} + + explicit PaddleInferenceAnakinPredictor(const AnakinConfig& config); + + // NOTE Unlike the native engine, the buffers of anakin engine's output_data + // should be allocated first. + bool Run(const std::vector& inputs, + std::vector* output_data, + int batch_size = -1) override; + + std::unique_ptr Clone() override; + + anakin::Net& + get_executer(); + + ~PaddleInferenceAnakinPredictor() override { + delete executor_p_; + executor_p_ = nullptr; + }; + + private: + bool Init(const AnakinConfig& config); + + anakin::graph::Graph + graph_; + anakin::Net* + executor_p_{nullptr}; + AnakinConfig config_; +}; + +} // namespace paddle diff --git a/paddle/fluid/inference/api/api_anakin_engine_tester.cc b/paddle/fluid/inference/api/api_anakin_engine_tester.cc new file mode 100644 index 0000000000..62e820b68c --- /dev/null +++ b/paddle/fluid/inference/api/api_anakin_engine_tester.cc @@ -0,0 +1,66 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include +#include + +#include "gflags/gflags.h" +#include "paddle/fluid/inference/api/paddle_inference_api.h" + +DEFINE_string(model, "", "Directory of the inference model(mobile_v2)."); + +namespace paddle { + +AnakinConfig GetConfig() { + AnakinConfig config; + // using AnakinConfig::X86 if you need to use cpu to do inference + config.target_type = AnakinConfig::NVGPU; + config.model_file = FLAGS_model; + config.device = 0; + config.max_batch_size = 1; + return config; +} + +TEST(inference, anakin) { + AnakinConfig config = GetConfig(); + auto predictor = + CreatePaddlePredictor(config); + + float data[1 * 3 * 224 * 224] = {1.0f}; + PaddleTensor tensor; + tensor.name = "input_0"; + tensor.shape = std::vector({1, 3, 224, 224}); + tensor.data = PaddleBuf(data, sizeof(data)); + tensor.dtype = PaddleDType::FLOAT32; + + // For simplicity, we set all the slots with the same data. + std::vector paddle_tensor_feeds(1, tensor); + + PaddleTensor tensor_out; + tensor_out.name = "prob_out"; + tensor_out.shape = std::vector({}); + tensor_out.data = PaddleBuf(); + tensor_out.dtype = PaddleDType::FLOAT32; + + std::vector outputs(1, tensor_out); + + ASSERT_TRUE(predictor->Run(paddle_tensor_feeds, &outputs)); + + float* data_o = static_cast(outputs[0].data.data()); + for (size_t j = 0; j < outputs[0].data.length(); ++j) { + LOG(INFO) << "output[" << j << "]: " << data_o[j]; + } +} + +} // namespace paddle diff --git a/paddle/fluid/inference/api/api_impl.cc b/paddle/fluid/inference/api/api_impl.cc new file mode 100644 index 0000000000..08d7af6d3a --- /dev/null +++ b/paddle/fluid/inference/api/api_impl.cc @@ -0,0 +1,297 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "paddle/fluid/inference/api/api_impl.h" + +namespace paddle { +namespace { + +// Timer for timer +class Timer { + public: + double start; + double startu; + void tic() { + struct timeval tp; + gettimeofday(&tp, NULL); + start = tp.tv_sec; + startu = tp.tv_usec; + } + double toc() { + struct timeval tp; + gettimeofday(&tp, NULL); + double used_time_ms = + (tp.tv_sec - start) * 1000.0 + (tp.tv_usec - startu) / 1000.0; + return used_time_ms; + } +}; + +template +std::string num2str(T a) { + std::stringstream istr; + istr << a; + return istr.str(); +} +} // namespace + +bool NativePaddlePredictor::Init( + std::shared_ptr parent_scope) { + VLOG(3) << "Predictor::init()"; + + if (config_.use_gpu) { + place_ = paddle::platform::CUDAPlace(config_.device); + } else { + place_ = paddle::platform::CPUPlace(); + } + if (parent_scope) { + scope_ = parent_scope; + sub_scope_ = &(parent_scope->NewScope()); + PADDLE_ENFORCE_NOT_NULL(sub_scope_, "create sub scope fail"); + } else { + paddle::framework::InitDevices(false); + scope_.reset(new paddle::framework::Scope()); + } + + executor_.reset(new paddle::framework::Executor(place_)); + + // Initialize the inference program + if (!config_.model_dir.empty()) { + // Parameters are saved in separate files sited in + // the specified `dirname`. + inference_program_ = paddle::inference::Load(executor_.get(), scope_.get(), + config_.model_dir); + } else if (!config_.prog_file.empty() && !config_.param_file.empty()) { + // All parameters are saved in a single file. + // The file names should be consistent with that used + // in Python API `fluid.io.save_inference_model`. + inference_program_ = paddle::inference::Load( + executor_.get(), scope_.get(), config_.prog_file, config_.param_file); + } else { + LOG(ERROR) << "fail to load inference model."; + return false; + } + + ctx_ = executor_->Prepare(*inference_program_, 0); + executor_->CreateVariables(*inference_program_, + sub_scope_ ? sub_scope_ : scope_.get(), 0); + + // Get the feed_target_names and fetch_target_names + feed_target_names_ = inference_program_->GetFeedTargetNames(); + fetch_target_names_ = inference_program_->GetFetchTargetNames(); + return true; +} + +NativePaddlePredictor::~NativePaddlePredictor() { + if (sub_scope_) { + scope_->DeleteScope(sub_scope_); + } +} + +bool NativePaddlePredictor::Run(const std::vector &inputs, + std::vector *output_data, + int batch_size) { + VLOG(3) << "Predictor::predict"; + Timer timer; + timer.tic(); + // set feed variable + std::map feed_targets; + std::vector feeds; + if (!SetFeed(inputs, &feeds)) { + LOG(ERROR) << "fail to set feed"; + return false; + } + for (size_t i = 0; i < feed_target_names_.size(); ++i) { + VLOG(4) << "setting " << i << "-th target"; + feed_targets[feed_target_names_[i]] = &feeds[i]; + } + // get fetch variable + std::map fetch_targets; + std::vector fetchs; + fetchs.resize(fetch_target_names_.size()); + for (size_t i = 0; i < fetch_target_names_.size(); ++i) { + fetch_targets[fetch_target_names_[i]] = &fetchs[i]; + } + // Run the inference program + // if share variables, we need not create variables + VLOG(4) << "Run prepared context"; + executor_->RunPreparedContext( + ctx_.get(), sub_scope_ != nullptr ? 
sub_scope_ : scope_.get(), + &feed_targets, &fetch_targets, + false, /* don't create local scope each time*/ + false /* don't create variable eatch time */); + VLOG(4) << "Finish prepared context"; + if (!GetFetch(fetchs, output_data)) { + LOG(ERROR) << "fail to get fetches"; + return false; + } + VLOG(3) << "predict cost: " << timer.toc() << "ms"; + return true; +} + +std::unique_ptr NativePaddlePredictor::Clone() { + VLOG(3) << "Predictor::clone"; + std::unique_ptr cls(new NativePaddlePredictor(config_)); + + if (!dynamic_cast(cls.get())->Init(scope_)) { + LOG(ERROR) << "fail to call Init"; + return nullptr; + } + // fix manylinux compile error. + return std::move(cls); +} + +bool NativePaddlePredictor::SetFeed(const std::vector &inputs, + std::vector *feeds) { + VLOG(3) << "Predictor::set_feed"; + if (inputs.size() != feed_target_names_.size()) { + LOG(ERROR) << "wrong feed input size."; + return false; + } + for (size_t i = 0; i < feed_target_names_.size(); ++i) { + framework::LoDTensor input; + framework::DDim ddim = framework::make_ddim(inputs[i].shape); + void *input_ptr; + if (inputs[i].dtype == PaddleDType::INT64) { + input_ptr = input.mutable_data(ddim, platform::CPUPlace()); + } else if (inputs[i].dtype == PaddleDType::FLOAT32) { + input_ptr = input.mutable_data(ddim, platform::CPUPlace()); + } else { + LOG(ERROR) << "unsupported feed type " << inputs[i].dtype; + return false; + } + + // TODO(panyx0718): Init LoDTensor from existing memcpy to save a copy. + std::memcpy(static_cast(input_ptr), inputs[i].data.data(), + inputs[i].data.length()); + // TODO(Superjomn) Low performance, need optimization for heavy LoD copy. + framework::LoD lod; + for (auto &level : inputs[i].lod) { + lod.emplace_back(level); + } + input.set_lod(lod); + + feeds->push_back(input); + } + return true; +} + +bool NativePaddlePredictor::GetFetch( + const std::vector &fetchs, + std::vector *outputs) { + VLOG(3) << "Predictor::get_fetch"; + outputs->resize(fetchs.size()); + for (size_t i = 0; i < fetchs.size(); ++i) { + // TODO(panyx0718): Support fetch of other types. + if (fetchs[i].type() != typeid(float)) { + LOG(ERROR) << "only support fetching float now."; + return false; + } + std::vector shape; + auto dims_i = fetchs[i].dims(); + auto lod = fetchs[i].lod(); + const float *output_ptr = fetchs[i].data(); + // const int64_t* output_ptr = fetchs[i].data(); + auto num = fetchs[i].numel(); + std::vector data; + if (0 == lod.size()) { + std::copy(output_ptr, output_ptr + num, std::back_inserter(data)); + for (int j = 0; j < dims_i.size(); ++j) { + shape.push_back(dims_i[j]); + } + } else { + // for batch detection + // image[0] -> output[0] shape {145, 6} + // image[1] -> output[1] shape {176, 6} + // then, + // the batch output shape {321, 6} + // the lod {{0, 145, 321}} + // so we should append output[0] to {176, 6} + size_t max_dim = 0; + for (size_t j = 1; j < lod[0].size(); j++) { + max_dim = std::max(max_dim, lod[0][j] - lod[0][j - 1]); + } + size_t common_dim = lod[0].back() == 0 ? 
0 : num / lod[0].back(); + if (max_dim > 0) { + data.resize((lod[0].size() - 1) * max_dim * common_dim, 0); + } + for (size_t j = 1; j < lod[0].size(); j++) { + size_t start = lod[0][j - 1] * common_dim; + size_t end = lod[0][j] * common_dim; + if (end > start) { + std::copy(output_ptr + start, output_ptr + end, + data.begin() + (j - 1) * max_dim * common_dim); + } + } + shape.push_back(lod[0].size() - 1); + shape.push_back(max_dim); + for (int j = 1; j < dims_i.size(); ++j) { + shape.push_back(dims_i[j]); + } + } + + outputs->at(i).shape = shape; + auto &buffer = outputs->at(i).data; + if (buffer.empty() || buffer.length() < sizeof(float) * data.size()) { + buffer.Resize(sizeof(float) * data.size()); + } + std::memcpy(buffer.data(), data.data(), buffer.length()); + // copy LoD + for (const auto &level : fetchs[i].lod()) { + outputs->at(i).lod.emplace_back(level); + } + outputs->at(i).dtype = PaddleDType::FLOAT32; + // TODO(panyx0718): support other types? fill tensor name? avoid a copy. + } + return true; +} + +template <> +std::unique_ptr CreatePaddlePredictor< + NativeConfig, PaddleEngineKind::kNative>(const NativeConfig &config) { + VLOG(3) << "create NativePaddlePredictor"; + if (config.use_gpu) { + // 1. GPU memeroy + PADDLE_ENFORCE_GT( + config.fraction_of_gpu_memory, 0.f, + "fraction_of_gpu_memory in the config should be set to range (0., 1.]"); + PADDLE_ENFORCE_GE(config.device, 0, "Invalid device id %d", config.device); + std::vector flags; + if (config.fraction_of_gpu_memory >= 0.0f || + config.fraction_of_gpu_memory <= 0.95f) { + flags.push_back("dummpy"); + std::string flag = "--fraction_of_gpu_memory_to_use=" + + num2str(config.fraction_of_gpu_memory); + flags.push_back(flag); + VLOG(3) << "set flag: " << flag; + framework::InitGflags(flags); + } + } + + std::unique_ptr predictor(new NativePaddlePredictor(config)); + if (!dynamic_cast(predictor.get())->Init(nullptr)) { + return nullptr; + } + return std::move(predictor); +} + +} // namespace paddle diff --git a/paddle/fluid/inference/api/api_impl.h b/paddle/fluid/inference/api/api_impl.h new file mode 100644 index 0000000000..4f28c3cd34 --- /dev/null +++ b/paddle/fluid/inference/api/api_impl.h @@ -0,0 +1,66 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
*/ + +#pragma once + +#include +#include +#include +#include + +#include "paddle/fluid/inference/api/paddle_inference_api.h" + +#include "paddle/fluid/framework/ddim.h" +#include "paddle/fluid/framework/lod_tensor.h" +#include "paddle/fluid/inference/io.h" +#include "paddle/fluid/platform/init.h" +#include "paddle/fluid/platform/profiler.h" + +namespace paddle { + +class NativePaddlePredictor : public PaddlePredictor { + public: + explicit NativePaddlePredictor(const NativeConfig &config) + : config_(config) {} + + // will only create sub scope if have global scope + bool Init(std::shared_ptr parent_scope); + + bool Run(const std::vector &inputs, + std::vector *output_data, + int batch_size = -1) override; + + std::unique_ptr Clone() override; + + ~NativePaddlePredictor() override; + + protected: + bool SetFeed(const std::vector &input_datas, + std::vector *feeds); + bool GetFetch(const std::vector &fetchs, + std::vector *output_data); + + NativeConfig config_; + platform::Place place_; + std::unique_ptr executor_; + std::shared_ptr scope_; + std::unique_ptr ctx_; + std::unique_ptr inference_program_; + std::vector feed_target_names_; + std::vector fetch_target_names_; + // Do not use unique_ptr, use parent scope to delete + framework::Scope *sub_scope_{nullptr}; +}; + +} // namespace paddle diff --git a/paddle/fluid/inference/api/api_impl_tester.cc b/paddle/fluid/inference/api/api_impl_tester.cc new file mode 100644 index 0000000000..fc1364b80a --- /dev/null +++ b/paddle/fluid/inference/api/api_impl_tester.cc @@ -0,0 +1,288 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ + +#include +#include + +#include // NOLINT + +#include "gflags/gflags.h" +#include "paddle/fluid/inference/api/api_impl.h" +#include "paddle/fluid/inference/tests/test_helper.h" + +DEFINE_string(dirname, "", "Directory of the inference model."); + +namespace paddle { + +PaddleTensor LodTensorToPaddleTensor(framework::LoDTensor* t) { + PaddleTensor pt; + + if (t->type() == typeid(int64_t)) { + pt.data.Reset(t->data(), t->numel() * sizeof(int64_t)); + pt.dtype = PaddleDType::INT64; + } else if (t->type() == typeid(float)) { + pt.data.Reset(t->data(), t->numel() * sizeof(float)); + pt.dtype = PaddleDType::FLOAT32; + } else { + LOG(FATAL) << "unsupported type."; + } + pt.shape = framework::vectorize2int(t->dims()); + return pt; +} + +NativeConfig GetConfig() { + NativeConfig config; + config.model_dir = FLAGS_dirname + "word2vec.inference.model"; + LOG(INFO) << "dirname " << config.model_dir; + config.fraction_of_gpu_memory = 0.15; +#ifdef PADDLE_WITH_CUDA + config.use_gpu = true; +#else + config.use_gpu = false; +#endif + config.device = 0; + return config; +} + +void MainWord2Vec(bool use_gpu) { + NativeConfig config = GetConfig(); + auto predictor = CreatePaddlePredictor(config); + config.use_gpu = use_gpu; + + framework::LoDTensor first_word, second_word, third_word, fourth_word; + framework::LoD lod{{0, 1}}; + int64_t dict_size = 2073; // The size of dictionary + + SetupLoDTensor(&first_word, lod, static_cast(0), dict_size - 1); + SetupLoDTensor(&second_word, lod, static_cast(0), dict_size - 1); + SetupLoDTensor(&third_word, lod, static_cast(0), dict_size - 1); + SetupLoDTensor(&fourth_word, lod, static_cast(0), dict_size - 1); + + std::vector paddle_tensor_feeds; + paddle_tensor_feeds.push_back(LodTensorToPaddleTensor(&first_word)); + paddle_tensor_feeds.push_back(LodTensorToPaddleTensor(&second_word)); + paddle_tensor_feeds.push_back(LodTensorToPaddleTensor(&third_word)); + paddle_tensor_feeds.push_back(LodTensorToPaddleTensor(&fourth_word)); + + std::vector outputs; + ASSERT_TRUE(predictor->Run(paddle_tensor_feeds, &outputs)); + ASSERT_EQ(outputs.size(), 1UL); + size_t len = outputs[0].data.length(); + float* data = static_cast(outputs[0].data.data()); + for (size_t j = 0; j < len / sizeof(float); ++j) { + ASSERT_LT(data[j], 1.0); + ASSERT_GT(data[j], -1.0); + } + + std::vector cpu_feeds; + cpu_feeds.push_back(&first_word); + cpu_feeds.push_back(&second_word); + cpu_feeds.push_back(&third_word); + cpu_feeds.push_back(&fourth_word); + + framework::LoDTensor output1; + std::vector cpu_fetchs1; + cpu_fetchs1.push_back(&output1); + + TestInference(config.model_dir, cpu_feeds, cpu_fetchs1); + + float* lod_data = output1.data(); + for (int i = 0; i < output1.numel(); ++i) { + EXPECT_LT(lod_data[i] - data[i], 1e-3); + EXPECT_GT(lod_data[i] - data[i], -1e-3); + } +} + +void MainImageClassification(bool use_gpu) { + int batch_size = 2; + bool repeat = false; + NativeConfig config = GetConfig(); + config.use_gpu = use_gpu; + config.model_dir = + FLAGS_dirname + "image_classification_resnet.inference.model"; + + const bool is_combined = false; + std::vector> feed_target_shapes = + GetFeedTargetShapes(config.model_dir, is_combined); + + framework::LoDTensor input; + // Use normilized image pixels as input data, + // which should be in the range [0.0, 1.0]. 
+ feed_target_shapes[0][0] = batch_size; + framework::DDim input_dims = framework::make_ddim(feed_target_shapes[0]); + SetupTensor(&input, input_dims, static_cast(0), + static_cast(1)); + std::vector cpu_feeds; + cpu_feeds.push_back(&input); + + framework::LoDTensor output1; + std::vector cpu_fetchs1; + cpu_fetchs1.push_back(&output1); + + TestInference( + config.model_dir, cpu_feeds, cpu_fetchs1, repeat, is_combined); + + auto predictor = CreatePaddlePredictor(config); + std::vector paddle_tensor_feeds; + paddle_tensor_feeds.push_back(LodTensorToPaddleTensor(&input)); + + std::vector outputs; + ASSERT_TRUE(predictor->Run(paddle_tensor_feeds, &outputs)); + ASSERT_EQ(outputs.size(), 1UL); + size_t len = outputs[0].data.length(); + float* data = static_cast(outputs[0].data.data()); + float* lod_data = output1.data(); + for (size_t j = 0; j < len / sizeof(float); ++j) { + EXPECT_NEAR(lod_data[j], data[j], 1e-3); + } +} + +void MainThreadsWord2Vec(bool use_gpu) { + NativeConfig config = GetConfig(); + config.use_gpu = use_gpu; + auto main_predictor = CreatePaddlePredictor(config); + + // prepare inputs data and reference results + constexpr int num_jobs = 3; + std::vector> jobs(num_jobs); + std::vector> paddle_tensor_feeds(num_jobs); + std::vector refs(num_jobs); + for (size_t i = 0; i < jobs.size(); ++i) { + // each job has 4 words + jobs[i].resize(4); + for (size_t j = 0; j < 4; ++j) { + framework::LoD lod{{0, 1}}; + int64_t dict_size = 2073; // The size of dictionary + SetupLoDTensor(&jobs[i][j], lod, static_cast(0), dict_size - 1); + paddle_tensor_feeds[i].push_back(LodTensorToPaddleTensor(&jobs[i][j])); + } + + // get reference result of each job + std::vector ref_feeds; + std::vector ref_fetches(1, &refs[i]); + for (auto& word : jobs[i]) { + ref_feeds.push_back(&word); + } + TestInference(config.model_dir, ref_feeds, ref_fetches); + } + + // create threads and each thread run 1 job + std::vector threads; + for (int tid = 0; tid < num_jobs; ++tid) { + threads.emplace_back([&, tid]() { + auto predictor = main_predictor->Clone(); + auto& local_inputs = paddle_tensor_feeds[tid]; + std::vector local_outputs; + ASSERT_TRUE(predictor->Run(local_inputs, &local_outputs)); + + // check outputs range + ASSERT_EQ(local_outputs.size(), 1UL); + const size_t len = local_outputs[0].data.length(); + float* data = static_cast(local_outputs[0].data.data()); + for (size_t j = 0; j < len / sizeof(float); ++j) { + ASSERT_LT(data[j], 1.0); + ASSERT_GT(data[j], -1.0); + } + + // check outputs correctness + float* ref_data = refs[tid].data(); + EXPECT_EQ(refs[tid].numel(), static_cast(len / sizeof(float))); + for (int i = 0; i < refs[tid].numel(); ++i) { + EXPECT_NEAR(ref_data[i], data[i], 1e-3); + } + }); + } + for (int i = 0; i < num_jobs; ++i) { + threads[i].join(); + } +} + +void MainThreadsImageClassification(bool use_gpu) { + constexpr int num_jobs = 4; // each job run 1 batch + constexpr int batch_size = 1; + NativeConfig config = GetConfig(); + config.use_gpu = use_gpu; + config.model_dir = + FLAGS_dirname + "image_classification_resnet.inference.model"; + + auto main_predictor = CreatePaddlePredictor(config); + std::vector jobs(num_jobs); + std::vector> paddle_tensor_feeds(num_jobs); + std::vector refs(num_jobs); + for (size_t i = 0; i < jobs.size(); ++i) { + // prepare inputs + std::vector> feed_target_shapes = + GetFeedTargetShapes(config.model_dir, /*is_combined*/ false); + feed_target_shapes[0][0] = batch_size; + framework::DDim input_dims = framework::make_ddim(feed_target_shapes[0]); + 
SetupTensor(&jobs[i], input_dims, 0.f, 1.f); + paddle_tensor_feeds[i].push_back(LodTensorToPaddleTensor(&jobs[i])); + + // get reference result of each job + std::vector ref_feeds(1, &jobs[i]); + std::vector ref_fetches(1, &refs[i]); + TestInference(config.model_dir, ref_feeds, ref_fetches); + } + + // create threads and each thread run 1 job + std::vector threads; + for (int tid = 0; tid < num_jobs; ++tid) { + threads.emplace_back([&, tid]() { + auto predictor = main_predictor->Clone(); + auto& local_inputs = paddle_tensor_feeds[tid]; + std::vector local_outputs; + ASSERT_TRUE(predictor->Run(local_inputs, &local_outputs)); + + // check outputs correctness + ASSERT_EQ(local_outputs.size(), 1UL); + const size_t len = local_outputs[0].data.length(); + float* data = static_cast(local_outputs[0].data.data()); + float* ref_data = refs[tid].data(); + EXPECT_EQ((size_t)refs[tid].numel(), len / sizeof(float)); + for (int i = 0; i < refs[tid].numel(); ++i) { + EXPECT_NEAR(ref_data[i], data[i], 1e-3); + } + }); + } + for (int i = 0; i < num_jobs; ++i) { + threads[i].join(); + } +} + +TEST(inference_api_native, word2vec_cpu) { MainWord2Vec(false /*use_gpu*/); } +TEST(inference_api_native, word2vec_cpu_threads) { + MainThreadsWord2Vec(false /*use_gpu*/); +} +TEST(inference_api_native, image_classification_cpu) { + MainThreadsImageClassification(false /*use_gpu*/); +} +TEST(inference_api_native, image_classification_cpu_threads) { + MainThreadsImageClassification(false /*use_gpu*/); +} + +#ifdef PADDLE_WITH_CUDA +TEST(inference_api_native, word2vec_gpu) { MainWord2Vec(true /*use_gpu*/); } +TEST(inference_api_native, word2vec_gpu_threads) { + MainThreadsWord2Vec(true /*use_gpu*/); +} +TEST(inference_api_native, image_classification_gpu) { + MainThreadsImageClassification(true /*use_gpu*/); +} +TEST(inference_api_native, image_classification_gpu_threads) { + MainThreadsImageClassification(true /*use_gpu*/); +} + +#endif + +} // namespace paddle diff --git a/paddle/fluid/inference/api/api_tensorrt_subgraph_engine.cc b/paddle/fluid/inference/api/api_tensorrt_subgraph_engine.cc new file mode 100644 index 0000000000..45b5a7638b --- /dev/null +++ b/paddle/fluid/inference/api/api_tensorrt_subgraph_engine.cc @@ -0,0 +1,152 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#include "paddle/fluid/inference/analysis/analyzer.h" +#include "paddle/fluid/inference/api/api_impl.h" +#include "paddle/fluid/inference/api/paddle_inference_api.h" +#include "paddle/fluid/inference/utils/singleton.h" +#include "paddle/fluid/operators/tensorrt_engine_op.h" + +namespace paddle { + +using inference::analysis::Argument; +using inference::Singleton; +using inference::analysis::Analyzer; +using framework::proto::ProgramDesc; + +class TensorRTSubgraphPredictor : public NativePaddlePredictor { + public: + explicit TensorRTSubgraphPredictor(const TensorRTConfig& config) + : NativePaddlePredictor(config), config_(config) {} + + bool Init(const std::shared_ptr& parent_scope) { + VLOG(3) << "Predictor::init()"; + + if (config_.use_gpu) { + place_ = paddle::platform::CUDAPlace(config_.device); + } else { + place_ = paddle::platform::CPUPlace(); + } + if (parent_scope) { + scope_ = parent_scope; + sub_scope_ = &(parent_scope->NewScope()); + } else { + paddle::framework::InitDevices(false); + scope_.reset(new paddle::framework::Scope()); + } + + executor_.reset(new paddle::framework::Executor(place_)); + + // Initialize the inference program + if (!config_.model_dir.empty()) { + // Parameters are saved in separate files sited in + // the specified `dirname`. + inference_program_ = paddle::inference::Load( + executor_.get(), scope_.get(), config_.model_dir); + } else if (!config_.prog_file.empty() && !config_.param_file.empty()) { + // All parameters are saved in a single file. + // The file names should be consistent with that used + // in Python API `fluid.io.save_inference_model`. + inference_program_ = paddle::inference::Load( + executor_.get(), scope_.get(), config_.prog_file, config_.param_file); + } else { + LOG(ERROR) << "fail to load inference model."; + return false; + } + + OptimizeInferenceProgram(); + ctx_ = executor_->Prepare(*inference_program_, 0); + + VLOG(5) << "to create variables"; + executor_->CreateVariables(*inference_program_, + sub_scope_ ? 
sub_scope_ : scope_.get(), 0); + + // Get the feed_target_names and fetch_target_names + feed_target_names_ = inference_program_->GetFeedTargetNames(); + fetch_target_names_ = inference_program_->GetFetchTargetNames(); + return true; + } + + bool Run(const std::vector& inputs, + std::vector* output_data, + int batch_size = -1) override { + PADDLE_ENFORCE_GT(batch_size, 0, + "TensorRT engine needs the argument batch_size set"); + FLAGS_tensorrt_engine_batch_size = batch_size; + return NativePaddlePredictor::Run(inputs, output_data, batch_size); + } + + void OptimizeInferenceProgram() { + // Analyze inference_program + Argument argument; + if (!config_.model_dir.empty()) { + argument.fluid_model_dir.reset(new std::string(config_.model_dir)); + } else { + PADDLE_ENFORCE( + !config_.param_file.empty(), + "Either model_dir or (param_file, prog_file) should be set."); + PADDLE_ENFORCE(!config_.prog_file.empty()); + argument.fluid_model_program_path.reset( + new std::string(config_.prog_file)); + argument.fluid_model_param_path.reset( + new std::string(config_.param_file)); + } + argument.origin_program_desc.reset( + new ProgramDesc(*inference_program_->Proto())); + Singleton::Global().Run(&argument); + CHECK(argument.transformed_program_desc); + VLOG(5) << "transformed program:\n" + << argument.transformed_program_desc->SerializeAsString(); + VLOG(5) << "to prepare executor"; + inference_program_.reset( + new framework::ProgramDesc(*argument.transformed_program_desc)); + } + + private: + TensorRTConfig config_; +}; + +template <> +std::unique_ptr +CreatePaddlePredictor( + const TensorRTConfig& config) { + VLOG(3) << "create TensorRTSubgraphPredictor"; + if (config.use_gpu) { + // 1. GPU memeroy + PADDLE_ENFORCE_GT( + config.fraction_of_gpu_memory, 0.f, + "fraction_of_gpu_memory in the config should be set to range (0., 1.]"); + PADDLE_ENFORCE_GE(config.device, 0, "Invalid device id %d", config.device); + std::vector flags; + if (config.fraction_of_gpu_memory >= 0.0f || + config.fraction_of_gpu_memory <= 0.95f) { + flags.push_back("dummpy"); + std::string flag = "--fraction_of_gpu_memory_to_use=" + + std::to_string(config.fraction_of_gpu_memory); + flags.push_back(flag); + VLOG(3) << "set flag: " << flag; + framework::InitGflags(flags); + } + } + + std::unique_ptr predictor( + new TensorRTSubgraphPredictor(config)); + if (!dynamic_cast(predictor.get()) + ->Init(nullptr)) { + return nullptr; + } + return std::move(predictor); +} + +} // namespace paddle diff --git a/paddle/fluid/inference/api/api_tensorrt_subgraph_engine_tester.cc b/paddle/fluid/inference/api/api_tensorrt_subgraph_engine_tester.cc new file mode 100644 index 0000000000..fcbf9b89d6 --- /dev/null +++ b/paddle/fluid/inference/api/api_tensorrt_subgraph_engine_tester.cc @@ -0,0 +1,92 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#include +#include +#include +#include "paddle/fluid/inference/analysis/analyzer.h" +#include "paddle/fluid/inference/api/paddle_inference_api.h" + +namespace paddle { + +DEFINE_string(dirname, "", "Directory of the inference model."); + +void CompareTensorRTWithFluid(bool enable_tensorrt) { + FLAGS_inference_analysis_enable_tensorrt_subgraph_engine = enable_tensorrt; + + //# 1. Create PaddlePredictor with a config. + NativeConfig config0; + config0.model_dir = FLAGS_dirname + "word2vec.inference.model"; + config0.use_gpu = true; + config0.fraction_of_gpu_memory = 0.3; + config0.device = 0; + + TensorRTConfig config1; + config1.model_dir = FLAGS_dirname + "word2vec.inference.model"; + config1.use_gpu = true; + config1.fraction_of_gpu_memory = 0.3; + config1.device = 0; + + auto predictor0 = + CreatePaddlePredictor(config0); + auto predictor1 = + CreatePaddlePredictor(config1); + + for (int batch_id = 0; batch_id < 1; batch_id++) { + //# 2. Prepare input. + std::vector data(20); + for (int i = 0; i < 20; i++) data[i] = i; + + PaddleTensor tensor; + tensor.shape = std::vector({10, 1}); + tensor.data = PaddleBuf(data.data(), data.size() * sizeof(int64_t)); + tensor.dtype = PaddleDType::INT64; + + // For simplicity, we set all the slots with the same data. + std::vector slots(4, tensor); + + //# 3. Run + std::vector outputs0; + std::vector outputs1; + CHECK(predictor0->Run(slots, &outputs0)); + CHECK(predictor1->Run(slots, &outputs1, 10)); + + //# 4. Get output. + ASSERT_EQ(outputs0.size(), 1UL); + ASSERT_EQ(outputs1.size(), 1UL); + + const size_t num_elements = outputs0.front().data.length() / sizeof(float); + const size_t num_elements1 = outputs1.front().data.length() / sizeof(float); + EXPECT_EQ(num_elements, num_elements1); + + auto *data0 = static_cast(outputs0.front().data.data()); + auto *data1 = static_cast(outputs1.front().data.data()); + + ASSERT_GT(num_elements, 0UL); + for (size_t i = 0; i < std::min(num_elements, num_elements1); i++) { + EXPECT_NEAR(data0[i], data1[i], 1e-3); + } + } +} + +TEST(paddle_inference_api_tensorrt_subgraph_engine, without_tensorrt) { + CompareTensorRTWithFluid(false); +} + +TEST(paddle_inference_api_tensorrt_subgraph_engine, with_tensorrt) { + CompareTensorRTWithFluid(true); +} + +} // namespace paddle diff --git a/paddle/fluid/inference/api/api_tester.cc b/paddle/fluid/inference/api/api_tester.cc new file mode 100644 index 0000000000..7a579610ee --- /dev/null +++ b/paddle/fluid/inference/api/api_tester.cc @@ -0,0 +1,64 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include +#include +#include "paddle/fluid/inference/api/paddle_inference_api.h" + +namespace paddle { + +/* + * Do not use this, just a demo indicating how to customize a config for a + * specific predictor. + */ +struct DemoConfig : public PaddlePredictor::Config { + float other_config; +}; + +/* + * Do not use this, just a demo indicating how to customize a Predictor. 
+ */ +class DemoPredictor : public PaddlePredictor { + public: + explicit DemoPredictor(const DemoConfig &config) { + LOG(INFO) << "I get other_config " << config.other_config; + } + bool Run(const std::vector &inputs, + std::vector *output_data, + int batch_size = 0) override { + LOG(INFO) << "Run"; + return false; + } + + std::unique_ptr Clone() override { return nullptr; } + + ~DemoPredictor() override {} +}; + +template <> +std::unique_ptr CreatePaddlePredictor( + const DemoConfig &config) { + std::unique_ptr x(new DemoPredictor(config)); + return x; +} + +TEST(paddle_inference_api, demo) { + DemoConfig config; + config.other_config = 1.7; + auto predictor = CreatePaddlePredictor(config); + std::vector outputs; + predictor->Run({}, &outputs); +} + +} // namespace paddle diff --git a/paddle/fluid/inference/api/demo_ci/.gitignore b/paddle/fluid/inference/api/demo_ci/.gitignore new file mode 100644 index 0000000000..1269488f7f --- /dev/null +++ b/paddle/fluid/inference/api/demo_ci/.gitignore @@ -0,0 +1 @@ +data diff --git a/paddle/fluid/inference/api/demo_ci/CMakeLists.txt b/paddle/fluid/inference/api/demo_ci/CMakeLists.txt new file mode 100644 index 0000000000..ba73a6eaa6 --- /dev/null +++ b/paddle/fluid/inference/api/demo_ci/CMakeLists.txt @@ -0,0 +1,73 @@ +cmake_minimum_required(VERSION 3.0) + +project(cpp_inference_demo CXX C) + +set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11") + +if(NOT DEFINED PADDLE_LIB) + message(FATAL_ERROR "please set PADDLE_LIB with -DPADDLE_LIB=/path/paddle/lib") +endif() +if(NOT DEFINED DEMO_NAME) + message(FATAL_ERROR "please set DEMO_NAME with -DDEMO_NAME=demo_name") +endif() + +option(WITH_MKL "Compile demo with MKL/OpenBlas support, default use MKL." ON) +option(WITH_GPU "Compile demo with GPU/CPU, default use CPU." OFF) +option(WITH_STATIC_LIB "Compile demo with static/shared library, default use static." 
ON)
+
+if(WITH_GPU)
+  set(CUDA_LIB "/usr/local/cuda/lib64/" CACHE STRING "CUDA Library")
+endif()
+
+include_directories("${PADDLE_LIB}")
+include_directories("${PADDLE_LIB}/third_party/install/protobuf/include")
+include_directories("${PADDLE_LIB}/third_party/install/glog/include")
+include_directories("${PADDLE_LIB}/third_party/install/gflags/include")
+include_directories("${PADDLE_LIB}/third_party/install/snappy/include")
+include_directories("${PADDLE_LIB}/third_party/install/snappystream/include")
+include_directories("${PADDLE_LIB}/third_party/install/zlib/include")
+
+include_directories("${PADDLE_LIB}/third_party/boost")
+include_directories("${PADDLE_LIB}/third_party/eigen3")
+
+link_directories("${PADDLE_LIB}/third_party/install/snappy/lib")
+link_directories("${PADDLE_LIB}/third_party/install/snappystream/lib")
+link_directories("${PADDLE_LIB}/third_party/install/protobuf/lib")
+link_directories("${PADDLE_LIB}/third_party/install/glog/lib")
+link_directories("${PADDLE_LIB}/third_party/install/gflags/lib")
+link_directories("${PADDLE_LIB}/third_party/install/zlib/lib")
+
+add_executable(${DEMO_NAME} ${DEMO_NAME}.cc)
+
+if(WITH_MKL)
+  include_directories("${PADDLE_LIB}/third_party/install/mklml/include")
+  set(MATH_LIB ${PADDLE_LIB}/third_party/install/mklml/lib/libmklml_intel.so
+    ${PADDLE_LIB}/third_party/install/mklml/lib/libiomp5.so)
+  set(MKLDNN_PATH "${PADDLE_LIB}/third_party/install/mkldnn")
+  if(EXISTS ${MKLDNN_PATH})
+    include_directories("${MKLDNN_PATH}/include")
+    set(MKLDNN_LIB ${MKLDNN_PATH}/lib/libmkldnn.so.0)
+  endif()
+else()
+  set(MATH_LIB ${PADDLE_LIB}/third_party/install/openblas/lib/libopenblas.a)
+endif()
+
+# Note: libpaddle_inference_api.so/a must be put before libpaddle_fluid.so/a
+if(WITH_STATIC_LIB)
+  set(DEPS
+    ${PADDLE_LIB}/paddle/fluid/inference/libpaddle_fluid.a)
+else()
+  set(DEPS
+    ${PADDLE_LIB}/paddle/fluid/inference/libpaddle_fluid.so)
+endif()
+set(EXTERNAL_LIB "-lrt -ldl -lpthread")
+
+set(DEPS ${DEPS}
+  ${MATH_LIB} ${MKLDNN_LIB}
+  glog gflags protobuf snappystream snappy z
+  ${EXTERNAL_LIB})
+if(WITH_GPU)
+  set(DEPS ${DEPS} ${CUDA_LIB}/libcudart.so)
+endif()
+
+target_link_libraries(${DEMO_NAME} ${DEPS})
diff --git a/paddle/fluid/inference/api/demo_ci/README.md b/paddle/fluid/inference/api/demo_ci/README.md
new file mode 100644
index 0000000000..7f013da7f3
--- /dev/null
+++ b/paddle/fluid/inference/api/demo_ci/README.md
@@ -0,0 +1,26 @@
+# Inference Demos
+
+There are several demos:
+
+- simple_on_word2vec:
+  - The C++ code is in `simple_on_word2vec.cc`.
+  - It works with the word2vec model.
+- vis_demo:
+  - The C++ code is in `vis_demo.cc`.
+  - It works with three models: mobilenet, se_resnext50, and ocr.
+  - Input data format:
+    - Each line contains a single record
+    - Each record's format is
+    ```
+    \t
+    ```
+
+To build and execute the demos, simply run
+```
+./run.sh $PADDLE_ROOT $TURN_ON_MKL $TEST_GPU_CPU
+```
+- It builds and runs the demos against both the static and the shared library.
+- `$PADDLE_ROOT`: path to the Paddle library
+- `$TURN_ON_MKL`: use MKL or OpenBLAS
+- `$TEST_GPU_CPU`: test both GPU/CPU modes, or CPU-only mode
+- NOTE: for simple_on_word2vec, run `ctest -R test_word2vec` first to obtain the word2vec model.
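For readers who want to produce `vis_demo`-style records from their own code, the sketch below shows how one record line can be parsed into flat float data plus an integer shape. This is a minimal, self-contained illustration with hypothetical names (`Record`, `ParseRecordLine`), not the demo's actual code; the demo itself implements this in `vis_demo.cc` via its `split()` and `ProcessALine()` helpers.

```c++
// Minimal sketch of parsing one record line of the form "<floats>\t<ints>".
#include <iostream>
#include <sstream>
#include <string>
#include <vector>

struct Record {
  std::vector<float> data;  // flattened tensor values
  std::vector<int> shape;   // tensor shape, e.g. {1, 3, 224, 224}
};

Record ParseRecordLine(const std::string& line) {
  Record record;
  const size_t tab = line.find('\t');
  // Everything before the tab is data, everything after is the shape.
  std::istringstream data_part(line.substr(0, tab));
  std::istringstream shape_part(
      tab == std::string::npos ? "" : line.substr(tab + 1));
  for (float v; data_part >> v;) record.data.push_back(v);
  for (int s; shape_part >> s;) record.shape.push_back(s);
  return record;
}

int main() {
  Record r = ParseRecordLine("0.1 0.2 0.3 0.4 0.5 0.6\t1 2 3");
  std::cout << "data " << r.data.size() << " shape " << r.shape.size() << "\n";
}
```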
diff --git a/paddle/fluid/inference/api/demo_ci/clean.sh b/paddle/fluid/inference/api/demo_ci/clean.sh new file mode 100755 index 0000000000..0d9f3d2aa2 --- /dev/null +++ b/paddle/fluid/inference/api/demo_ci/clean.sh @@ -0,0 +1,4 @@ +set -x +cd `dirname $0` +rm -rf build/ data/ +set +x diff --git a/paddle/fluid/inference/api/demo_ci/run.sh b/paddle/fluid/inference/api/demo_ci/run.sh new file mode 100755 index 0000000000..3e829dd726 --- /dev/null +++ b/paddle/fluid/inference/api/demo_ci/run.sh @@ -0,0 +1,81 @@ +set -x +PADDLE_ROOT=$1 +TURN_ON_MKL=$2 # use MKL or Openblas +TEST_GPU_CPU=$3 # test both GPU/CPU mode or only CPU mode +if [ $2 == ON ]; then + # You can export yourself if move the install path + MKL_LIB=${PADDLE_ROOT}/build/fluid_install_dir/third_party/install/mklml/lib + export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:${MKL_LIB} +fi +if [ $3 == ON ]; then + use_gpu_list='true false' +else + use_gpu_list='false' +fi + +# download vis_demo data +function download() { + dir_name=$1 + mkdir -p $dir_name + cd $dir_name + wget -q ${URL_ROOT}$dir_name.tar.gz + tar xzf *.tar.gz + cd .. +} +URL_ROOT=http://paddlemodels.bj.bcebos.com/inference-vis-demos%2F +mkdir -p data +cd data +vis_demo_list='se_resnext50 ocr mobilenet' +for vis_demo_name in $vis_demo_list; do + download $vis_demo_name +done +cd .. + +# compile and test the demo +mkdir -p build +cd build + +for WITH_STATIC_LIB in ON OFF; do + # -----simple_on_word2vec----- + rm -rf * + cmake .. -DPADDLE_LIB=${PADDLE_ROOT}/build/fluid_install_dir/ \ + -DWITH_MKL=$TURN_ON_MKL \ + -DDEMO_NAME=simple_on_word2vec \ + -DWITH_GPU=$TEST_GPU_CPU \ + -DWITH_STATIC_LIB=$WITH_STATIC_LIB + make -j + word2vec_model=${PADDLE_ROOT}'/build/python/paddle/fluid/tests/book/word2vec.inference.model' + if [ -d $word2vec_model ]; then + for use_gpu in $use_gpu_list; do + ./simple_on_word2vec \ + --dirname=$word2vec_model \ + --use_gpu=$use_gpu + if [ $? -ne 0 ]; then + echo "simple_on_word2vec demo runs fail." + exit 1 + fi + done + fi + # ---------vis_demo--------- + rm -rf * + cmake .. -DPADDLE_LIB=${PADDLE_ROOT}/build/fluid_install_dir/ \ + -DWITH_MKL=$TURN_ON_MKL \ + -DDEMO_NAME=vis_demo \ + -DWITH_GPU=$TEST_GPU_CPU \ + -DWITH_STATIC_LIB=$WITH_STATIC_LIB + make -j + for use_gpu in $use_gpu_list; do + for vis_demo_name in $vis_demo_list; do + ./vis_demo \ + --modeldir=../data/$vis_demo_name/model \ + --data=../data/$vis_demo_name/data.txt \ + --refer=../data/$vis_demo_name/result.txt \ + --use_gpu=$use_gpu + if [ $? -ne 0 ]; then + echo "vis demo $vis_demo_name runs fail." + exit 1 + fi + done + done +done +set +x diff --git a/paddle/fluid/inference/api/demo_ci/simple_on_word2vec.cc b/paddle/fluid/inference/api/demo_ci/simple_on_word2vec.cc new file mode 100644 index 0000000000..03ac79e9ed --- /dev/null +++ b/paddle/fluid/inference/api/demo_ci/simple_on_word2vec.cc @@ -0,0 +1,142 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +/* + * This file contains a simple demo for how to take a model for inference. 
+ */ + +#include +#include +#include +#include //NOLINT +#include "paddle/fluid/inference/paddle_inference_api.h" +#include "paddle/fluid/platform/enforce.h" + +DEFINE_string(dirname, "", "Directory of the inference model."); +DEFINE_bool(use_gpu, false, "Whether use gpu."); + +namespace paddle { +namespace demo { + +void Main(bool use_gpu) { + //# 1. Create PaddlePredictor with a config. + NativeConfig config; + if (FLAGS_dirname.empty()) { + LOG(INFO) << "Usage: ./simple_on_word2vec --dirname=path/to/your/model"; + exit(1); + } + config.model_dir = FLAGS_dirname; + config.use_gpu = use_gpu; + config.fraction_of_gpu_memory = 0.15; + config.device = 0; + auto predictor = + CreatePaddlePredictor(config); + + for (int batch_id = 0; batch_id < 3; batch_id++) { + //# 2. Prepare input. + int64_t data[4] = {1, 2, 3, 4}; + + PaddleTensor tensor; + tensor.shape = std::vector({4, 1}); + tensor.data = PaddleBuf(data, sizeof(data)); + tensor.dtype = PaddleDType::INT64; + + // For simplicity, we set all the slots with the same data. + std::vector slots(4, tensor); + + //# 3. Run + std::vector outputs; + CHECK(predictor->Run(slots, &outputs)); + + //# 4. Get output. + PADDLE_ENFORCE(outputs.size(), 1UL); + // Check the output buffer size and result of each tid. + PADDLE_ENFORCE(outputs.front().data.length(), 33168UL); + float result[5] = {0.00129761, 0.00151112, 0.000423564, 0.00108815, + 0.000932706}; + const size_t num_elements = outputs.front().data.length() / sizeof(float); + // The outputs' buffers are in CPU memory. + for (size_t i = 0; i < std::min(5UL, num_elements); i++) { + PADDLE_ENFORCE(static_cast(outputs.front().data.data())[i], + result[i]); + } + } +} + +void MainThreads(int num_threads, bool use_gpu) { + // Multi-threads only support on CPU + // 0. Create PaddlePredictor with a config. + NativeConfig config; + config.model_dir = FLAGS_dirname; + config.use_gpu = use_gpu; + config.fraction_of_gpu_memory = 0.15; + config.device = 0; + auto main_predictor = + CreatePaddlePredictor(config); + + std::vector threads; + for (int tid = 0; tid < num_threads; ++tid) { + threads.emplace_back([&, tid]() { + // 1. clone a predictor which shares the same parameters + auto predictor = main_predictor->Clone(); + constexpr int num_batches = 3; + for (int batch_id = 0; batch_id < num_batches; ++batch_id) { + // 2. Dummy Input Data + int64_t data[4] = {1, 2, 3, 4}; + PaddleTensor tensor; + tensor.shape = std::vector({4, 1}); + tensor.data = PaddleBuf(data, sizeof(data)); + tensor.dtype = PaddleDType::INT64; + + std::vector inputs(4, tensor); + std::vector outputs; + // 3. Run + CHECK(predictor->Run(inputs, &outputs)); + + // 4. Get output. + PADDLE_ENFORCE(outputs.size(), 1UL); + // Check the output buffer size and result of each tid. + PADDLE_ENFORCE(outputs.front().data.length(), 33168UL); + float result[5] = {0.00129761, 0.00151112, 0.000423564, 0.00108815, + 0.000932706}; + const size_t num_elements = + outputs.front().data.length() / sizeof(float); + // The outputs' buffers are in CPU memory. 
+ for (size_t i = 0; i < std::min(5UL, num_elements); i++) { + PADDLE_ENFORCE(static_cast(outputs.front().data.data())[i], + result[i]); + } + } + }); + } + for (int i = 0; i < num_threads; ++i) { + threads[i].join(); + } +} + +} // namespace demo +} // namespace paddle + +int main(int argc, char** argv) { + google::ParseCommandLineFlags(&argc, &argv, true); + paddle::demo::Main(false /* use_gpu*/); + paddle::demo::MainThreads(1, false /* use_gpu*/); + paddle::demo::MainThreads(4, false /* use_gpu*/); + if (FLAGS_use_gpu) { + paddle::demo::Main(true /*use_gpu*/); + paddle::demo::MainThreads(1, true /*use_gpu*/); + paddle::demo::MainThreads(4, true /*use_gpu*/); + } + return 0; +} diff --git a/paddle/fluid/inference/api/demo_ci/utils.h b/paddle/fluid/inference/api/demo_ci/utils.h new file mode 100644 index 0000000000..cb89906711 --- /dev/null +++ b/paddle/fluid/inference/api/demo_ci/utils.h @@ -0,0 +1,67 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once +#include +#include +#include +#include "paddle/fluid/inference/paddle_inference_api.h" + +namespace paddle { +namespace demo { + +static void split(const std::string& str, char sep, + std::vector* pieces) { + pieces->clear(); + if (str.empty()) { + return; + } + size_t pos = 0; + size_t next = str.find(sep, pos); + while (next != std::string::npos) { + pieces->push_back(str.substr(pos, next - pos)); + pos = next + 1; + next = str.find(sep, pos); + } + if (!str.substr(pos).empty()) { + pieces->push_back(str.substr(pos)); + } +} + +/* + * Get a summary of a PaddleTensor content. + */ +static std::string SummaryTensor(const PaddleTensor& tensor) { + std::stringstream ss; + int num_elems = tensor.data.length() / PaddleDtypeSize(tensor.dtype); + + ss << "data[:10]\t"; + switch (tensor.dtype) { + case PaddleDType::INT64: { + for (int i = 0; i < std::min(num_elems, 10); i++) { + ss << static_cast(tensor.data.data())[i] << " "; + } + break; + } + case PaddleDType::FLOAT32: + for (int i = 0; i < std::min(num_elems, 10); i++) { + ss << static_cast(tensor.data.data())[i] << " "; + } + break; + } + return ss.str(); +} + +} // namespace demo +} // namespace paddle diff --git a/paddle/fluid/inference/api/demo_ci/vis_demo.cc b/paddle/fluid/inference/api/demo_ci/vis_demo.cc new file mode 100644 index 0000000000..3800d49b34 --- /dev/null +++ b/paddle/fluid/inference/api/demo_ci/vis_demo.cc @@ -0,0 +1,154 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. */ + +/* + * This file contains demo for mobilenet, se-resnext50 and ocr. + */ + +#include +#include // use glog instead of PADDLE_ENFORCE to avoid importing other paddle header files. +#include +#include +#include "paddle/fluid/inference/demo_ci/utils.h" +#include "paddle/fluid/platform/enforce.h" + +#ifdef PADDLE_WITH_CUDA +DECLARE_double(fraction_of_gpu_memory_to_use); +#endif +DEFINE_string(modeldir, "", "Directory of the inference model."); +DEFINE_string(refer, "", "path to reference result for comparison."); +DEFINE_string( + data, "", + "path of data; each line is a record, format is " + "'\t data; + std::vector shape; +}; + +void split(const std::string& str, char sep, std::vector* pieces); + +Record ProcessALine(const std::string& line) { + VLOG(3) << "process a line"; + std::vector columns; + split(line, '\t', &columns); + CHECK_EQ(columns.size(), 2UL) + << "data format error, should be \t"; + + Record record; + std::vector data_strs; + split(columns[0], ' ', &data_strs); + for (auto& d : data_strs) { + record.data.push_back(std::stof(d)); + } + + std::vector shape_strs; + split(columns[1], ' ', &shape_strs); + for (auto& s : shape_strs) { + record.shape.push_back(std::stoi(s)); + } + VLOG(3) << "data size " << record.data.size(); + VLOG(3) << "data shape size " << record.shape.size(); + return record; +} + +void CheckOutput(const std::string& referfile, const PaddleTensor& output) { + std::string line; + std::ifstream file(referfile); + std::getline(file, line); + auto refer = ProcessALine(line); + file.close(); + + size_t numel = output.data.length() / PaddleDtypeSize(output.dtype); + VLOG(3) << "predictor output numel " << numel; + VLOG(3) << "reference output numel " << refer.data.size(); + PADDLE_ENFORCE_EQ(numel, refer.data.size()); + switch (output.dtype) { + case PaddleDType::INT64: { + for (size_t i = 0; i < numel; ++i) { + PADDLE_ENFORCE_EQ(static_cast(output.data.data())[i], + refer.data[i]); + } + break; + } + case PaddleDType::FLOAT32: + for (size_t i = 0; i < numel; ++i) { + PADDLE_ENFORCE_LT( + fabs(static_cast(output.data.data())[i] - refer.data[i]), + 1e-5); + } + break; + } +} + +/* + * Use the native fluid engine to inference the demo. + */ +void Main(bool use_gpu) { + NativeConfig config; + config.param_file = FLAGS_modeldir + "/__params__"; + config.prog_file = FLAGS_modeldir + "/__model__"; + config.use_gpu = use_gpu; + config.device = 0; + if (FLAGS_use_gpu) { + config.fraction_of_gpu_memory = 0.1; // set by yourself + } + + VLOG(3) << "init predictor"; + auto predictor = + CreatePaddlePredictor(config); + + VLOG(3) << "begin to process data"; + // Just a single batch of data. + std::string line; + std::ifstream file(FLAGS_data); + std::getline(file, line); + auto record = ProcessALine(line); + file.close(); + + // Inference. 
+  PaddleTensor input;
+  input.shape = record.shape;
+  input.data =
+      PaddleBuf(record.data.data(), record.data.size() * sizeof(float));
+  input.dtype = PaddleDType::FLOAT32;
+
+  VLOG(3) << "run executor";
+  std::vector<PaddleTensor> output;
+  predictor->Run({input}, &output);
+
+  VLOG(3) << "output.size " << output.size();
+  auto& tensor = output.front();
+  VLOG(3) << "output: " << SummaryTensor(tensor);
+
+  // Compare with the reference result.
+  CheckOutput(FLAGS_refer, tensor);
+}
+
+}  // namespace demo
+}  // namespace paddle
+
+int main(int argc, char** argv) {
+  google::ParseCommandLineFlags(&argc, &argv, true);
+  paddle::demo::Main(false /* use_gpu*/);
+  if (FLAGS_use_gpu) {
+    paddle::demo::Main(true /*use_gpu*/);
+  }
+  return 0;
+}
diff --git a/paddle/fluid/inference/api/high_level_api.md b/paddle/fluid/inference/api/high_level_api.md
new file mode 100644
index 0000000000..8b8b6916d7
--- /dev/null
+++ b/paddle/fluid/inference/api/high_level_api.md
@@ -0,0 +1,60 @@
+# Inference High-level APIs
+This document describes the high-level inference APIs; one can use them to quickly deploy a Paddle model for an application.
+
+The APIs are described in `paddle_inference_api.h`, just one header file, and two libraries `libpaddle_fluid.so` and `libpaddle_fluid_api.so` are needed for deployment.
+
+## PaddleTensor
+We provide the `PaddleTensor` data structure to give a general tensor interface.
+
+The definition is
+
+```c++
+struct PaddleTensor {
+  std::string name;  // variable name.
+  std::vector<int> shape;
+  PaddleBuf data;  // blob of data.
+  PaddleDType dtype;
+};
+```
+
+The data is stored in contiguous memory, a `PaddleBuf`, and a `PaddleDType` specifies the tensor's data type.
+The `name` field is used to specify the name of an input variable,
+which is important when there are multiple inputs and the predictor needs to distinguish which variable to set.
+
+## engine
+The inference APIs have two different underlying engines
+
+- the native engine, which consists of the native operators and framework,
+- the Anakin engine, which has an Anakin library embedded.
+
+The native engine takes a native Paddle model as input and supports any model trained by Paddle;
+the Anakin engine is faster for some models,
+but it can only take an Anakin model as input (the user needs to transform the format manually first), and currently not all Paddle models are supported.
+
+```c++
+enum class PaddleEngineKind {
+  kNative = 0,  // Use the native Fluid facility.
+  kAnakin,      // Use Anakin for inference.
+};
+```
+
+## PaddlePredictor and how to create one
+The main interface is `PaddlePredictor`, which has the following methods
+
+- `bool Run(const std::vector<PaddleTensor>& inputs, std::vector<PaddleTensor>* output_data)`
+  - takes the inputs and fills `output_data`.
+- `Clone` to clone a predictor from an existing one, with the model parameters shared.
+
+There is a factory method to help create a predictor, and the user takes ownership of the returned object.
+
+```c++
+template <typename ConfigT, PaddleEngineKind engine = PaddleEngineKind::kNative>
+std::unique_ptr<PaddlePredictor> CreatePaddlePredictor(const ConfigT& config);
+```
+
+By specifying the engine kind and config, one can get a specific implementation.
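+
+Putting the pieces together, the following is a minimal end-to-end sketch using the native engine. The model path and the `{4, 1}` INT64 input are placeholders borrowed from the word2vec demo (`simple_on_word2vec.cc`), not requirements of the API.
+
+```c++
+#include <vector>
+#include "paddle_inference_api.h"
+
+int main() {
+  // Configure the native engine; model_dir is a placeholder path.
+  paddle::NativeConfig config;
+  config.model_dir = "path/to/word2vec.inference.model";
+  config.use_gpu = false;
+  auto predictor =
+      paddle::CreatePaddlePredictor<paddle::NativeConfig,
+                                    paddle::PaddleEngineKind::kNative>(config);
+
+  // One INT64 input of shape {4, 1}; the PaddleBuf borrows the stack buffer.
+  int64_t data[4] = {1, 2, 3, 4};
+  paddle::PaddleTensor tensor;
+  tensor.shape = std::vector<int>({4, 1});
+  tensor.data = paddle::PaddleBuf(data, sizeof(data));
+  tensor.dtype = paddle::PaddleDType::INT64;
+  std::vector<paddle::PaddleTensor> inputs(4, tensor);
+
+  // Run() fills the outputs; their buffers live in CPU memory.
+  std::vector<paddle::PaddleTensor> outputs;
+  predictor->Run(inputs, &outputs);
+  return 0;
+}
+```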
+
+## Reference
+
+- [paddle_inference_api.h](./paddle_inference_api.h)
+- [some demos](./demo_ci)
diff --git a/paddle/fluid/inference/api/high_level_api_cn.md b/paddle/fluid/inference/api/high_level_api_cn.md
new file mode 100644
index 0000000000..2fb914592c
--- /dev/null
+++ b/paddle/fluid/inference/api/high_level_api_cn.md
@@ -0,0 +1,87 @@
+# Paddle Inference API
+
+To make inference deployment simpler, Fluid provides a set of high-level APIs that hide the different underlying optimized implementations.
+
+The inference library contains:
+
+- the header `paddle_inference_api.h`, which defines all the interfaces
+- the library file `libpaddle_fluid.so` or `libpaddle_fluid.a`
+- the library file `libpaddle_inference_api.so` or `libpaddle_inference_api.a`
+
+The API concepts are introduced in detail below.
+
+## PaddleTensor
+
+PaddleTensor defines the basic input/output data format for inference. Its definition is
+
+```c++
+struct PaddleTensor {
+  std::string name;  // variable name.
+  std::vector<int> shape;
+  PaddleBuf data;  // blob of data.
+  PaddleDType dtype;
+};
+```
+
+- `name` specifies the name of the corresponding variable in the model for this input (not used yet, but it will be enabled once arbitrary targets are supported)
+- `shape` is the shape of the tensor
+- `data` is stored as continuous memory in a `PaddleBuf`; a `PaddleBuf` can either wrap external data or `malloc` its own memory, see the related definitions in the header for details
+- `dtype` is the data type of the tensor
+
+## engine
+
+The high-level API is backed by several optimized implementations, called engines; currently there are three
+
+- the native engine, composed of paddle's native forward operators, which naturally supports all models trained by paddle,
+- the Anakin engine, which wraps [Anakin](https://github.com/PaddlePaddle/Anakin); it performs well on some models but only accepts its own model format and cannot support all paddle models,
+- the TensorRT mixed engine, which supports [TensorRT](https://developer.nvidia.com/tensorrt) via subgraphs; it supports all paddle models and automatically offloads parts of the computation subgraph to TensorRT for acceleration (WIP)
+
+It is declared as
+
+```c++
+enum class PaddleEngineKind {
+  kNative = 0,         // Use the native Fluid facility.
+  kAnakin,             // Use Anakin for inference.
+  kAutoMixedTensorRT   // Automatically mixing TensorRT with the Fluid ops.
+};
+```
+
+## Inference deployment workflow
+
+Overall there are the following steps
+
+1. create a `PaddlePredictor` with a suitable config
+2. create the input `PaddleTensor`s and pass them to the `PaddlePredictor`
+3. fetch the output `PaddleTensor`s and read the results out
+
+The following walks through a complete, simple model, with some detail code omitted
+
+```c++
+#include "paddle_inference_api.h"
+
+// Create a config and modify the relevant settings.
+paddle::NativeConfig config;
+config.model_dir = "xxx";
+config.use_gpu = false;
+// Create a native PaddlePredictor.
+auto predictor =
+    paddle::CreatePaddlePredictor<paddle::NativeConfig>(config);
+// Create the input tensor.
+int64_t data[4] = {1, 2, 3, 4};
+paddle::PaddleTensor tensor{.name = "",
+                            .shape = std::vector<int>({4, 1}),
+                            .data = PaddleBuf(data, sizeof(data)),
+                            .dtype = PaddleDType::INT64};
+// Create the output tensors; their memory can be reused.
+std::vector<paddle::PaddleTensor> outputs;
+// Run the prediction.
+CHECK(predictor->Run({tensor}, &outputs));
+// Fetch the outputs ...
+```
+
+At build time, just link against `libpaddle_fluid.a/.so` and `libpaddle_inference_api.a/.so`.
+
+## Detailed code references
+
+- [inference demos](./demo_ci)
+- [more complex single-threaded/multi-threaded examples](https://github.com/PaddlePaddle/Paddle/blob/develop/paddle/fluid/inference/api/test_api_impl.cc)
diff --git a/paddle/fluid/inference/api/paddle_inference_api.h b/paddle/fluid/inference/api/paddle_inference_api.h
new file mode 100644
index 0000000000..794534467b
--- /dev/null
+++ b/paddle/fluid/inference/api/paddle_inference_api.h
@@ -0,0 +1,155 @@
+/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+/*
+ * This file contains the definition of a simple Inference API for Paddle.
+ *
+ * ATTENTION: It requires some C++11 features; for lower version C++ or C, we
+ * might release another API.
+ */
+
+#pragma once
+
+#include <cassert>
+#include <memory>
+#include <string>
+#include <vector>
+
+namespace paddle {
+
+enum PaddleDType {
+  FLOAT32,
+  INT64,
+};
+
+class PaddleBuf {
+ public:
+  PaddleBuf() = default;
+  PaddleBuf(PaddleBuf&& other);
+  // Copy only available when memory is managed externally.
+  explicit PaddleBuf(const PaddleBuf&);
+  PaddleBuf& operator=(const PaddleBuf&);
+  PaddleBuf& operator=(PaddleBuf&&);
+  // Do not own the memory.
+  PaddleBuf(void* data, size_t length)
+      : data_(data), length_(length), memory_owned_{false} {}
+  // Own memory.
+  explicit PaddleBuf(size_t length)
+      : data_(new char[length]), length_(length), memory_owned_(true) {}
+  // Resize to `length` bytes.
+  void Resize(size_t length);
+  // Reset to external memory.
+  void Reset(void* data, size_t length);
+  bool empty() const { return length_ == 0; }
+  void* data() const { return data_; }
+  size_t length() const { return length_; }
+
+  ~PaddleBuf() { Free(); }
+
+ private:
+  void Free();
+  void* data_{nullptr};  // pointer to the data memory.
+  size_t length_{0};     // number of memory bytes.
+  bool memory_owned_{true};
+};
+
+struct PaddleTensor {
+  PaddleTensor() = default;
+  std::string name;  // variable name.
+  std::vector<int> shape;
+  PaddleBuf data;  // blob of data.
+  PaddleDType dtype;
+  std::vector<std::vector<size_t>> lod;  // LoD data.
+};
+
+enum class PaddleEngineKind {
+  kNative = 0,         // Use the native Fluid facility.
+  kAnakin,             // Use Anakin for inference.
+  kAutoMixedTensorRT,  // Automatically mix Fluid with TensorRT.
+  // TODO(Superjomn) support the following engines later.
+  // kTensorRT,         // Use TensorRT for inference.
+  // kAutoMixedAnakin,  // Automatically mix Fluid with Anakin.
+};
+
+/*
+ * A simple Inference API for Paddle. Currently this API can be used by
+ * non-sequence scenarios.
+ */
+class PaddlePredictor {
+ public:
+  struct Config;
+  PaddlePredictor() = default;
+  PaddlePredictor(const PaddlePredictor&) = delete;
+  PaddlePredictor& operator=(const PaddlePredictor&) = delete;
+
+  // Predict a record.
+  // The caller is responsible for allocating and releasing the memory of
+  // `inputs`; `inputs` should stay available until Run returns. The caller is
+  // also responsible for the output tensors' buffers, either allocated here
+  // or passed from outside.
+  virtual bool Run(const std::vector<PaddleTensor>& inputs,
+                   std::vector<PaddleTensor>* output_data,
+                   int batch_size = -1) = 0;
+
+  // Clone a predictor that shares the model weights; the cloned predictor
+  // should be thread-safe.
+  virtual std::unique_ptr<PaddlePredictor> Clone() = 0;
+
+  // Destroy the Predictor.
+  virtual ~PaddlePredictor() = default;
+
+  // The common configs for all the predictors.
+  struct Config {
+    std::string model_dir;  // path to the model directory.
+  };
+};
+
+struct NativeConfig : public PaddlePredictor::Config {
+  // GPU related fields.
+  bool use_gpu{false};
+  int device{0};
+  float fraction_of_gpu_memory{-1.f};  // Negative to notify initialization.
+
+  std::string prog_file;
+  std::string param_file;
+};
+
+// Configurations for Anakin engine.
+struct AnakinConfig : public PaddlePredictor::Config {
+  enum TargetType { NVGPU = 0, X86 };
+  int device;
+  std::string model_file;
+  int max_batch_size{-1};
+  TargetType target_type;
+};
+
+struct TensorRTConfig : public NativeConfig {
+  // Determine whether a subgraph will be executed by TRT.
+  int min_subgraph_size{1};
+};
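+
+// Note: each config type pairs with an engine kind in CreatePaddlePredictor
+// below: NativeConfig with kNative, AnakinConfig with kAnakin, and
+// TensorRTConfig with kAutoMixedTensorRT.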
+
+// A factory to help create different predictors.
+//
+// FOR EXTENSION DEVELOPERS:
+// Different predictors are designated by config type and engine kind. Similar
+// configs can be merged, but there shouldn't be a huge config containing
+// different fields for more than one kind of predictor.
+//
+// Similarly, each engine kind should map to a unique predictor implementation.
+template <typename ConfigT, PaddleEngineKind engine = PaddleEngineKind::kNative>
+std::unique_ptr<PaddlePredictor> CreatePaddlePredictor(const ConfigT& config);
+
+int PaddleDtypeSize(PaddleDType dtype);
+
+}  // namespace paddle
diff --git a/paddle/fluid/inference/check_symbol.sh b/paddle/fluid/inference/check_symbol.sh
new file mode 100755
index 0000000000..12b7b3e7e5
--- /dev/null
+++ b/paddle/fluid/inference/check_symbol.sh
@@ -0,0 +1,12 @@
+#!/bin/bash
+
+lib=$1
+if [ $# -ne 1 ]; then echo "No input library"; exit 1; fi
+
+num_paddle_syms=$(nm -D ${lib} | grep paddle | wc -l)
+num_google_syms=$(nm -D ${lib} | grep google | grep -v paddle | grep T | wc -l)
+
+if [ $num_paddle_syms -le 0 ]; then echo "Have no paddle symbols"; exit 1; fi
+if [ $num_google_syms -ge 1 ]; then echo "Have some google symbols"; exit 1; fi
+
+exit 0
diff --git a/paddle/fluid/inference/engine.h b/paddle/fluid/inference/engine.h
index 6b0ac92fa9..ce2b816171 100644
--- a/paddle/fluid/inference/engine.h
+++ b/paddle/fluid/inference/engine.h
@@ -14,11 +14,15 @@ limitations under the License. */
 
 #pragma once
 
+#include <string>
 #include "paddle/fluid/framework/framework.pb.h"
 
 namespace paddle {
 namespace inference {
 
+struct Buffer;
+enum class DeviceType { UNK = -1, CPU, GPU };
+
 /*
  * EngineBase is the base class of all inference engines. An inference engine
  * takes a paddle program as input, and outputs the result in fluid Tensor
@@ -45,8 +49,20 @@ class EngineBase {
   // Execute the engine, that will run the inference network.
   virtual void Execute(int batch_size) = 0;
 
+  // Return the IO buffer that is allocated in the engine. One can read/write
+  // directly on the buffer. If the buffer's internal pointer is nullptr, one
+  // can also allocate the memory and maintain it outside the engine.
+  virtual Buffer& buffer(const std::string& name) = 0;
+
   virtual ~EngineBase() {}
 };  // class EngineBase
 
+struct Buffer {
+  void* buffer{nullptr};               // buffer should be allocated only once.
+  size_t max_size;                     // buffer allocated space.
+  size_t size;                         // data size.
+  DeviceType device{DeviceType::UNK};  // tells which device this buffer is on.
+};
+
 }  // namespace inference
 }  // namespace paddle
diff --git a/paddle/fluid/inference/io.cc b/paddle/fluid/inference/io.cc
index 65db7c7b50..181868977d 100644
--- a/paddle/fluid/inference/io.cc
+++ b/paddle/fluid/inference/io.cc
@@ -20,16 +20,20 @@ limitations under the License. */
 #include "paddle/fluid/framework/block_desc.h"
 #include "paddle/fluid/framework/feed_fetch_type.h"
 #include "paddle/fluid/framework/op_registry.h"
+#include "paddle/fluid/platform/cpu_helper.h"
 #include "paddle/fluid/pybind/pybind.h"
 
 DEFINE_string(devices, "", "The devices to be used which is joined by comma.");
 DEFINE_bool(init_p2p, false, "Whether to init p2p.");
+DEFINE_int32(math_num_threads, 1,
+             "Number of threads used to run math functions.");
 
 namespace paddle {
 namespace inference {
 
 void Init(const std::vector<std::string> argv) {
   framework::InitGflags(argv);
+  platform::SetNumThreads(FLAGS_math_num_threads);
   // init devices
   std::vector<std::string> devices;
   std::string token;
diff --git a/paddle/fluid/inference/io.h b/paddle/fluid/inference/io.h
index caf599b1a6..01b50b3670 100644
--- a/paddle/fluid/inference/io.h
+++ b/paddle/fluid/inference/io.h
@@ -18,9 +18,9 @@ limitations under the License.
 */
 #include <memory>
 #include <string>
 #include "paddle/fluid/framework/executor.h"
-#include "paddle/fluid/framework/init.h"
 #include "paddle/fluid/framework/program_desc.h"
 #include "paddle/fluid/framework/scope.h"
+#include "paddle/fluid/platform/init.h"
 
 namespace paddle {
 namespace inference {
diff --git a/paddle/fluid/inference/paddle_fluid.sym b/paddle/fluid/inference/paddle_fluid.sym
new file mode 100644
index 0000000000..ef2a04d788
--- /dev/null
+++ b/paddle/fluid/inference/paddle_fluid.sym
@@ -0,0 +1 @@
+*paddle*
diff --git a/paddle/fluid/inference/tensorrt/CMakeLists.txt b/paddle/fluid/inference/tensorrt/CMakeLists.txt
index 288789d6e4..b52d083f28 100644
--- a/paddle/fluid/inference/tensorrt/CMakeLists.txt
+++ b/paddle/fluid/inference/tensorrt/CMakeLists.txt
@@ -1,4 +1,4 @@
+nv_library(tensorrt_engine SRCS engine.cc DEPS framework_proto)
 nv_test(test_tensorrt SRCS test_tensorrt.cc DEPS dynload_cuda device_context dynamic_loader)
-nv_test(test_tensorrt_engine SRCS test_engine.cc engine.cc DEPS dynload_cuda)
-set(ENGINE_FILE ${CMAKE_CURRENT_SOURCE_DIR}/engine.cc)
+nv_test(test_tensorrt_engine SRCS test_engine.cc DEPS dynload_cuda tensorrt_engine)
 add_subdirectory(convert)
diff --git a/paddle/fluid/inference/tensorrt/convert/CMakeLists.txt b/paddle/fluid/inference/tensorrt/convert/CMakeLists.txt
index 3c5909c0be..6863b035d8 100644
--- a/paddle/fluid/inference/tensorrt/convert/CMakeLists.txt
+++ b/paddle/fluid/inference/tensorrt/convert/CMakeLists.txt
@@ -1,4 +1,26 @@
-nv_test(test_op_converter SRCS test_op_converter.cc mul_op.cc conv2d_op.cc DEPS ${FLUID_CORE_MODULES})
-nv_test(test_trt_activation_op SRCS test_activation_op.cc ${ENGINE_FILE} activation_op.cc
-        DEPS ${FLUID_CORE_MODULES} activation_op)
+# Add TRT tests
+nv_library(tensorrt_converter
+  SRCS mul_op.cc conv2d_op.cc fc_op.cc pool2d_op.cc elementwise_op.cc
+activation_op.cc softmax_op.cc
+  DEPS tensorrt_engine operator scope framework_proto op_registry)
+
+nv_test(test_op_converter SRCS test_op_converter.cc DEPS
+  ${FLUID_CORE_MODULES} tensorrt_engine tensorrt_converter)
+
 nv_test(test_io_converter SRCS test_io_converter.cc io_converter.cc DEPS dynload_cuda dynamic_loader lod_tensor)
+nv_test(test_trt_mul_op SRCS test_mul_op.cc mul_op.cc
+  DEPS ${FLUID_CORE_MODULES} tensorrt_engine mul_op SERIAL)
+nv_test(test_trt_fc_op SRCS test_fc_op.cc fc_op.cc
+  DEPS ${FLUID_CORE_MODULES} tensorrt_engine mul_op SERIAL)
+nv_test(test_trt_activation_op SRCS test_activation_op.cc activation_op.cc
+  DEPS ${FLUID_CORE_MODULES} tensorrt_engine activation_op SERIAL)
+nv_test(test_trt_conv_op SRCS test_conv2d_op.cc conv2d_op.cc
+  DEPS ${FLUID_CORE_MODULES} tensorrt_engine conv_op SERIAL)
+nv_test(test_trt_pool2d_op SRCS test_pool2d_op.cc pool2d_op.cc
+  DEPS ${FLUID_CORE_MODULES} tensorrt_engine pool_op SERIAL)
+
+nv_test(test_trt_elementwise_op SRCS test_elementwise_op.cc elementwise_op.cc
+  DEPS ${FLUID_CORE_MODULES} tensorrt_engine elementwise_add_op SERIAL)
+
+nv_test(test_trt_softmax_op SRCS test_softmax_op.cc softmax_op.cc
+  DEPS ${FLUID_CORE_MODULES} tensorrt_engine softmax_op SERIAL)
diff --git a/paddle/fluid/inference/tensorrt/convert/activation_op.cc b/paddle/fluid/inference/tensorrt/convert/activation_op.cc
index 543784289c..e1cace9cc1 100644
--- a/paddle/fluid/inference/tensorrt/convert/activation_op.cc
+++ b/paddle/fluid/inference/tensorrt/convert/activation_op.cc
@@ -12,6 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and limitations under the License. */ +#include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/inference/tensorrt/convert/op_converter.h" namespace paddle { @@ -21,20 +22,29 @@ namespace tensorrt { class ReluOpConverter : public OpConverter { public: ReluOpConverter() {} - void operator()(const framework::OpDesc& op) override { + void operator()(const framework::proto::OpDesc& op, + const framework::Scope& scope, bool test_mode) override { + // Here the two nullptr looks strange, that's because the + // framework::OpDesc's constructor is strange. + framework::OpDesc op_desc(op, nullptr); LOG(INFO) << "convert a fluid relu op to tensorrt activation layer whose " "type is Relu"; const nvinfer1::ITensor* input_tensor = - engine_->GetITensor(op.Input("X")[0]); + engine_->GetITensor(op_desc.Input("X")[0]); nvinfer1::IActivationLayer* layer = TRT_ENGINE_ADD_LAYER( engine_, Activation, *const_cast(input_tensor), nvinfer1::ActivationType::kRELU); - engine_->SetITensor(op.Output("Out")[0], layer->getOutput(0)); + auto output_name = op_desc.Output("Out")[0]; + engine_->SetITensor(output_name, layer->getOutput(0)); + if (test_mode) { // the test framework can not determine which is the + // output, so place the declaration inside. + engine_->DeclareOutput(output_name); + } } }; -REGISTER_TRT_OP_CONVERTER(relu, ReluOpConverter); - } // namespace tensorrt } // namespace inference } // namespace paddle + +REGISTER_TRT_OP_CONVERTER(relu, ReluOpConverter); diff --git a/paddle/fluid/inference/tensorrt/convert/conv2d_op.cc b/paddle/fluid/inference/tensorrt/convert/conv2d_op.cc index 431500b90e..dba1d50b2d 100644 --- a/paddle/fluid/inference/tensorrt/convert/conv2d_op.cc +++ b/paddle/fluid/inference/tensorrt/convert/conv2d_op.cc @@ -20,15 +20,65 @@ namespace tensorrt { class Conv2dOpConverter : public OpConverter { public: - Conv2dOpConverter() {} - void operator()(const framework::OpDesc& op) override { + void operator()(const framework::proto::OpDesc& op, + const framework::Scope& scope, bool test_mode) override { LOG(INFO) << "convert a fluid conv2d op to tensorrt conv layer without bias"; + + framework::OpDesc op_desc(op, nullptr); + PADDLE_ENFORCE_EQ(op_desc.Input("Input").size(), 1); + PADDLE_ENFORCE_EQ(op_desc.Input("Filter").size(), 1); // Y is a weight + PADDLE_ENFORCE_EQ(op_desc.Output("Output").size(), 1); + + auto* X = engine_->GetITensor(op_desc.Input("Input").front()); + // Declare weights + auto* Y_v = scope.FindVar(op_desc.Input("Filter").front()); + PADDLE_ENFORCE_NOT_NULL(Y_v); + auto* Y_t = Y_v->GetMutable(); + auto* weight_data = Y_t->mutable_data(platform::CPUPlace()); + + PADDLE_ENFORCE_EQ(Y_t->dims().size(), 4UL); + const int n_output = Y_t->dims()[0]; + const int filter_h = Y_t->dims()[2]; + const int filter_w = Y_t->dims()[3]; + + const int groups = boost::get(op_desc.GetAttr("groups")); + const std::vector dilations = + boost::get>(op_desc.GetAttr("dilations")); + const std::vector strides = + boost::get>(op_desc.GetAttr("strides")); + const std::vector paddings = + boost::get>(op_desc.GetAttr("paddings")); + + nvinfer1::DimsHW nv_ksize(filter_h, filter_w); + nvinfer1::DimsHW nv_dilations(dilations[0], dilations[1]); + nvinfer1::DimsHW nv_strides(strides[0], strides[1]); + nvinfer1::DimsHW nv_paddings(paddings[0], paddings[1]); + + TensorRTEngine::Weight weight{nvinfer1::DataType::kFLOAT, + static_cast(weight_data), + Y_t->memory_size() / sizeof(float)}; + + TensorRTEngine::Weight 
bias{nvinfer1::DataType::kFLOAT, nullptr, 0}; + auto* layer = TRT_ENGINE_ADD_LAYER( + engine_, Convolution, *const_cast(X), n_output, + nv_ksize, weight.get(), bias.get()); + PADDLE_ENFORCE(layer != nullptr); + layer->setStride(nv_strides); + layer->setPadding(nv_paddings); + layer->setDilation(nv_dilations); + layer->setNbGroups(groups); + + auto output_name = op_desc.Output("Output").front(); + engine_->SetITensor(output_name, layer->getOutput(0)); + if (test_mode) { + engine_->DeclareOutput(output_name); + } } }; -REGISTER_TRT_OP_CONVERTER(conv2d, Conv2dOpConverter); - } // namespace tensorrt } // namespace inference } // namespace paddle + +REGISTER_TRT_OP_CONVERTER(conv2d, Conv2dOpConverter); diff --git a/paddle/fluid/inference/tensorrt/convert/elementwise_op.cc b/paddle/fluid/inference/tensorrt/convert/elementwise_op.cc new file mode 100644 index 0000000000..3744550f60 --- /dev/null +++ b/paddle/fluid/inference/tensorrt/convert/elementwise_op.cc @@ -0,0 +1,210 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/inference/tensorrt/convert/op_converter.h" + +namespace paddle { +namespace inference { +namespace tensorrt { + +class ElementwiseWeightOpConverter : public OpConverter { + public: + ElementwiseWeightOpConverter() {} + void operator()(const framework::proto::OpDesc& op, + const framework::Scope& scope, bool test_mode) override { + // Here the two nullptr looks strange, that's because the + // framework::OpDesc's constructor is strange. 
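+    // (The nullptr is the parent BlockDesc, which is not needed when the
+    // OpDesc is only used to read inputs, outputs and attributes.)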
+ framework::OpDesc op_desc(op, nullptr); + LOG(INFO) << "convert a fluid elementwise op to tensorrt IScaleLayer"; + + PADDLE_ENFORCE_EQ(op_desc.Input("X").size(), 1); + PADDLE_ENFORCE_EQ(op_desc.Input("Y").size(), 1); // Y is a weight + PADDLE_ENFORCE_EQ(op_desc.Output("Out").size(), 1); + + auto* X = engine_->GetITensor(op_desc.Input("X").front()); + nvinfer1::Dims dims_x = X->getDimensions(); + PADDLE_ENFORCE(dims_x.nbDims >= 3); + + auto* Y_v = scope.FindVar(op_desc.Input("Y").front()); + PADDLE_ENFORCE_NOT_NULL(Y_v); + auto* Y_t = Y_v->GetMutable(); + auto* weight_data = Y_t->mutable_data(platform::CPUPlace()); + auto scale_mode = nvinfer1::ScaleMode::kELEMENTWISE; + + std::vector dims_y = framework::vectorize2int(Y_t->dims()); + if (static_cast(dims_y.size()) == dims_x.nbDims + 1) { + if (dims_y[0] == 1) dims_y.erase(dims_y.begin()); + } + + if (static_cast(dims_y.size()) == 1 && dims_y[0] == dims_x.d[0]) { + scale_mode = nvinfer1::ScaleMode::kCHANNEL; + } else if (static_cast(dims_y.size()) == dims_x.nbDims && + dims_y[0] == dims_x.d[0]) { + scale_mode = nvinfer1::ScaleMode::kELEMENTWISE; + for (int i = 1; i < dims_x.nbDims; i++) { + if (dims_y[i] != dims_x.d[i]) { + scale_mode = nvinfer1::ScaleMode::kCHANNEL; + break; + } + } + if (scale_mode == nvinfer1::ScaleMode::kCHANNEL) { + for (int i = 1; i < dims_x.nbDims; i++) { + if (dims_y[i] != 1) + PADDLE_THROW( + "TensorRT unsupported weight shape for Elementwise op!"); + } + } + } else { + PADDLE_THROW("TensorRT unsupported weight Shape for Elementwise op!"); + } + + TensorRTEngine::Weight shift_weights{nvinfer1::DataType::kFLOAT, + static_cast(weight_data), + Y_t->memory_size() / sizeof(float)}; + TensorRTEngine::Weight scale_weights{nvinfer1::DataType::kFLOAT, nullptr, + 0}; + TensorRTEngine::Weight power_weights{nvinfer1::DataType::kFLOAT, nullptr, + 0}; + + nvinfer1::IScaleLayer* layer = TRT_ENGINE_ADD_LAYER( + engine_, Scale, *const_cast(X), scale_mode, + shift_weights.get(), scale_weights.get(), power_weights.get()); + auto output_name = op_desc.Output("Out")[0]; + engine_->SetITensor(output_name, layer->getOutput(0)); + if (test_mode) { // the test framework can not determine which is the + // output, so place the declaration inside. + engine_->DeclareOutput(output_name); + } + } +}; + +class ElementwiseTensorOpConverter : public OpConverter { + public: + ElementwiseTensorOpConverter() {} + void operator()(const framework::proto::OpDesc& op, + const framework::Scope& scope, bool test_mode) override { + // Here the two nullptr looks strange, that's because the + // framework::OpDesc's constructor is strange. 
+ framework::OpDesc op_desc(op, nullptr); + LOG(INFO) << "convert a fluid elementwise op to tensorrt IScaleLayer"; + + PADDLE_ENFORCE_EQ(op_desc.Input("X").size(), 1); + PADDLE_ENFORCE_EQ(op_desc.Input("Y").size(), 1); // Y is a weight + PADDLE_ENFORCE_EQ(op_desc.Output("Out").size(), 1); + + auto* X = engine_->GetITensor(op_desc.Input("X").front()); + auto* Y = engine_->GetITensor(op_desc.Input("Y").front()); + nvinfer1::Dims dims_x = X->getDimensions(); + nvinfer1::Dims dims_y = Y->getDimensions(); + + // The two input tensor should have the same dims + PADDLE_ENFORCE(dims_x.nbDims >= 3); + if (dims_x.nbDims == dims_y.nbDims) { + for (int i = 0; i < dims_x.nbDims; i++) { + if (dims_x.d[i] != dims_y.d[i]) + PADDLE_THROW("TensorRT unsupported tensor shape for Elementwise op!"); + } + } else { + PADDLE_THROW("TensorRT unsupported tensor shape for Elementwise op!"); + } + + auto op_pair = ops.find(op_type_); + if (op_pair == ops.end()) { + PADDLE_THROW("Wrong elementwise op type!"); + } + nvinfer1::IElementWiseLayer* layer = TRT_ENGINE_ADD_LAYER( + engine_, ElementWise, *const_cast(X), + *const_cast(Y), op_pair->second); + + auto output_name = op_desc.Output("Out")[0]; + engine_->SetITensor(output_name, layer->getOutput(0)); + if (test_mode) { // the test framework can not determine which is the + // output, so place the declaration inside. + engine_->DeclareOutput(output_name); + } + } + + protected: + static const std::unordered_map + ops; + std::string op_type_; +}; + +const std::unordered_map + ElementwiseTensorOpConverter::ops = { + {"add", nvinfer1::ElementWiseOperation::kSUM}, + {"mul", nvinfer1::ElementWiseOperation::kPROD}, + {"sub", nvinfer1::ElementWiseOperation::kSUB}, + {"div", nvinfer1::ElementWiseOperation::kDIV}, + {"min", nvinfer1::ElementWiseOperation::kMIN}, + {"pow", nvinfer1::ElementWiseOperation::kPOW}, + {"max", nvinfer1::ElementWiseOperation::kMAX}, +}; + +class ElementwiseTensorAddOpConverter : public ElementwiseTensorOpConverter { + public: + ElementwiseTensorAddOpConverter() { op_type_ = "add"; } +}; + +class ElementwiseTensorMulOpConverter : public ElementwiseTensorOpConverter { + public: + ElementwiseTensorMulOpConverter() { op_type_ = "mul"; } +}; + +class ElementwiseTensorSubOpConverter : public ElementwiseTensorOpConverter { + public: + ElementwiseTensorSubOpConverter() { op_type_ = "sub"; } +}; + +class ElementwiseTensorDivOpConverter : public ElementwiseTensorOpConverter { + public: + ElementwiseTensorDivOpConverter() { op_type_ = "div"; } +}; + +class ElementwiseTensorMinOpConverter : public ElementwiseTensorOpConverter { + public: + ElementwiseTensorMinOpConverter() { op_type_ = "min"; } +}; + +class ElementwiseTensorMaxOpConverter : public ElementwiseTensorOpConverter { + public: + ElementwiseTensorMaxOpConverter() { op_type_ = "max"; } +}; + +class ElementwiseTensorPowOpConverter : public ElementwiseTensorOpConverter { + public: + ElementwiseTensorPowOpConverter() { op_type_ = "pow"; } +}; + +} // namespace tensorrt +} // namespace inference +} // namespace paddle + +REGISTER_TRT_OP_CONVERTER(elementwise_add_weight, ElementwiseWeightOpConverter); + +REGISTER_TRT_OP_CONVERTER(elementwise_add_tensor, + ElementwiseTensorAddOpConverter); +REGISTER_TRT_OP_CONVERTER(elementwise_sub_tensor, + ElementwiseTensorSubOpConverter); +REGISTER_TRT_OP_CONVERTER(elementwise_div_tensor, + ElementwiseTensorDivOpConverter); +REGISTER_TRT_OP_CONVERTER(elementwise_mul_tensor, + ElementwiseTensorMulOpConverter); +REGISTER_TRT_OP_CONVERTER(elementwise_max_tensor, + 
ElementwiseTensorMaxOpConverter); +REGISTER_TRT_OP_CONVERTER(elementwise_min_tensor, + ElementwiseTensorMinOpConverter); +REGISTER_TRT_OP_CONVERTER(elementwise_pow_tensor, + ElementwiseTensorPowOpConverter); diff --git a/paddle/fluid/inference/tensorrt/convert/fc_op.cc b/paddle/fluid/inference/tensorrt/convert/fc_op.cc new file mode 100644 index 0000000000..39fe1f609d --- /dev/null +++ b/paddle/fluid/inference/tensorrt/convert/fc_op.cc @@ -0,0 +1,119 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include "paddle/fluid/framework/eigen.h" +#include "paddle/fluid/framework/lod_tensor.h" +#include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/inference/tensorrt/convert/op_converter.h" +#include "paddle/fluid/inference/tensorrt/engine.h" +#include "paddle/fluid/platform/place.h" + +namespace paddle { +namespace inference { +namespace tensorrt { + +// Reorder the elements from istrides to ostrides, borrowed from TRT convert in +// tensorflow. +// https://github.com/tensorflow/tensorflow/blob/master/tensorflow/contrib/tensorrt/convert/convert_nodes.cc#L318 +template +void Reorder2(nvinfer1::DimsHW shape, const T* idata, nvinfer1::DimsHW istrides, + T* odata, nvinfer1::DimsHW ostrides) { + for (int h = 0; h < shape.h(); ++h) { + for (int w = 0; w < shape.w(); ++w) { + odata[h * ostrides.h() + w * ostrides.w()] = + idata[h * istrides.h() + w * istrides.w()]; + } + } +} +// indata c * k +// Reorder the data layout from CK to KC. +void ReorderCKtoKC(TensorRTEngine::Weight& iweights, // NOLINT + TensorRTEngine::Weight* oweights) { + int c = iweights.dims[0]; + int k = iweights.dims[1]; + oweights->dims.assign({k, c}); + nvinfer1::DimsHW istrides = {1, k}; + nvinfer1::DimsHW ostrides = {c, 1}; + Reorder2({k, c}, static_cast(iweights.get().values), istrides, + static_cast(const_cast(oweights->get().values)), + ostrides); +} + +/* + * FC converter convert a MUL op in Fluid to a FC layer in TRT. + */ +class FcOpConverter : public OpConverter { + public: + void operator()(const framework::proto::OpDesc& op, + const framework::Scope& scope, bool test_mode) override { + VLOG(4) << "convert a fluid fc op to tensorrt fc layer without bias"; + + framework::OpDesc op_desc(op, nullptr); + PADDLE_ENFORCE_EQ(op_desc.Input("X").size(), 1); + PADDLE_ENFORCE_EQ(op_desc.Input("Y").size(), 1); // Y is a weight + PADDLE_ENFORCE_EQ(op_desc.Output("Out").size(), 1); + + // Declare inputs + auto* X = engine_->GetITensor(op_desc.Input("X").front()); + + // Declare weights + auto* Y_v = scope.FindVar(op_desc.Input("Y").front()); + PADDLE_ENFORCE_NOT_NULL(Y_v); + auto* Y_t = Y_v->GetMutable(); + // This may trigger a GPU->CPU copy, because TRT's weight can only be + // assigned from CPU memory, that can't be avoided. 
+ auto* weight_data = Y_t->mutable_data(platform::CPUPlace()); + PADDLE_ENFORCE_EQ(Y_t->dims().size(), 2UL); // a matrix + size_t n_output = Y_t->dims()[1]; + + framework::LoDTensor tmp; + tmp.Resize(Y_t->dims()); + memcpy(tmp.mutable_data(platform::CPUPlace()), weight_data, + Y_t->dims()[0] * Y_t->dims()[1] * sizeof(float)); + TensorRTEngine::Weight weight{nvinfer1::DataType::kFLOAT, + static_cast(weight_data), + Y_t->memory_size() / sizeof(float)}; + TensorRTEngine::Weight tmp_weight(nvinfer1::DataType::kFLOAT, + static_cast(tmp.data()), + Y_t->memory_size() / sizeof(float)); + weight.dims.assign({Y_t->dims()[0], Y_t->dims()[1]}); + tmp_weight.dims = weight.dims; + + // The data layout of TRT FC layer's weight is different from fluid's FC, + // need to reorder the elements. + ReorderCKtoKC(weight, &tmp_weight); + + // Currently, the framework can only handle one fluid op -> one TRT layer, + // but fc fuses `mul` and `bias` (2 fluid ops), so here is a trick, just + // handle `mul`, leave `add` as another layer. + // DEBUG + TensorRTEngine::Weight bias{nvinfer1::DataType::kFLOAT, nullptr, 0}; + + auto* layer = TRT_ENGINE_ADD_LAYER(engine_, FullyConnected, + *const_cast(X), + n_output, tmp_weight.get(), bias.get()); + + auto output_name = op_desc.Output("Out").front(); + engine_->SetITensor(output_name, layer->getOutput(0)); + if (test_mode) { + engine_->DeclareOutput(output_name); + } + } +}; + +} // namespace tensorrt +} // namespace inference +} // namespace paddle + +REGISTER_TRT_OP_CONVERTER(fc, FcOpConverter); diff --git a/paddle/fluid/inference/tensorrt/convert/io_converter.cc b/paddle/fluid/inference/tensorrt/convert/io_converter.cc index 32e8631fde..854f434d93 100644 --- a/paddle/fluid/inference/tensorrt/convert/io_converter.cc +++ b/paddle/fluid/inference/tensorrt/convert/io_converter.cc @@ -23,26 +23,42 @@ namespace tensorrt { using platform::is_gpu_place; using platform::is_cpu_place; -class DefaultInputConverter : public EngineInputConverter { +class DefaultIOConverter : public EngineIOConverter { public: - DefaultInputConverter() {} + DefaultIOConverter() {} // NOTE out is GPU memory. virtual void operator()(const LoDTensor& in, void* out, size_t max_size) override { PADDLE_ENFORCE(out != nullptr); - PADDLE_ENFORCE_LE(in.memory_size(), max_size); + PADDLE_ENFORCE(stream_ != nullptr); const auto& place = in.place(); + size_t size = in.memory_size(); + PADDLE_ENFORCE_LE(size, max_size); if (is_cpu_place(place)) { - PADDLE_ENFORCE(stream_ != nullptr); - PADDLE_ENFORCE_EQ(0, - cudaMemcpyAsync(out, in.data(), in.memory_size(), - cudaMemcpyHostToDevice, *stream_)); - + PADDLE_ENFORCE_EQ(0, cudaMemcpyAsync(out, in.data(), size, + cudaMemcpyHostToDevice, *stream_)); } else if (is_gpu_place(place)) { - PADDLE_ENFORCE_EQ(0, - cudaMemcpyAsync(out, in.data(), in.memory_size(), - cudaMemcpyHostToHost, *stream_)); - + PADDLE_ENFORCE_EQ(0, cudaMemcpyAsync(out, in.data(), size, + cudaMemcpyDeviceToDevice, *stream_)); + } else { + PADDLE_THROW("Unknown device for converter"); + } + cudaStreamSynchronize(*stream_); + } + // NOTE in is GPU memory. 
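+  // This overload is the output path: it copies the engine's device-side
+  // result buffer back into a fluid LoDTensor on either CPU or GPU.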
+ virtual void operator()(const void* in, LoDTensor* out, + size_t max_size) override { + PADDLE_ENFORCE(in != nullptr); + PADDLE_ENFORCE(stream_ != nullptr); + const auto& place = out->place(); + size_t size = out->memory_size(); + PADDLE_ENFORCE_LE(size, max_size); + if (is_cpu_place(place)) { + PADDLE_ENFORCE_EQ(0, cudaMemcpyAsync(out->data(), in, size, + cudaMemcpyDeviceToHost, *stream_)); + } else if (is_gpu_place(place)) { + PADDLE_ENFORCE_EQ(0, cudaMemcpyAsync(out->data(), in, size, + cudaMemcpyDeviceToDevice, *stream_)); } else { PADDLE_THROW("Unknown device for converter"); } @@ -50,7 +66,8 @@ class DefaultInputConverter : public EngineInputConverter { } }; -REGISTER_TENSORRT_INPUT_CONVERTER(default, DefaultInputConverter); +// fluid LodTensor <-> tensorrt ITensor +REGISTER_TENSORRT_IO_CONVERTER(default, DefaultIOConverter); } // namespace tensorrt } // namespace inference diff --git a/paddle/fluid/inference/tensorrt/convert/io_converter.h b/paddle/fluid/inference/tensorrt/convert/io_converter.h index 8972dae92b..71c48e085d 100644 --- a/paddle/fluid/inference/tensorrt/convert/io_converter.h +++ b/paddle/fluid/inference/tensorrt/convert/io_converter.h @@ -14,6 +14,7 @@ limitations under the License. */ #pragma once +#include #include #include "paddle/fluid/framework/lod_tensor.h" #include "paddle/fluid/inference/utils/singleton.h" @@ -25,43 +26,57 @@ namespace tensorrt { using framework::LoDTensor; /* - * Convert Input from Fluid to an Engine. - * TensorRT's ITensor follows row major, NCHW. Fluid is also row major, so in - * most cases just need to copy the data. + * Convert Input from Fluid to TensorRT Engine. + * Convert Output from TensorRT Engine to Fluid. + * + * Note that TensorRT's ITensor follows row major, NCHW. Fluid is also row + * major, + * so in the default case just need to copy the data. 
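+ *
+ * A converter is looked up by op type at run time, falling back to the
+ * "default" converter (a plain device copy) when no specialized converter
+ * is registered for that type.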
*/ -class EngineInputConverter { +class EngineIOConverter { public: - EngineInputConverter() {} + EngineIOConverter() {} virtual void operator()(const LoDTensor& in, void* out, size_t max_size) {} + virtual void operator()(const void* in, LoDTensor* out, size_t max_size) {} void SetStream(cudaStream_t* stream) { stream_ = stream; } - static void Run(const std::string& in_op_type, const LoDTensor& in, void* out, - size_t max_size, cudaStream_t* stream) { + static void ConvertInput(const std::string& op_type, const LoDTensor& in, + void* out, size_t max_size, cudaStream_t* stream) { PADDLE_ENFORCE(stream != nullptr); - auto* converter = Registry::Lookup( - in_op_type, "default" /* default_type */); + auto* converter = Registry::Lookup( + op_type, "default" /* default_type */); PADDLE_ENFORCE_NOT_NULL(converter); converter->SetStream(stream); (*converter)(in, out, max_size); } - virtual ~EngineInputConverter() {} + static void ConvertOutput(const std::string& op_type, const void* in, + LoDTensor* out, size_t max_size, + cudaStream_t* stream) { + PADDLE_ENFORCE(stream != nullptr); + auto* converter = Registry::Lookup( + op_type, "default" /* default_type */); + PADDLE_ENFORCE_NOT_NULL(converter); + converter->SetStream(stream); + (*converter)(in, out, max_size); + } + + virtual ~EngineIOConverter() {} protected: cudaStream_t* stream_{nullptr}; }; +#define REGISTER_TENSORRT_IO_CONVERTER(op_type__, Converter__) \ + struct trt_io_##op_type__##_converter { \ + trt_io_##op_type__##_converter() { \ + Registry::Register(#op_type__); \ + } \ + }; \ + trt_io_##op_type__##_converter trt_io_##op_type__##_converter__; + } // namespace tensorrt } // namespace inference } // namespace paddle - -#define REGISTER_TENSORRT_INPUT_CONVERTER(in_op_type__, Converter__) \ - struct trt_input_##in_op_type__##_converter { \ - trt_input_##in_op_type__##_converter() { \ - ::paddle::inference::Registry::Register< \ - Converter__>(#in_op_type__); \ - } \ - }; \ - trt_input_##in_op_type__##_converter trt_input_##in_op_type__##_converter__; diff --git a/paddle/fluid/inference/tensorrt/convert/mul_op.cc b/paddle/fluid/inference/tensorrt/convert/mul_op.cc index f9834ab156..514eb659a8 100644 --- a/paddle/fluid/inference/tensorrt/convert/mul_op.cc +++ b/paddle/fluid/inference/tensorrt/convert/mul_op.cc @@ -18,16 +18,35 @@ namespace paddle { namespace inference { namespace tensorrt { +/* + * MulOp, IMatrixMultiplyLayer in TRT. This Layer doesn't has weights. + */ class MulOpConverter : public OpConverter { public: - MulOpConverter() {} - void operator()(const framework::OpDesc& op) override { - LOG(INFO) << "convert a fluid mul op to tensorrt fc layer without bias"; + void operator()(const framework::proto::OpDesc& op, + const framework::Scope& scope, bool test_mode) override { + VLOG(4) << "convert a fluid mul op to tensorrt mul layer without bias"; + + framework::OpDesc op_desc(op, nullptr); + // Declare inputs + auto* input1 = engine_->GetITensor(op_desc.Input("X")[0]); + auto* input2 = engine_->GetITensor(op_desc.Input("Y")[0]); + // Both the input1 and input2 do not need transpose. + auto* layer = TRT_ENGINE_ADD_LAYER( + engine_, MatrixMultiply, *const_cast(input1), false, + *const_cast(input2), false); + + auto output_name = op_desc.Output("Out")[0]; + engine_->SetITensor(output_name, layer->getOutput(0)); + if (test_mode) { // the test framework can not determine which is the + // output, so place the declaration inside. 
+ engine_->DeclareOutput(output_name); + } } }; -REGISTER_TRT_OP_CONVERTER(mul, MulOpConverter); - } // namespace tensorrt } // namespace inference } // namespace paddle + +REGISTER_TRT_OP_CONVERTER(mul, MulOpConverter); diff --git a/paddle/fluid/inference/tensorrt/convert/op_converter.h b/paddle/fluid/inference/tensorrt/convert/op_converter.h index 77c788550b..41faaf7212 100644 --- a/paddle/fluid/inference/tensorrt/convert/op_converter.h +++ b/paddle/fluid/inference/tensorrt/convert/op_converter.h @@ -17,6 +17,7 @@ limitations under the License. */ #include #include #include "paddle/fluid/framework/block_desc.h" +#include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/framework/scope.h" #include "paddle/fluid/inference/tensorrt/engine.h" #include "paddle/fluid/inference/utils/singleton.h" @@ -31,25 +32,72 @@ namespace tensorrt { class OpConverter { public: OpConverter() {} - virtual void operator()(const framework::OpDesc& op) {} - void Run(const framework::OpDesc& op, TensorRTEngine* engine) { - std::string type = op.Type(); - auto* it = Registry::Lookup(type); - PADDLE_ENFORCE_NOT_NULL(it, "no OpConverter for optype [%s]", type); - it->SetEngine(engine); - (*it)(op); - } + // Converter logic for an op. + virtual void operator()(const framework::proto::OpDesc& op, + const framework::Scope& scope, + bool test_mode = false) {} + + // Convert a single fluid operator and add the corresponding layer to TRT. + // test_mode: whether the instance executes in an unit test. + void ConvertOp(const framework::proto::OpDesc& op, + const std::unordered_set& parameters, + const framework::Scope& scope, TensorRTEngine* engine, + bool test_mode = false) { + framework::OpDesc op_desc(op, nullptr); + + OpConverter* it{nullptr}; + + if (op_desc.Type() == "mul") { + PADDLE_ENFORCE_EQ(op_desc.Input("Y").size(), 1UL); + std::string Y = op_desc.Input("Y")[0]; + if (parameters.count(Y)) { + it = Registry::Lookup("fc"); + } + } + if (op_desc.Type().find("elementwise") != std::string::npos) { + static std::unordered_set add_tensor_op_set{ + "add", "mul", "sub", "div", "max", "min", "pow"}; + // TODO(xingzhaolong): all mul, sub, div + // static std::unordered_set add_weight_op_set {"add", "mul", + // "sub", "div"}; + static std::unordered_set add_weight_op_set{"add"}; + PADDLE_ENFORCE_EQ(op_desc.Input("Y").size(), 1UL); + int op_type_len = op_desc.Type().size(); + std::string op_type = op_desc.Type().substr(op_type_len - 3, op_type_len); + std::string Y = op_desc.Input("Y")[0]; + if (parameters.count(Y)) { + PADDLE_ENFORCE(add_weight_op_set.count(op_type) > 0, + "Unsupported elementwise type" + op_type); + it = + Registry::Lookup("elementwise_" + op_type + "_weight"); + PADDLE_ENFORCE_NOT_NULL(it, "no OpConverter for optype [%s]", + op_desc.Type()); + } else { + PADDLE_ENFORCE(add_tensor_op_set.count(op_type) > 0, + "Unsupported elementwise type" + op_type); + it = + Registry::Lookup("elementwise_" + op_type + "_tensor"); + } + } - // convert fluid op to tensorrt layer - void ConvertOp(const framework::OpDesc& op, TensorRTEngine* engine) { - OpConverter::Run(op, engine); + if (!it) { + it = Registry::Lookup(op_desc.Type()); + } + PADDLE_ENFORCE_NOT_NULL(it, "no OpConverter for optype [%s]", + op_desc.Type()); + it->SetEngine(engine); + (*it)(op, scope, test_mode); } - // convert fluid block to tensorrt network - void ConvertBlock(const framework::BlockDesc& block, TensorRTEngine* engine) { - for (auto op : block.AllOps()) { - OpConverter::Run(*op, engine); + // Convert a fluid block to tensorrt 
network, NOTE it just convert operators, + // the INetwork's inputs and outputs should specified in some other modules. + void ConvertBlock(const framework::proto::BlockDesc& block, + const std::unordered_set& parameters, + const framework::Scope& scope, TensorRTEngine* engine) { + for (int i = 0; i < block.ops_size(); i++) { + const auto& op = block.ops(i); + ConvertOp(op, parameters, scope, engine); } } @@ -60,6 +108,9 @@ class OpConverter { // TensorRT engine TensorRTEngine* engine_{nullptr}; + protected: + bool test_mode_; + private: // registered op converter map, whose key is the fluid op type, and value is // the pointer position of corresponding OpConverter class. @@ -68,14 +119,25 @@ class OpConverter { framework::Scope* scope_{nullptr}; }; -#define REGISTER_TRT_OP_CONVERTER(op_type__, Converter__) \ - struct trt_##op_type__##_converter { \ - trt_##op_type__##_converter() { \ - Registry::Register(#op_type__); \ - } \ - }; \ - trt_##op_type__##_converter trt_##op_type__##_converter__; - } // namespace tensorrt } // namespace inference } // namespace paddle + +#define REGISTER_TRT_OP_CONVERTER(op_type__, Converter__) \ + struct trt_##op_type__##_converter : public ::paddle::framework::Registrar { \ + trt_##op_type__##_converter() { \ + ::paddle::inference:: \ + Registry::Register< \ + ::paddle::inference::tensorrt::Converter__>(#op_type__); \ + } \ + }; \ + trt_##op_type__##_converter trt_##op_type__##_converter__; \ + int TouchConverterRegister_##op_type__() { \ + trt_##op_type__##_converter__.Touch(); \ + return 0; \ + } + +#define USE_TRT_CONVERTER(op_type__) \ + extern int TouchConverterRegister_##op_type__(); \ + static int use_op_converter_trt_##op_type__ __attribute__((unused)) = \ + TouchConverterRegister_##op_type__(); diff --git a/paddle/fluid/inference/tensorrt/convert/pool2d_op.cc b/paddle/fluid/inference/tensorrt/convert/pool2d_op.cc new file mode 100644 index 0000000000..11cad95361 --- /dev/null +++ b/paddle/fluid/inference/tensorrt/convert/pool2d_op.cc @@ -0,0 +1,80 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include "paddle/fluid/inference/tensorrt/convert/op_converter.h" + +namespace paddle { +namespace inference { +namespace tensorrt { + +/* + * Pool2dOp, IPoolingLayer in TRT. This Layer doesn't has weights. 
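+ * Fluid's `pooling_type` attribute ("max" / "avg") is mapped onto TRT's
+ * nvinfer1::PoolingType (kMAX / kAVERAGE) in the converter below; other
+ * pooling types are rejected.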
+ */ +class Pool2dOpConverter : public OpConverter { + public: + void operator()(const framework::proto::OpDesc& op, + const framework::Scope& scope, bool test_mode) override { + VLOG(4) + << "convert a fluid pool2d op to tensorrt pool2d layer without bias"; + framework::OpDesc op_desc(op, nullptr); + // Declare inputs + PADDLE_ENFORCE_EQ(op_desc.Input("X").size(), 1); + PADDLE_ENFORCE_EQ(op_desc.Output("Out").size(), 1); + auto* input1 = engine_->GetITensor(op_desc.Input("X")[0]); + + std::string pool_type = + boost::get(op_desc.GetAttr("pooling_type")); + std::vector ksize = + boost::get>(op_desc.GetAttr("ksize")); + std::vector strides = + boost::get>(op_desc.GetAttr("strides")); + std::vector paddings = + boost::get>(op_desc.GetAttr("paddings")); + + const nvinfer1::DimsHW nv_ksize(ksize[0], ksize[1]); + const nvinfer1::DimsHW nv_strides(strides[0], strides[1]); + const nvinfer1::DimsHW nv_paddings(paddings[0], paddings[1]); + + PADDLE_ENFORCE_EQ(input1->getDimensions().nbDims, 3UL); + + nvinfer1::PoolingType nv_pool_type = nvinfer1::PoolingType::kMAX; + if (pool_type == "max") { + nv_pool_type = nvinfer1::PoolingType::kMAX; + } else if (pool_type == "avg") { + nv_pool_type = nvinfer1::PoolingType::kAVERAGE; + } else { + PADDLE_THROW("TensorRT unsupported pooling type!"); + } + + auto* layer = TRT_ENGINE_ADD_LAYER(engine_, Pooling, + *const_cast(input1), + nv_pool_type, nv_ksize); + PADDLE_ENFORCE_NOT_NULL(layer, "pool layer could not be created."); + layer->setStride(nv_strides); + layer->setPadding(nv_paddings); + + auto output_name = op_desc.Output("Out")[0]; + engine_->SetITensor(output_name, layer->getOutput(0)); + if (test_mode) { + engine_->DeclareOutput(output_name); + } + } +}; + +} // namespace tensorrt +} // namespace inference +} // namespace paddle + +USE_OP(pool2d); +REGISTER_TRT_OP_CONVERTER(pool2d, Pool2dOpConverter); diff --git a/paddle/fluid/inference/tensorrt/convert/softmax_op.cc b/paddle/fluid/inference/tensorrt/convert/softmax_op.cc new file mode 100644 index 0000000000..0064f90fd7 --- /dev/null +++ b/paddle/fluid/inference/tensorrt/convert/softmax_op.cc @@ -0,0 +1,49 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include "paddle/fluid/inference/tensorrt/convert/op_converter.h" + +namespace paddle { +namespace inference { +namespace tensorrt { + +/* + * SoftMaxOp, ISoftMaxLayer in TRT. This Layer doesn't has weights. 
+ */ +class SoftMaxOpConverter : public OpConverter { + public: + void operator()(const framework::proto::OpDesc& op, + const framework::Scope& scope, bool test_mode) override { + VLOG(4) + << "convert a fluid softmax op to tensorrt softmax layer without bias"; + framework::OpDesc op_desc(op, nullptr); + // Declare inputs + auto* input1 = engine_->GetITensor(op_desc.Input("X")[0]); + auto* layer = TRT_ENGINE_ADD_LAYER(engine_, SoftMax, + *const_cast(input1)); + + auto output_name = op_desc.Output("Out")[0]; + engine_->SetITensor(output_name, layer->getOutput(0)); + if (test_mode) { + engine_->DeclareOutput(output_name); + } + } +}; + +} // namespace tensorrt +} // namespace inference +} // namespace paddle + +USE_OP(softmax); +REGISTER_TRT_OP_CONVERTER(softmax, SoftMaxOpConverter); diff --git a/paddle/fluid/inference/tensorrt/convert/test_activation_op.cc b/paddle/fluid/inference/tensorrt/convert/test_activation_op.cc index 23e3435c21..e82762ea03 100644 --- a/paddle/fluid/inference/tensorrt/convert/test_activation_op.cc +++ b/paddle/fluid/inference/tensorrt/convert/test_activation_op.cc @@ -1,94 +1,47 @@ /* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. */ + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
*/ #include -#include "paddle/fluid/framework/lod_tensor.h" #include "paddle/fluid/framework/op_registry.h" -#include "paddle/fluid/framework/program_desc.h" -#include "paddle/fluid/inference/tensorrt/convert/op_converter.h" -#include "paddle/fluid/platform/device_context.h" -#include "paddle/fluid/platform/place.h" - -USE_OP(relu); +#include "paddle/fluid/inference/tensorrt/convert/ut_helper.h" namespace paddle { namespace inference { namespace tensorrt { -void Compare(float input, float expect) { +TEST(ReluOpConverter, main) { framework::Scope scope; - platform::CUDAPlace place; - platform::CUDADeviceContext ctx(place); - - // init fluid op and variable - auto x_var = scope.Var("X"); - auto x_tensor = x_var->GetMutable(); - x_tensor->Resize({1, 1}); - std::vector init; - init.push_back(input); - framework::TensorFromVector(init, ctx, x_tensor); - - auto out_var = scope.Var("Out"); - auto out_tensor = out_var->GetMutable(); - out_tensor->Resize({1, 1}); - out_tensor->mutable_data(place); - - framework::OpDesc op_desc; - op_desc.SetType("relu"); - op_desc.SetInput("X", {"X"}); - op_desc.SetOutput("Out", {"Out"}); - - auto relu_op = framework::OpRegistry::CreateOp(op_desc); - - // run fluid op - relu_op->Run(scope, place); - std::vector out1; - framework::TensorToVector(*out_tensor, ctx, &out1); - - // init tensorrt op - cudaStream_t stream; - ASSERT_EQ(0, cudaStreamCreate(&stream)); - TensorRTEngine* engine = new TensorRTEngine(1, 1 << 10, &stream); - engine->InitNetwork(); - engine->DeclareInput("X", nvinfer1::DataType::kFLOAT, - nvinfer1::DimsCHW{1, 1, 1}); - - OpConverter op_converter; - op_converter.ConvertOp(op_desc, engine); - - engine->DeclareOutput("Out"); - engine->FreezeNetwork(); - engine->SetInputFromCPU("X", &input, 1 * sizeof(float)); - - // run tensorrt op - engine->Execute(1); - - float out2; - engine->GetOutputInCPU("Out", &out2, 1 * sizeof(float)); - - ASSERT_EQ(out1[0], out2); - ASSERT_EQ(out1[0], expect); - - delete engine; - cudaStreamDestroy(stream); -} - -TEST(OpConverter, ConvertRelu) { - Compare(1, 1); // relu(1) = 1 - Compare(-5, 0); // relu(-5) = 0 + std::unordered_set parameters; + TRTConvertValidation validator(10, parameters, scope, 1000); + validator.DeclInputVar("relu-X", nvinfer1::Dims2(10, 6)); + validator.DeclOutputVar("relu-Out", nvinfer1::Dims2(10, 6)); + + // Prepare Op description + framework::OpDesc desc; + desc.SetType("relu"); + desc.SetInput("X", {"relu-X"}); + desc.SetOutput("Out", {"relu-Out"}); + + LOG(INFO) << "set OP"; + validator.SetOp(*desc.Proto()); + LOG(INFO) << "execute"; + + validator.Execute(5); } } // namespace tensorrt } // namespace inference } // namespace paddle + +USE_OP(relu); diff --git a/paddle/fluid/inference/tensorrt/convert/test_conv2d_op.cc b/paddle/fluid/inference/tensorrt/convert/test_conv2d_op.cc new file mode 100644 index 0000000000..f8711c6b60 --- /dev/null +++ b/paddle/fluid/inference/tensorrt/convert/test_conv2d_op.cc @@ -0,0 +1,57 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. */ + +#include +#include "paddle/fluid/inference/tensorrt/convert/op_converter.h" +#include "paddle/fluid/inference/tensorrt/convert/ut_helper.h" + +namespace paddle { +namespace inference { +namespace tensorrt { + +TEST(conv2d_op, test) { + std::unordered_set parameters({"conv2d-Y"}); + framework::Scope scope; + TRTConvertValidation validator(5, parameters, scope, 1 << 15); + + validator.DeclInputVar("conv2d-X", nvinfer1::Dims3(2, 5, 5)); + validator.DeclParamVar("conv2d-Y", nvinfer1::Dims4(3, 2, 3, 3)); + validator.DeclOutputVar("conv2d-Out", nvinfer1::Dims3(3, 5, 5)); + + // Prepare Op description + framework::OpDesc desc; + desc.SetType("conv2d"); + desc.SetInput("Input", {"conv2d-X"}); + desc.SetInput("Filter", {"conv2d-Y"}); + desc.SetOutput("Output", {"conv2d-Out"}); + + const std::vector strides({1, 1}); + const std::vector paddings({1, 1}); + const std::vector dilations({1, 1}); + const int groups = 1; + + desc.SetAttr("strides", strides); + desc.SetAttr("paddings", paddings); + desc.SetAttr("dilations", dilations); + desc.SetAttr("groups", groups); + + validator.SetOp(*desc.Proto()); + + validator.Execute(3); +} + +} // namespace tensorrt +} // namespace inference +} // namespace paddle +USE_OP(conv2d); diff --git a/paddle/fluid/inference/tensorrt/convert/test_elementwise_op.cc b/paddle/fluid/inference/tensorrt/convert/test_elementwise_op.cc new file mode 100644 index 0000000000..7537d02a35 --- /dev/null +++ b/paddle/fluid/inference/tensorrt/convert/test_elementwise_op.cc @@ -0,0 +1,73 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ + +#include +#include "paddle/fluid/inference/tensorrt/convert/op_converter.h" +#include "paddle/fluid/inference/tensorrt/convert/ut_helper.h" + +namespace paddle { +namespace inference { +namespace tensorrt { + +TEST(elementwise_op, add_weight_test) { + std::unordered_set parameters({"elementwise_add-Y"}); + framework::Scope scope; + TRTConvertValidation validator(10, parameters, scope, 1 << 15); + validator.DeclInputVar("elementwise_add-X", nvinfer1::DimsCHW(10, 3, 3)); + validator.DeclParamVar("elementwise_add-Y", nvinfer1::Dims3(10, 1, 1)); + // validator.DeclParamVar("mul-Y", nvinfer1::Dims2(8, 2)); + validator.DeclOutputVar("elementwise_add-Out", nvinfer1::DimsCHW(10, 3, 3)); + + // Prepare Op description + framework::OpDesc desc; + desc.SetType("elementwise_add"); + desc.SetInput("X", {"elementwise_add-X"}); + desc.SetInput("Y", {"elementwise_add-Y"}); + desc.SetOutput("Out", {"elementwise_add-Out"}); + + int axis = 1; + desc.SetAttr("axis", axis); + + validator.SetOp(*desc.Proto()); + + validator.Execute(8); +} + +TEST(elementwise_op, add_tensor_test) { + std::unordered_set parameters; + framework::Scope scope; + TRTConvertValidation validator(8, parameters, scope, 1 << 15); + validator.DeclInputVar("elementwise_add-X", nvinfer1::DimsCHW(10, 3, 3)); + validator.DeclInputVar("elementwise_add-Y", nvinfer1::Dims3(10, 3, 3)); + // validator.DeclParamVar("mul-Y", nvinfer1::Dims2(8, 2)); + validator.DeclOutputVar("elementwise_add-Out", nvinfer1::DimsCHW(10, 3, 3)); + + // Prepare Op description + framework::OpDesc desc; + desc.SetType("elementwise_add"); + desc.SetInput("X", {"elementwise_add-X"}); + desc.SetInput("Y", {"elementwise_add-Y"}); + desc.SetOutput("Out", {"elementwise_add-Out"}); + + // the defalut axis of elementwise op is -1 + + validator.SetOp(*desc.Proto()); + + validator.Execute(8); +} + +} // namespace tensorrt +} // namespace inference +} // namespace paddle +USE_OP(elementwise_add); diff --git a/paddle/fluid/inference/tensorrt/convert/test_fc_op.cc b/paddle/fluid/inference/tensorrt/convert/test_fc_op.cc new file mode 100644 index 0000000000..1ae2668e73 --- /dev/null +++ b/paddle/fluid/inference/tensorrt/convert/test_fc_op.cc @@ -0,0 +1,46 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+
+#include <gtest/gtest.h>
+#include "paddle/fluid/inference/tensorrt/convert/op_converter.h"
+#include "paddle/fluid/inference/tensorrt/convert/ut_helper.h"
+
+namespace paddle {
+namespace inference {
+namespace tensorrt {
+
+TEST(fc_op, test) {
+  std::unordered_set<std::string> parameters({"mul-Y"});
+  framework::Scope scope;
+  TRTConvertValidation validator(10, parameters, scope, 1000);
+  validator.DeclInputVar("mul-X", nvinfer1::Dims3(10, 1, 1));
+  validator.DeclParamVar("mul-Y", nvinfer1::Dims2(10, 2));
+  validator.DeclOutputVar("mul-Out", nvinfer1::Dims2(1, 2));
+
+  // Prepare Op description
+  framework::OpDesc desc;
+  desc.SetType("mul");
+  desc.SetInput("X", {"mul-X"});
+  desc.SetInput("Y", {"mul-Y"});
+  desc.SetOutput("Out", {"mul-Out"});
+
+  validator.SetOp(*desc.Proto());
+
+  validator.Execute(10);
+}
+
+}  // namespace tensorrt
+}  // namespace inference
+}  // namespace paddle
+USE_OP(mul);
diff --git a/paddle/fluid/inference/tensorrt/convert/test_io_converter.cc b/paddle/fluid/inference/tensorrt/convert/test_io_converter.cc
index afcc516e6b..8f91309a0a 100644
--- a/paddle/fluid/inference/tensorrt/convert/test_io_converter.cc
+++ b/paddle/fluid/inference/tensorrt/convert/test_io_converter.cc
@@ -12,40 +12,63 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License. */
+#include <gtest/gtest.h>
 #include "paddle/fluid/framework/lod_tensor.h"
 #include "paddle/fluid/inference/tensorrt/convert/io_converter.h"
-#include <gtest/gtest.h>
-
 namespace paddle {
 namespace inference {
 namespace tensorrt {
-class EngineInputConverterTester : public ::testing::Test {
- public:
-  void SetUp() override { tensor.Resize({10, 10}); }
+void IOConverterTester(const platform::DeviceContext& ctx) {
+  cudaStream_t stream;
+  ASSERT_EQ(0, cudaStreamCreate(&stream));
-  framework::LoDTensor tensor;
-};
+  // init fluid in_tensor
+  framework::LoDTensor in_tensor;
+  in_tensor.Resize({10, 10});
+  auto place = ctx.GetPlace();
+  in_tensor.mutable_data<float>(place);
+  std::vector<float> init;
+  for (int64_t i = 0; i < 10 * 10; ++i) {
+    init.push_back(i);
+  }
+  framework::TensorFromVector(init, ctx, &in_tensor);
-TEST_F(EngineInputConverterTester, DefaultCPU) {
+  // init tensorrt buffer
   void* buffer;
-  tensor.mutable_data<float>(platform::CPUPlace());
-  ASSERT_EQ(cudaMalloc(&buffer, tensor.memory_size()), 0);
+  size_t size = in_tensor.memory_size();
+  ASSERT_EQ(cudaMalloc(&buffer, size), 0);
-  cudaStream_t stream;
-  EngineInputConverter::Run("test", tensor, buffer, tensor.memory_size(),
-                            &stream);
+  // convert fluid in_tensor to tensorrt buffer
+  EngineIOConverter::ConvertInput("test", in_tensor, buffer, size, &stream);
+
+  // convert tensorrt buffer to fluid out_tensor
+  framework::LoDTensor out_tensor;
+  out_tensor.Resize({10, 10});
+  out_tensor.mutable_data<float>(place);
+  EngineIOConverter::ConvertOutput("test", buffer, &out_tensor, size, &stream);
+
+  // compare in_tensor and out_tensor
+  std::vector<float> result;
+  framework::TensorToVector(out_tensor, ctx, &result);
+  EXPECT_EQ(init.size(), result.size());
+  for (size_t i = 0; i < init.size(); i++) {
+    EXPECT_EQ(init[i], result[i]);
+  }
+  cudaStreamDestroy(stream);
 }
-TEST_F(EngineInputConverterTester, DefaultGPU) {
-  void* buffer;
-  tensor.mutable_data<float>(platform::CUDAPlace());
-  ASSERT_EQ(cudaMalloc(&buffer, tensor.memory_size()), 0);
+TEST(EngineIOConverterTester, DefaultCPU) {
+  platform::CPUPlace place;
+  platform::CPUDeviceContext ctx(place);
+  IOConverterTester(ctx);
+}
-  cudaStream_t stream;
-  EngineInputConverter::Run("test", tensor, buffer, tensor.memory_size(),
-                            &stream);
+TEST(EngineIOConverterTester, DefaultGPU) {
+  platform::CUDAPlace place;
+  platform::CUDADeviceContext ctx(place);
+  IOConverterTester(ctx);
 }
 }  // namespace tensorrt
diff --git a/paddle/fluid/inference/tensorrt/convert/test_mul_op.cc b/paddle/fluid/inference/tensorrt/convert/test_mul_op.cc
new file mode 100644
index 0000000000..3d34cd7d5d
--- /dev/null
+++ b/paddle/fluid/inference/tensorrt/convert/test_mul_op.cc
@@ -0,0 +1,49 @@
+/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License. */
+
+#include <gtest/gtest.h>
+#include "paddle/fluid/framework/op_registry.h"
+#include "paddle/fluid/inference/tensorrt/convert/ut_helper.h"
+
+namespace paddle {
+namespace inference {
+namespace tensorrt {
+
+TEST(MulOpConverter, main) {
+  framework::Scope scope;
+  std::unordered_set<std::string> parameters;
+  TRTConvertValidation validator(10, parameters, scope, 1000, false);
+  validator.DeclInputVar("mul-X", nvinfer1::Dims2(10, 6));
+  validator.DeclInputVar("mul-Y", nvinfer1::Dims2(6, 10));
+  validator.DeclOutputVar("mul-Out", nvinfer1::Dims2(10, 10));
+
+  // Prepare Op description
+  framework::OpDesc desc;
+  desc.SetType("mul");
+  desc.SetInput("X", {"mul-X"});
+  desc.SetInput("Y", {"mul-Y"});
+  desc.SetOutput("Out", {"mul-Out"});
+
+  LOG(INFO) << "set OP";
+  validator.SetOp(*desc.Proto());
+  LOG(INFO) << "execute";
+
+  validator.Execute(2);
+}
+
+}  // namespace tensorrt
+}  // namespace inference
+}  // namespace paddle
+
+USE_OP(mul);
diff --git a/paddle/fluid/inference/tensorrt/convert/test_op_converter.cc b/paddle/fluid/inference/tensorrt/convert/test_op_converter.cc
index aa5fb726f1..d6651a5b24 100644
--- a/paddle/fluid/inference/tensorrt/convert/test_op_converter.cc
+++ b/paddle/fluid/inference/tensorrt/convert/test_op_converter.cc
@@ -12,9 +12,10 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License. */
*/ +#include "paddle/fluid/inference/tensorrt/convert/op_converter.h" + #include #include "paddle/fluid/framework/program_desc.h" -#include "paddle/fluid/inference/tensorrt/convert/op_converter.h" namespace paddle { namespace inference { @@ -23,15 +24,47 @@ namespace tensorrt { TEST(OpConverter, ConvertBlock) { framework::ProgramDesc prog; auto* block = prog.MutableBlock(0); - auto* mul_op = block->AppendOp(); - mul_op->SetType("mul"); auto* conv2d_op = block->AppendOp(); + + // init trt engine + cudaStream_t stream_; + std::unique_ptr engine_; + engine_.reset(new TensorRTEngine(5, 1 << 15, &stream_)); + engine_->InitNetwork(); + PADDLE_ENFORCE_EQ(cudaStreamCreate(&stream_), 0); + + engine_->DeclareInput("conv2d-X", nvinfer1::DataType::kFLOAT, + nvinfer1::Dims3(2, 5, 5)); + conv2d_op->SetType("conv2d"); + conv2d_op->SetInput("Input", {"conv2d-X"}); + conv2d_op->SetInput("Filter", {"conv2d-Y"}); + conv2d_op->SetOutput("Output", {"conv2d-Out"}); + + const std::vector strides({1, 1}); + const std::vector paddings({1, 1}); + const std::vector dilations({1, 1}); + const int groups = 1; + + conv2d_op->SetAttr("strides", strides); + conv2d_op->SetAttr("paddings", paddings); + conv2d_op->SetAttr("dilations", dilations); + conv2d_op->SetAttr("groups", groups); + + // init scope + framework::Scope scope; + std::vector dim_vec = {3, 2, 3, 3}; + auto* x = scope.Var("conv2d-Y"); + auto* x_tensor = x->GetMutable(); + x_tensor->Resize(framework::make_ddim(dim_vec)); OpConverter converter; - converter.ConvertBlock(*block, nullptr /*TensorRTEngine*/); + converter.ConvertBlock(*block->Proto(), {"conv2d-Y"}, scope, + engine_.get() /*TensorRTEngine*/); } } // namespace tensorrt } // namespace inference } // namespace paddle + +USE_TRT_CONVERTER(conv2d) diff --git a/paddle/fluid/inference/tensorrt/convert/test_pool2d_op.cc b/paddle/fluid/inference/tensorrt/convert/test_pool2d_op.cc new file mode 100644 index 0000000000..c5dddbc8cd --- /dev/null +++ b/paddle/fluid/inference/tensorrt/convert/test_pool2d_op.cc @@ -0,0 +1,60 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. */ +#include +#include +#include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/inference/tensorrt/convert/ut_helper.h" + +namespace paddle { +namespace inference { +namespace tensorrt { + +TEST(Pool2dOpConverter, main) { + framework::Scope scope; + std::unordered_set parameters; + TRTConvertValidation validator(5, parameters, scope, 1 << 15); + + // The ITensor's Dims should not contain the batch size. + // So, the ITensor's Dims of input and output should be C * H * W. 
+  validator.DeclInputVar("pool2d-X", nvinfer1::Dims3(3, 4, 4));
+  validator.DeclOutputVar("pool2d-Out", nvinfer1::Dims3(3, 2, 2));
+
+  // Prepare Op description
+  framework::OpDesc desc;
+  desc.SetType("pool2d");
+  desc.SetInput("X", {"pool2d-X"});
+  desc.SetOutput("Out", {"pool2d-Out"});
+
+  std::vector<int> ksize({2, 2});
+  std::vector<int> strides({2, 2});
+  std::vector<int> paddings({0, 0});
+  std::string pooling_t = "max";
+
+  desc.SetAttr("pooling_type", pooling_t);
+  desc.SetAttr("ksize", ksize);
+  desc.SetAttr("strides", strides);
+  desc.SetAttr("paddings", paddings);
+
+  LOG(INFO) << "set OP";
+  validator.SetOp(*desc.Proto());
+  LOG(INFO) << "execute";
+
+  validator.Execute(3);
+}
+
+}  // namespace tensorrt
+}  // namespace inference
+}  // namespace paddle
+
+USE_OP(pool2d);
diff --git a/paddle/fluid/inference/tensorrt/convert/test_softmax_op.cc b/paddle/fluid/inference/tensorrt/convert/test_softmax_op.cc
new file mode 100644
index 0000000000..503ce71f7f
--- /dev/null
+++ b/paddle/fluid/inference/tensorrt/convert/test_softmax_op.cc
@@ -0,0 +1,49 @@
+/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License. */
+#include <gtest/gtest.h>
+#include "paddle/fluid/framework/op_registry.h"
+#include "paddle/fluid/inference/tensorrt/convert/ut_helper.h"
+
+namespace paddle {
+namespace inference {
+namespace tensorrt {
+
+TEST(SoftMaxOpConverter, main) {
+  framework::Scope scope;
+  std::unordered_set<std::string> parameters;
+  TRTConvertValidation validator(8, parameters, scope, 1000);
+
+  std::vector<int> tensor_shape{8, 10};
+  validator.DeclInputVar("softmax-X", tensor_shape,
+                         nvinfer1::DimsCHW(10, 1, 1));
+  validator.DeclOutputVar("softmax-Out", nvinfer1::DimsCHW(10, 1, 1));
+
+  // Prepare Op description
+  framework::OpDesc desc;
+  desc.SetType("softmax");
+  desc.SetInput("X", {"softmax-X"});
+  desc.SetOutput("Out", {"softmax-Out"});
+
+  LOG(INFO) << "set OP";
+  validator.SetOp(*desc.Proto());
+  LOG(INFO) << "execute";
+
+  validator.Execute(3);
+}
+
+}  // namespace tensorrt
+}  // namespace inference
+}  // namespace paddle
+
+USE_OP(softmax);
diff --git a/paddle/fluid/inference/tensorrt/convert/ut_helper.h b/paddle/fluid/inference/tensorrt/convert/ut_helper.h
new file mode 100644
index 0000000000..4265f33f28
--- /dev/null
+++ b/paddle/fluid/inference/tensorrt/convert/ut_helper.h
@@ -0,0 +1,206 @@
+/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+/*
+ * This file implements a UT framework for validating the transformation of
+ * Fluid Ops into TRT layers.
+ */
+
+#pragma once
+
+#include <string>
+#include <vector>
+
+#include "paddle/fluid/framework/lod_tensor.h"
+#include "paddle/fluid/framework/op_registry.h"
+#include "paddle/fluid/inference/analysis/helper.h"
+#include "paddle/fluid/inference/tensorrt/convert/op_converter.h"
+#include "paddle/fluid/inference/tensorrt/engine.h"
+#include "paddle/fluid/inference/utils/singleton.h"
+
+namespace paddle {
+namespace inference {
+namespace tensorrt {
+
+/*
+ * Get a random float value in [low, high].
+ */
+float random(float low, float high) {
+  static std::random_device rd;
+  static std::mt19937 mt(rd());
+  std::uniform_real_distribution<double> dist(low, high);
+  return dist(mt);
+}
+
+void RandomizeTensor(framework::LoDTensor* tensor, const platform::Place& place,
+                     const platform::DeviceContext& ctx) {
+  auto dims = tensor->dims();
+  size_t num_elements = analysis::AccuDims(dims, dims.size());
+  PADDLE_ENFORCE_GT(num_elements, 0);
+  auto* data = tensor->mutable_data<float>(place);
+
+  for (size_t i = 0; i < num_elements; i++) {
+    *(data + i) = random(0., 1.);
+  }
+}
+
+/*
+ * Helps validate that a Fluid Op and the corresponding TRT layer compute the
+ * same result.
+ */
+class TRTConvertValidation {
+ public:
+  TRTConvertValidation() = delete;
+
+  TRTConvertValidation(int max_batch_size,
+                       const std::unordered_set<std::string>& parameters,
+                       framework::Scope& scope,  // NOLINT
+                       int workspace_size = 1 << 10, bool if_add_batch = true)
+      : parameters_(parameters),
+        scope_(scope),
+        if_add_batch_(if_add_batch),
+        max_batch_size_(max_batch_size) {
+    // create engine.
+    engine_.reset(new TensorRTEngine(max_batch_size, workspace_size, &stream_));
+    engine_->InitNetwork();
+
+    PADDLE_ENFORCE_EQ(cudaStreamCreate(&stream_), 0);
+  }
+
+  // Declare a Variable as input with random initialization.
+  void DeclInputVar(const std::string& name, const std::vector<int> tensor_dims,
+                    const nvinfer1::Dims& trt_dims) {
+    DeclVar(name, tensor_dims);
+    engine_->DeclareInput(name, nvinfer1::DataType::kFLOAT, trt_dims);
+  }
+
+  void DeclInputVar(const std::string& name, const nvinfer1::Dims& dims) {
+    DeclVar(name, dims);
+    // Declare TRT inputs.
+    engine_->DeclareInput(name, nvinfer1::DataType::kFLOAT, dims);
+  }
+
+  // Declare a parameter variable in the scope.
+  void DeclParamVar(const std::string& name, const nvinfer1::Dims& dims) {
+    DeclVar(name, dims, true);
+  }
+
+  void DeclOutputVar(const std::string& name, const nvinfer1::Dims& dims) {
+    DeclVar(name, dims);
+  }
+
+  void DeclVar(const std::string& name, const std::vector<int> dim_vec) {
+    platform::CPUPlace place;
+    platform::CPUDeviceContext ctx(place);
+
+    auto* x = scope_.Var(name);
+    auto* x_tensor = x->GetMutable<framework::LoDTensor>();
+    x_tensor->Resize(framework::make_ddim(dim_vec));
+    RandomizeTensor(x_tensor, place, ctx);
+  }
+  // Declare a variable in a fluid Scope.
+  void DeclVar(const std::string& name, const nvinfer1::Dims& dims,
+               bool is_param = false) {
+    // Init Fluid tensor.
+    std::vector<int64_t> dim_vec(dims.d, dims.d + dims.nbDims);
+    // There is no batch size in an ITensor's shape, but we should add it to
+    // the fluid tensor's shape. If the variable is not a parameter and the
+    // if_add_batch_ flag is true, add the max batch size to dim_vec.
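+    // e.g. an ITensor of shape (2, 5, 5) with max_batch_size_ == 5 is
+    // declared in the fluid scope as a LoDTensor of shape [5, 2, 5, 5].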
+    if (is_param != true && if_add_batch_ == true)
+      dim_vec.insert(dim_vec.begin(), max_batch_size_);
+
+    DeclVar(name, dim_vec);
+  }
+
+  void SetOp(const framework::proto::OpDesc& desc) {
+    op_ = framework::OpRegistry::CreateOp(desc);
+
+    Singleton<OpConverter>::Global().ConvertOp(
+        desc, parameters_, scope_, engine_.get(), true /*test_mode*/);
+
+    engine_->FreezeNetwork();
+
+    // Declare outputs.
+    op_desc_.reset(new framework::OpDesc(desc, nullptr));
+
+    // Set Inputs.
+    for (const auto& input : op_desc_->InputArgumentNames()) {
+      if (parameters_.count(input)) continue;
+      auto* var = scope_.FindVar(input);
+      PADDLE_ENFORCE(var);
+      auto tensor = var->GetMutable<framework::LoDTensor>();
+
+      engine_->SetInputFromCPU(
+          input, static_cast<void*>(tensor->data<float>()),
+          sizeof(float) *
+              analysis::AccuDims(tensor->dims(), tensor->dims().size()));
+    }
+  }
+
+  void Execute(int batch_size) {
+    // Execute Fluid Op
+    PADDLE_ENFORCE_LE(batch_size, max_batch_size_);
+    platform::CPUPlace place;
+    platform::CPUDeviceContext ctx(place);
+    op_->Run(scope_, place);
+    // Execute TRT.
+    engine_->Execute(batch_size);
+    cudaStreamSynchronize(*engine_->stream());
+
+    ASSERT_FALSE(op_desc_->OutputArgumentNames().empty());
+    const size_t output_space_size = 3000;
+    for (const auto& output : op_desc_->OutputArgumentNames()) {
+      std::vector<float> fluid_out;
+      std::vector<float> trt_out(output_space_size);
+      engine_->GetOutputInCPU(output, &trt_out[0], output_space_size);
+      cudaStreamSynchronize(*engine_->stream());
+
+      auto* var = scope_.FindVar(output);
+      auto tensor = var->GetMutable<framework::LoDTensor>();
+      framework::TensorToVector(*tensor, ctx, &fluid_out);
+
+      size_t fluid_out_size = fluid_out.size();
+      if (if_add_batch_ == true) {
+        fluid_out_size =
+            batch_size * (framework::product(tensor->dims()) / max_batch_size_);
+      }
+      // Compare the two outputs
+      ASSERT_FALSE(fluid_out.empty());
+      for (size_t i = 0; i < fluid_out_size; i++) {
+        // Loosen the threshold for CI on different machine models.
+        EXPECT_LT(std::abs(fluid_out[i] - trt_out[i]), 2e-5);
+      }
+    }
+  }
+
+  framework::Scope& scope() { return scope_; }
+
+ private:
+  std::unique_ptr<TensorRTEngine> engine_;
+  cudaStream_t stream_;
+  std::unique_ptr<framework::OperatorBase> op_;
+  std::unique_ptr<framework::OpDesc> op_desc_;
+  const std::unordered_set<std::string>& parameters_;
+  framework::Scope& scope_;
+  // The ITensor of trt does not contain the batch size,
+  // but, in most cases, we need to set the batch size for
+  // fluid's tensor shape. This variable indicates
+  // whether to add the batch size to the fluid tensor shape.
+  bool if_add_batch_;
+  int max_batch_size_;
+};
+
+}  // namespace tensorrt
+}  // namespace inference
+}  // namespace paddle
diff --git a/paddle/fluid/inference/tensorrt/engine.cc b/paddle/fluid/inference/tensorrt/engine.cc
index df123a5907..b821c3d0bf 100644
--- a/paddle/fluid/inference/tensorrt/engine.cc
+++ b/paddle/fluid/inference/tensorrt/engine.cc
@@ -1,7 +1,7 @@
 /* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
+Licensed under the Apache License, Version 2.0 (the "License"); you may not use
+this file except in compliance with the License.
 You may obtain a copy of the License at
 http://www.apache.org/licenses/LICENSE-2.0
@@ -18,6 +18,7 @@ limitations under the License. */
 #include <NvInfer.h>
 #include <cuda.h>
 #include <glog/logging.h>
+#include "paddle/fluid/inference/analysis/helper.h"
 #include "paddle/fluid/inference/tensorrt/helper.h"
 #include "paddle/fluid/platform/enforce.h"
@@ -25,21 +26,35 @@ namespace paddle {
 namespace inference {
 namespace tensorrt {
-void TensorRTEngine::Build(const DescType& paddle_model) {
+int TensorRTEngine::runtime_batch_ = 1;
+
+void TensorRTEngine::Build(const DescType &paddle_model) {
   PADDLE_ENFORCE(false, "not implemented");
 }
 void TensorRTEngine::Execute(int batch_size) {
-  infer_context_->enqueue(batch_size, buffers_.data(), *stream_, nullptr);
+  batch_size_ = batch_size;
+  std::vector<void *> buffers;
+  for (auto &buf : buffers_) {
+    PADDLE_ENFORCE_NOT_NULL(buf.buffer, "buffer should be allocated");
+    PADDLE_ENFORCE_GT(buf.max_size, 0);
+    PADDLE_ENFORCE(buf.device == DeviceType::GPU);
+    buffers.push_back(buf.buffer);
+  }
+  PADDLE_ENFORCE_NOT_NULL(stream_);
+  infer_context_->enqueue(batch_size, buffers.data(), *stream_, nullptr);
   cudaStreamSynchronize(*stream_);
+  SetRuntimeBatch(batch_size);
 }
 TensorRTEngine::~TensorRTEngine() {
+  cudaStreamSynchronize(*stream_);
   // clean buffer
-  for (auto& buffer : buffers_) {
-    if (buffer != nullptr) {
-      PADDLE_ENFORCE_EQ(0, cudaFree(buffer));
-      buffer = nullptr;
+  for (auto &buf : buffers_) {
+    if (buf.device == DeviceType::GPU && buf.buffer != nullptr) {
+      PADDLE_ENFORCE_EQ(0, cudaFree(buf.buffer));
+      buf.buffer = nullptr;
+      buf.max_size = 0;
     }
   }
 }
@@ -59,75 +74,123 @@ void TensorRTEngine::FreezeNetwork() {
   infer_context_.reset(infer_engine_->createExecutionContext());
   // allocate GPU buffers.
-  buffers_.resize(buffer_sizes_.size(), nullptr);
-  for (auto& item : buffer_sizes_) {
+  buffers_.resize(buffer_sizes_.size());
+  for (auto &item : buffer_sizes_) {
+    // The output buffers are not set in the network building phase, so they
+    // need to be inferred from the TensorRT network.
     if (item.second == 0) {
       auto slot_offset = infer_engine_->getBindingIndex(item.first.c_str());
+      auto dims = infer_engine_->getBindingDimensions(slot_offset);
       item.second = kDataTypeSize[static_cast<int>(
                         infer_engine_->getBindingDataType(slot_offset))] *
-                    AccumDims(infer_engine_->getBindingDimensions(slot_offset));
+                    analysis::AccuDims(dims.d, dims.nbDims) * max_batch_;
+      PADDLE_ENFORCE_GT(item.second, 0);
     }
-    PADDLE_ENFORCE_EQ(0, cudaMalloc(&buffer(item.first), item.second));
+
+    auto &buf = buffer(item.first);
+    buf.max_size = item.second * max_batch_;
+    CHECK(buf.buffer == nullptr);  // buffer should be allocated only once.
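+    // max_size caps how many bytes SetInputFromCPU/SetInputFromGPU may copy
+    // into this binding's device buffer.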
+
+    PADDLE_ENFORCE_EQ(0, cudaMalloc(&buf.buffer, item.second * max_batch_));
+    buf.size = 0;
+    PADDLE_ENFORCE_LE(buf.max_size, 1 << 30);  // 1G
+    buf.device = DeviceType::GPU;
   }
 }
-nvinfer1::ITensor* TensorRTEngine::DeclareInput(const std::string& name,
+nvinfer1::ITensor *TensorRTEngine::DeclareInput(const std::string &name,
                                                 nvinfer1::DataType dtype,
-                                                const nvinfer1::Dims& dim) {
+                                                const nvinfer1::Dims &dims) {
   PADDLE_ENFORCE_EQ(0, buffer_sizes_.count(name), "duplicate input name %s",
                     name);
   PADDLE_ENFORCE(infer_network_ != nullptr, "should call InitNetwork first");
-  auto* input = infer_network_->addInput(name.c_str(), dtype, dim);
+  auto *input = infer_network_->addInput(name.c_str(), dtype, dims);
   PADDLE_ENFORCE(input, "infer network add input %s failed", name);
-  buffer_sizes_[name] = kDataTypeSize[static_cast<int>(dtype)] * AccumDims(dim);
+  buffer_sizes_[name] = kDataTypeSize[static_cast<int>(dtype)] *
+                        analysis::AccuDims(dims.d, dims.nbDims) * max_batch_;
+  PADDLE_ENFORCE(input->isNetworkInput());
   TensorRTEngine::SetITensor(name, input);
   return input;
 }
-void TensorRTEngine::DeclareOutput(const nvinfer1::ILayer* layer, int offset,
-                                   const std::string& name) {
+void TensorRTEngine::DeclareOutput(const nvinfer1::ILayer *layer, int offset,
+                                   const std::string &name) {
   PADDLE_ENFORCE_EQ(0, buffer_sizes_.count(name), "duplicate output name %s",
                     name);
-  auto* output = layer->getOutput(offset);
+  auto *output = layer->getOutput(offset);
+  SetITensor(name, output);
   PADDLE_ENFORCE(output != nullptr);
   output->setName(name.c_str());
+  PADDLE_ENFORCE(!output->isNetworkInput());
   infer_network_->markOutput(*output);
+  PADDLE_ENFORCE(output->isNetworkOutput());
   // output buffers' size can only be decided later; set zero here to mark it,
   // and it will be reset later.
   buffer_sizes_[name] = 0;
 }
-void TensorRTEngine::DeclareOutput(const std::string& name) {
+void TensorRTEngine::DeclareOutput(const std::string &name) {
   PADDLE_ENFORCE_EQ(0, buffer_sizes_.count(name), "duplicate output name %s",
                     name);
-  auto* output = TensorRTEngine::GetITensor(name);
+  auto *output = TensorRTEngine::GetITensor(name);
   PADDLE_ENFORCE(output != nullptr);
   output->setName(name.c_str());
+  PADDLE_ENFORCE(!output->isNetworkInput());
   infer_network_->markOutput(*output);
   // output buffers' size can only be decided later; set zero here to mark it,
   // and it will be reset later.
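+  // (FreezeNetwork finds these zero-sized entries and fills them in once
+  // TensorRT can report the binding dimensions.)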
   buffer_sizes_[name] = 0;
 }
-void* TensorRTEngine::GetOutputInGPU(const std::string& name) {
-  return buffer(name);
+void *TensorRTEngine::GetOutputInGPU(const std::string &name) {
+  return buffer(name).buffer;
+}
+
+void TensorRTEngine::GetOutputInGPU(const std::string &name, void *dst,
+                                    size_t max_size) {
+  // determine data size
+  auto *output = TensorRTEngine::GetITensor(name);
+  nvinfer1::Dims dims = output->getDimensions();
+  auto dim_size = analysis::AccuDims(dims.d, dims.nbDims);
+  size_t dst_size = dim_size * runtime_batch_ *
+                    kDataTypeSize[static_cast<int>(output->getType())];
+
+  auto it = buffer_sizes_.find(name);
+  PADDLE_ENFORCE(it != buffer_sizes_.end());
+  PADDLE_ENFORCE_GT(it->second, 0);
+  PADDLE_ENFORCE_LE(dst_size, it->second);
+  PADDLE_ENFORCE_GE(max_size, dst_size);
+  auto &buf = buffer(name);
+  PADDLE_ENFORCE_NOT_NULL(buf.buffer, "buffer should be allocated before");
+  PADDLE_ENFORCE_EQ(cudaMemcpyAsync(dst, buf.buffer, dst_size,
+                                    cudaMemcpyDeviceToDevice, *stream_),
+                    0);
 }
-void TensorRTEngine::GetOutputInCPU(const std::string& name, void* dst,
+void TensorRTEngine::GetOutputInCPU(const std::string &name, void *dst,
                                     size_t max_size) {
   // determine data size
+
+  auto *output = TensorRTEngine::GetITensor(name);
+  nvinfer1::Dims dims = output->getDimensions();
+  auto dim_size = analysis::AccuDims(dims.d, dims.nbDims);
+  size_t dst_size = dim_size * runtime_batch_ *
+                    kDataTypeSize[static_cast<int>(output->getType())];
   auto it = buffer_sizes_.find(name);
   PADDLE_ENFORCE(it != buffer_sizes_.end());
   PADDLE_ENFORCE_GT(it->second, 0);
-  PADDLE_ENFORCE_GE(max_size, it->second);
-  PADDLE_ENFORCE_EQ(0, cudaMemcpyAsync(dst, buffer(name), it->second,
+  PADDLE_ENFORCE_LE(dst_size, it->second);
+  PADDLE_ENFORCE_GE(max_size, dst_size);
+  auto &buf = buffer(name);
+  PADDLE_ENFORCE_NOT_NULL(buf.buffer, "buffer should be allocated before");
+  PADDLE_ENFORCE_EQ(0, cudaMemcpyAsync(dst, buf.buffer, dst_size,
                                        cudaMemcpyDeviceToHost, *stream_));
 }
-void*& TensorRTEngine::buffer(const std::string& name) {
+Buffer &TensorRTEngine::buffer(const std::string &name) {
   PADDLE_ENFORCE(infer_engine_ != nullptr, "call FreezeNetwork first.");
   auto it = buffer_sizes_.find(name);
   PADDLE_ENFORCE(it != buffer_sizes_.end());
@@ -135,27 +198,49 @@ void*& TensorRTEngine::buffer(const std::string& name) {
   return buffers_[slot_offset];
 }
-void TensorRTEngine::SetInputFromCPU(const std::string& name, void* data,
+void TensorRTEngine::SetInputFromCPU(const std::string &name, const void *data,
+                                     size_t size) {
+  auto &buf = buffer(name);
+  PADDLE_ENFORCE_NOT_NULL(buf.buffer);
+  PADDLE_ENFORCE_NOT_NULL(data);
+  PADDLE_ENFORCE_NOT_NULL(stream_);
+  PADDLE_ENFORCE_LE(size, buf.max_size, "buffer is too small");
+  PADDLE_ENFORCE(buf.device == DeviceType::GPU);
+  buf.size = size;
+  PADDLE_ENFORCE_EQ(0, cudaMemcpyAsync(buf.buffer, data, size,
+                                       cudaMemcpyHostToDevice, *stream_));
+}
+
+void TensorRTEngine::SetInputFromGPU(const std::string &name, const void *data,
                                      size_t size) {
-  void* buf = buffer(name);
-  cudaMemcpyAsync(buf, data, size, cudaMemcpyHostToDevice, *stream_);
-  PADDLE_ENFORCE_EQ(
-      0, cudaMemcpyAsync(buf, data, size, cudaMemcpyHostToDevice, *stream_));
+  auto &buf = buffer(name);
+  buf.size = size;
+  PADDLE_ENFORCE_NOT_NULL(buf.buffer);
+  PADDLE_ENFORCE_LE(size, buf.max_size, "buffer is too small");
+  PADDLE_ENFORCE(buf.device == DeviceType::GPU);
+  PADDLE_ENFORCE_EQ(0, cudaMemcpyAsync(buf.buffer, data, size,
+                                       cudaMemcpyDeviceToDevice, *stream_));
 }
-void TensorRTEngine::SetITensor(const std::string& name,
-                                nvinfer1::ITensor* tensor) {
+void TensorRTEngine::SetITensor(const std::string &name,
+                                nvinfer1::ITensor *tensor) {
   PADDLE_ENFORCE(tensor != nullptr);
-  PADDLE_ENFORCE_EQ(0, itensor_map_.count(name), "duplicate itensor name %s",
+  PADDLE_ENFORCE_EQ(0, itensor_map_.count(name), "duplicate ITensor name %s",
                     name);
   itensor_map_[name] = tensor;
 }
-nvinfer1::ITensor* TensorRTEngine::GetITensor(const std::string& name) {
-  PADDLE_ENFORCE(itensor_map_.count(name), "no itensor %s", name);
+nvinfer1::ITensor *TensorRTEngine::GetITensor(const std::string &name) {
+  PADDLE_ENFORCE(itensor_map_.count(name), "no ITensor %s", name);
   return itensor_map_[name];
 }
+void TensorRTEngine::SetRuntimeBatch(size_t batch_size) {
+  runtime_batch_ = batch_size;
+}
+
+int TensorRTEngine::GetRuntimeBatch() { return runtime_batch_; }
+
 }  // namespace tensorrt
 }  // namespace inference
 }  // namespace paddle
diff --git a/paddle/fluid/inference/tensorrt/engine.h b/paddle/fluid/inference/tensorrt/engine.h
index ec919b943d..694468c419 100644
--- a/paddle/fluid/inference/tensorrt/engine.h
+++ b/paddle/fluid/inference/tensorrt/engine.h
@@ -21,6 +21,7 @@ limitations under the License. */
 #include <unordered_map>
 #include "paddle/fluid/inference/engine.h"
 #include "paddle/fluid/inference/tensorrt/helper.h"
+#include "paddle/fluid/inference/utils/singleton.h"
 namespace paddle {
 namespace inference {
@@ -37,23 +38,28 @@ class TensorRTEngine : public EngineBase {
   // Weight is model parameter.
   class Weight {
    public:
-    Weight(nvinfer1::DataType dtype, void* value, int num_elem) {
+    Weight(nvinfer1::DataType dtype, void* value, size_t num_elem) {
       w_.type = dtype;
       w_.values = value;
       w_.count = num_elem;
     }
     const nvinfer1::Weights& get() { return w_; }
+    std::vector<int64_t> dims;
+
    private:
     nvinfer1::Weights w_;
   };
-  TensorRTEngine(int max_batch, int max_workspace, cudaStream_t* stream,
+  TensorRTEngine(int max_batch, int max_workspace,
+                 cudaStream_t* stream = nullptr,
                  nvinfer1::ILogger& logger = NaiveLogger::Global())
       : max_batch_(max_batch),
         max_workspace_(max_workspace),
-        stream_(stream),
-        logger_(logger) {}
+        stream_(stream ? stream : &default_stream_),
+        logger_(logger) {
+    cudaStreamCreate(&default_stream_);
+  }
   virtual ~TensorRTEngine();
@@ -87,16 +93,20 @@ class TensorRTEngine : public EngineBase {
   // these memory directly for acceleration, for example, output the converted
   // data directly to the buffer to save data copy overhead.
   // NOTE this should be used after calling `FreezeNetwork`.
-  void*& buffer(const std::string& name);
+  Buffer& buffer(const std::string& name) override;
+
+  cudaStream_t* stream() { return stream_; }
   // Fill an input from CPU memory with name and size.
-  void SetInputFromCPU(const std::string& name, void* data, size_t size);
+  void SetInputFromCPU(const std::string& name, const void* data, size_t size);
   // TODO(Superjomn) is this method necessary given that buffer(xxx) can be
   // accessed directly. Fill an input from GPU memory with name and size.
-  void SetInputFromGPU(const std::string& name, void* data, size_t size);
+  void SetInputFromGPU(const std::string& name, const void* data, size_t size);
   // Get an output called name; the output of tensorrt is in GPU, so this method
-  // will just return the output's GPU memory address.
+  // Return the output's GPU memory address without copy.
   void* GetOutputInGPU(const std::string& name);
+  // Copy data into dst inside the GPU device.
+  void GetOutputInGPU(const std::string& name, void* dst, size_t max_size);
   // LOW EFFICIENCY!
   // Get output to CPU; this will trigger a memory copy from GPU
   // to CPU.
   void GetOutputInCPU(const std::string& name, void* dst, size_t max_size);
@@ -107,16 +117,25 @@ class TensorRTEngine : public EngineBase {
   nvinfer1::ICudaEngine* engine() { return infer_engine_.get(); }
   nvinfer1::INetworkDefinition* network() { return infer_network_.get(); }
+  void SetRuntimeBatch(size_t batch_size);
+  int GetRuntimeBatch();
 private:
  // the max batch size
  int max_batch_;
+  // the runtime batch size
+  static int runtime_batch_;
  // the max memory size the engine uses
  int max_workspace_;
+
+  // batch size of the current data, updated at each execution.
+  int batch_size_{-1};
  cudaStream_t* stream_;
+  // If stream_ is not set from outside, hold its own stream.
+  cudaStream_t default_stream_;
  nvinfer1::ILogger& logger_;
-  std::vector<void*> buffers_;
+  std::vector<Buffer> buffers_;
  // max data size for the buffers.
  std::unordered_map<std::string, size_t> buffer_sizes_;
  std::unordered_map<std::string, nvinfer1::ITensor*>
@@ -125,7 +144,11 @@
  // TensorRT related internal members
  template <typename T>
  struct Destroyer {
-    void operator()(T* x) { x->destroy(); }
+    void operator()(T* x) {
+      if (x) {
+        x->destroy();
+      }
+    }
  };
  template <typename T>
  using infer_ptr = std::unique_ptr<T, Destroyer<T>>;
@@ -149,6 +172,38 @@
 #define TRT_ENGINE_ADD_LAYER(engine__, layer__, ARGS...) \
   engine__->network()->add##layer__(ARGS);
+/*
+ * Helper to control the TensorRT engine's creation and deletion.
+ */
+class TRT_EngineManager {
+ public:
+  bool HasEngine(const std::string& name) const {
+    return engines_.count(name) != 0;
+  }
+
+  // Get an engine called `name`.
+  TensorRTEngine* Get(const std::string& name) const {
+    return engines_.at(name).get();
+  }
+
+  // Create or get an engine called `name`
+  TensorRTEngine* Create(int max_batch, int max_workspace, cudaStream_t* stream,
+                         const std::string& name) {
+    auto* p = new TensorRTEngine(max_batch, max_workspace, stream);
+    engines_[name].reset(p);
+    return p;
+  }
+
+  void DeleteALl() {
+    for (auto& item : engines_) {
+      item.second.reset(nullptr);
+    }
+  }
+
+ private:
+  std::unordered_map<std::string, std::unique_ptr<TensorRTEngine>> engines_;
+};
+
 }  // namespace tensorrt
 }  // namespace inference
 }  // namespace paddle
diff --git a/paddle/fluid/inference/tensorrt/helper.h b/paddle/fluid/inference/tensorrt/helper.h
index 2b402cce60..b6e7968108 100644
--- a/paddle/fluid/inference/tensorrt/helper.h
+++ b/paddle/fluid/inference/tensorrt/helper.h
@@ -26,15 +26,6 @@ namespace tensorrt {
 namespace dy = paddle::platform::dynload;
-static size_t AccumDims(nvinfer1::Dims dims) {
-  size_t num = dims.nbDims == 0 ? 0 : 1;
-  for (int i = 0; i < dims.nbDims; i++) {
-    PADDLE_ENFORCE_GT(dims.d[i], 0);
-    num *= dims.d[i];
-  }
-  return num;
-}
-
 // TensorRT data type to size
 const int kDataTypeSize[] = {
     4,  // kFLOAT
diff --git a/paddle/fluid/inference/tensorrt/test_engine.cc b/paddle/fluid/inference/tensorrt/test_engine.cc
index a08b78f930..dc03702990 100644
--- a/paddle/fluid/inference/tensorrt/test_engine.cc
+++ b/paddle/fluid/inference/tensorrt/test_engine.cc
@@ -28,7 +28,7 @@ class TensorRTEngineTest : public ::testing::Test {
 protected:
  void SetUp() override {
    ASSERT_EQ(0, cudaStreamCreate(&stream_));
-    engine_ = new TensorRTEngine(1, 1 << 10, &stream_);
+    engine_ = new TensorRTEngine(10, 1 << 10, &stream_);
    engine_->InitNetwork();
  }
@@ -71,12 +71,112 @@ TEST_F(TensorRTEngineTest, add_layer) {
  LOG(INFO) << "to get output";
  float y_cpu;
-  engine_->GetOutputInCPU("y", &y_cpu, sizeof(float));
+  engine_->GetOutputInCPU("y", &y_cpu, 1 * sizeof(float));
  LOG(INFO) << "to checkout output";
  ASSERT_EQ(y_cpu, x_v * 2 + 3);
}
+TEST_F(TensorRTEngineTest, add_layer_multi_dim) {
+  // Weight in CPU memory.
+  // It seems tensorrt's FC layer uses col-major weights: [[1.0, 3.3], [1.1, 4.4]]
+  // instead of row-major, which is [[1.0, 1.1], [3.3, 4.4]].
+  float raw_weight[4] = {1.0, 1.1, 3.3, 4.4};
+  float raw_bias[2] = {1.3, 2.4};
+
+  TensorRTEngine::Weight weight(nvinfer1::DataType::kFLOAT, raw_weight, 4);
+  TensorRTEngine::Weight bias(nvinfer1::DataType::kFLOAT, raw_bias, 2);
+  auto* x = engine_->DeclareInput("x", nvinfer1::DataType::kFLOAT,
+                                  nvinfer1::DimsCHW{1, 2, 1});
+  auto* fc_layer = TRT_ENGINE_ADD_LAYER(engine_, FullyConnected, *x, 2,
+                                        weight.get(), bias.get());
+  PADDLE_ENFORCE(fc_layer != nullptr);
+
+  engine_->DeclareOutput(fc_layer, 0, "y");
+  engine_->FreezeNetwork();
+  ASSERT_EQ(engine_->engine()->getNbBindings(), 2);
+
+  float x_v[2] = {1.0, 2.0};
+  engine_->SetInputFromCPU("x", reinterpret_cast<void*>(&x_v),
+                           2 * sizeof(float));
+  engine_->Execute(1);
+
+  LOG(INFO) << "to get output";
+  float y_cpu[2] = {-1., -1.};
+
+  auto dims = engine_->GetITensor("y")->getDimensions();
+  ASSERT_EQ(dims.nbDims, 3);
+  ASSERT_EQ(dims.d[0], 2);
+  ASSERT_EQ(dims.d[1], 1);
+  engine_->GetOutputInCPU("y", &y_cpu[0], 2 * sizeof(float));
+  ASSERT_EQ(y_cpu[0], 4.5);
+  ASSERT_EQ(y_cpu[1], 14.5);
+}
+
+TEST_F(TensorRTEngineTest, test_conv2d) {
+  // Weight in CPU memory.
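+  // With a 3x3 all-ones kernel, zero padding of 1 and an all-ones input, a
+  // corner output sums a 2x2 window (= 4.0) and an edge output a 2x3 window
+  // (= 6.0), which is what the assertions below check.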
+  float raw_weight[9] = {1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0};
+  float raw_bias[1] = {0};
+
+  TensorRTEngine::Weight weight(nvinfer1::DataType::kFLOAT, raw_weight, 9);
+  TensorRTEngine::Weight bias(nvinfer1::DataType::kFLOAT, raw_bias, 1);
+  auto* x = engine_->DeclareInput("x", nvinfer1::DataType::kFLOAT,
+                                  nvinfer1::Dims3{1, 3, 3});
+  auto* conv_layer =
+      TRT_ENGINE_ADD_LAYER(engine_, Convolution, *x, 1, nvinfer1::DimsHW{3, 3},
+                           weight.get(), bias.get());
+  PADDLE_ENFORCE(conv_layer != nullptr);
+  conv_layer->setStride(nvinfer1::DimsHW{1, 1});
+  conv_layer->setPadding(nvinfer1::DimsHW{1, 1});
+
+  engine_->DeclareOutput(conv_layer, 0, "y");
+  engine_->FreezeNetwork();
+  ASSERT_EQ(engine_->engine()->getNbBindings(), 2);
+
+  float x_v[18] = {1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
+                   1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0};
+  engine_->SetInputFromCPU("x", reinterpret_cast<void*>(&x_v),
+                           18 * sizeof(float));
+  engine_->Execute(2);
+
+  LOG(INFO) << "to get output";
+  float* y_cpu = new float[18];
+  engine_->GetOutputInCPU("y", &y_cpu[0], 18 * sizeof(float));
+  ASSERT_EQ(y_cpu[0], 4.0);
+  ASSERT_EQ(y_cpu[1], 6.0);
+}
+
+TEST_F(TensorRTEngineTest, test_pool2d) {
+  // Weight in CPU memory.
+  auto* x = engine_->DeclareInput("x", nvinfer1::DataType::kFLOAT,
+                                  nvinfer1::Dims3{1, 2, 2});
+
+  nvinfer1::PoolingType pool_t = nvinfer1::PoolingType::kAVERAGE;
+  auto* pool_layer =
+      TRT_ENGINE_ADD_LAYER(engine_, Pooling, *const_cast<nvinfer1::ITensor*>(x),
+                           pool_t, nvinfer1::DimsHW{2, 2});
+
+  PADDLE_ENFORCE(pool_layer != nullptr);
+  pool_layer->setStride(nvinfer1::DimsHW{1, 1});
+  pool_layer->setPadding(nvinfer1::DimsHW{0, 0});
+
+  engine_->DeclareOutput(pool_layer, 0, "y");
+  engine_->FreezeNetwork();
+  ASSERT_EQ(engine_->engine()->getNbBindings(), 2);
+
+  float x_v[8] = {1.0, 2.0, 5.0, 0.0, 2.0, 3.0, 5.0, 10.0};
+  engine_->SetInputFromCPU("x", reinterpret_cast<void*>(&x_v),
+                           8 * sizeof(float));
+  engine_->Execute(2);
+
+  LOG(INFO) << "to get output";
+  float* y_cpu = new float[2];
+  engine_->GetOutputInCPU("y", &y_cpu[0], 2 * sizeof(float));
+
+  ASSERT_EQ(y_cpu[0], 2.0);
+  ASSERT_EQ(y_cpu[1], 5.0);
+}
+
 }  // namespace tensorrt
 }  // namespace inference
 }  // namespace paddle
diff --git a/paddle/fluid/inference/tests/book/CMakeLists.txt b/paddle/fluid/inference/tests/book/CMakeLists.txt
index dbb81462b8..017fc4cd7b 100644
--- a/paddle/fluid/inference/tests/book/CMakeLists.txt
+++ b/paddle/fluid/inference/tests/book/CMakeLists.txt
@@ -17,7 +17,7 @@ function(inference_test TARGET_NAME)
     string(REGEX REPLACE "^_$" "" arg "${arg}")
     cc_test(test_inference_${TARGET_NAME}${arg}
             SRCS test_inference_${TARGET_NAME}.cc
-            DEPS paddle_fluid
+            DEPS paddle_fluid_origin
             ARGS --dirname=${PYTHON_TESTS_DIR}/book/${TARGET_NAME}${arg}.inference.model)
     set_tests_properties(test_inference_${TARGET_NAME}${arg}
                          PROPERTIES DEPENDS test_${TARGET_NAME})
@@ -38,3 +38,11 @@ inference_test(recommender_system)
 #inference_test(rnn_encoder_decoder)
 #inference_test(understand_sentiment ARGS conv)
 inference_test(word2vec)
+
+# This is an ugly workaround to make this test run
+# TODO(TJ): clean me up
+cc_test(test_inference_nlp
+        SRCS test_inference_nlp.cc
+        DEPS paddle_fluid_origin
+        ARGS
+        --model_path=${PADDLE_BINARY_DIR}/python/paddle/fluid/tests/book/recognize_digits_mlp.inference.model)
diff --git a/paddle/fluid/inference/tests/book/test_inference_image_classification.cc b/paddle/fluid/inference/tests/book/test_inference_image_classification.cc
index c4fd1e298b..60c761c528 100644
--- a/paddle/fluid/inference/tests/book/test_inference_image_classification.cc
+++ b/paddle/fluid/inference/tests/book/test_inference_image_classification.cc
@@ -16,7 +16,6 @@ limitations under the License. */
 #include "gtest/gtest.h"
 #include "paddle/fluid/inference/tests/test_helper.h"
-DEFINE_string(data_set, "cifar10", "Data set to test");
 DEFINE_string(dirname, "", "Directory of the inference model.");
 DEFINE_string(fp16_dirname, "", "Directory of the float16 inference model.");
 DEFINE_int32(batch_size, 1, "Batch size of input data");
@@ -35,19 +34,19 @@ TEST(inference, image_classification) {
   // 0. Call `paddle::framework::InitDevices()` initialize all the devices
   // In unittests, this is done in paddle/testing/paddle_gtest_main.cc
+  const bool is_combined = false;
+  std::vector<std::vector<int64_t>> feed_target_shapes =
+      GetFeedTargetShapes(dirname, is_combined);
+
   paddle::framework::LoDTensor input;
   // Use normalized image pixels as input data,
   // which should be in the range [0.0, 1.0].
-  if (FLAGS_data_set == "cifar10") {
-    SetupTensor<float>(&input, {FLAGS_batch_size, 3, 32, 32},
-                       static_cast<float>(0), static_cast<float>(1));
-  } else if (FLAGS_data_set == "imagenet") {
-    SetupTensor<float>(&input, {FLAGS_batch_size, 3, 224, 224},
-                       static_cast<float>(0), static_cast<float>(1));
-  } else {
-    LOG(FATAL) << "Only cifar10 or imagenet is supported.";
-  }
-
+  feed_target_shapes[0][0] = FLAGS_batch_size;
+  paddle::framework::DDim input_dims =
+      paddle::framework::make_ddim(feed_target_shapes[0]);
+  LOG(INFO) << input_dims;
+  SetupTensor<float>(&input, input_dims, static_cast<float>(0),
+                     static_cast<float>(1));
   std::vector<paddle::framework::LoDTensor*> cpu_feeds;
   cpu_feeds.push_back(&input);
@@ -60,7 +59,7 @@ TEST(inference, image_classification) {
     LOG(INFO) << "--- CPU Runs: ---";
     LOG(INFO) << "Batch size is " << FLAGS_batch_size;
     TestInference<paddle::platform::CPUPlace, false, true>(
-        dirname, cpu_feeds, cpu_fetchs1, FLAGS_repeat);
+        dirname, cpu_feeds, cpu_fetchs1, FLAGS_repeat, is_combined);
     LOG(INFO) << output1.dims();
   }
@@ -73,7 +72,7 @@ TEST(inference, image_classification) {
     LOG(INFO) << "--- GPU Runs: ---";
     LOG(INFO) << "Batch size is " << FLAGS_batch_size;
     TestInference<paddle::platform::CUDAPlace, false, true>(
-        dirname, cpu_feeds, cpu_fetchs2, FLAGS_repeat);
+        dirname, cpu_feeds, cpu_fetchs2, FLAGS_repeat, is_combined);
     LOG(INFO) << output2.dims();
     if (!FLAGS_skip_cpu) {
diff --git a/paddle/fluid/inference/tests/book/test_inference_nlp.cc b/paddle/fluid/inference/tests/book/test_inference_nlp.cc
new file mode 100644
index 0000000000..e2a3e9d46e
--- /dev/null
+++ b/paddle/fluid/inference/tests/book/test_inference_nlp.cc
@@ -0,0 +1,221 @@
+/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+#include <sys/time.h>
+#include <time.h>
+#include <fstream>
+#include <thread>  // NOLINT
+#include "gflags/gflags.h"
+#include "gtest/gtest.h"
+#include "paddle/fluid/inference/tests/test_helper.h"
+#include "paddle/fluid/platform/cpu_helper.h"
+
+DEFINE_string(model_path, "", "Directory of the inference model.");
+DEFINE_string(data_file, "", "File of input index data.");
+DEFINE_int32(repeat, 100, "Running the inference program repeat times");
+DEFINE_bool(prepare_vars, true, "Prepare variables before executor");
+DEFINE_int32(num_threads, 1, "Number of threads should be used");
+DECLARE_bool(use_mkldnn);
+DECLARE_int32(paddle_num_threads);
+
+inline double GetCurrentMs() {
+  struct timeval time;
+  gettimeofday(&time, NULL);
+  return 1e+3 * time.tv_sec + 1e-3 * time.tv_usec;
+}
+
+// This function just gives dummy data for the recognize_digits model.
+size_t DummyData(std::vector<paddle::framework::LoDTensor>* out) {
+  paddle::framework::LoDTensor input;
+  SetupTensor<float>(&input, {1, 1, 28, 28}, -1.f, 1.f);
+  out->emplace_back(input);
+  return 1;
+}
+
+// Load the input word-index data from the file and save it into LoDTensors.
+// Returns the total number of words.
+size_t LoadData(std::vector<paddle::framework::LoDTensor>* out,
+                const std::string& filename) {
+  if (filename.empty()) {
+    return DummyData(out);
+  }
+
+  size_t sz = 0;
+  std::fstream fin(filename);
+  std::string line;
+  out->clear();
+  while (getline(fin, line)) {
+    std::istringstream iss(line);
+    std::vector<int64_t> ids;
+    std::string field;
+    while (getline(iss, field, ' ')) {
+      ids.push_back(stoi(field));
+    }
+    if (ids.size() >= 1024) {
+      // Synced with the NLP guys: they will ignore inputs larger than 1024
+      continue;
+    }
+
+    paddle::framework::LoDTensor words;
+    paddle::framework::LoD lod{{0, ids.size()}};
+    words.set_lod(lod);
+    int64_t* pdata = words.mutable_data<int64_t>(
+        {static_cast<int64_t>(ids.size()), 1}, paddle::platform::CPUPlace());
+    memcpy(pdata, ids.data(), words.numel() * sizeof(int64_t));
+    out->emplace_back(words);
+    sz += ids.size();
+  }
+  return sz;
+}
+
+// Split the input data samples into small jobs, as balanced as possible,
+// according to the number of threads.
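+// e.g. 5 samples on 2 threads are assigned round-robin: thread 0 gets
+// samples {0, 2, 4} and thread 1 gets samples {1, 3}.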
+void SplitData(
+    const std::vector<paddle::framework::LoDTensor>& datasets,
+    std::vector<std::vector<const paddle::framework::LoDTensor*>>* jobs,
+    const int num_threads) {
+  size_t s = 0;
+  jobs->resize(num_threads);
+  while (s < datasets.size()) {
+    for (auto it = jobs->begin(); it != jobs->end(); it++) {
+      it->emplace_back(&datasets[s]);
+      s++;
+      if (s >= datasets.size()) {
+        break;
+      }
+    }
+  }
+}
+
+void ThreadRunInfer(
+    const int tid, paddle::framework::Scope* scope,
+    const std::vector<std::vector<const paddle::framework::LoDTensor*>>& jobs) {
+  // framework::ProgramDesc may not be thread-safe
+  paddle::platform::CPUPlace place;
+  paddle::framework::Executor executor(place);
+  auto& sub_scope = scope->NewScope();
+  auto inference_program =
+      paddle::inference::Load(&executor, scope, FLAGS_model_path);
+
+  auto ctx = executor.Prepare(*inference_program, /*block_id*/ 0);
+  executor.CreateVariables(*inference_program, &sub_scope, /*block_id*/ 0);
+
+  const std::vector<std::string>& feed_target_names =
+      inference_program->GetFeedTargetNames();
+  const std::vector<std::string>& fetch_target_names =
+      inference_program->GetFetchTargetNames();
+
+  PADDLE_ENFORCE_EQ(fetch_target_names.size(), 1UL);
+  std::map<std::string, paddle::framework::LoDTensor*> fetch_targets;
+  paddle::framework::LoDTensor outtensor;
+  fetch_targets[fetch_target_names[0]] = &outtensor;
+
+  std::map<std::string, const paddle::framework::LoDTensor*> feed_targets;
+  PADDLE_ENFORCE_EQ(feed_target_names.size(), 1UL);
+
+  auto& inputs = jobs[tid];
+  auto start_ms = GetCurrentMs();
+  for (size_t i = 0; i < inputs.size(); ++i) {
+    feed_targets[feed_target_names[0]] = inputs[i];
+    executor.RunPreparedContext(ctx.get(), &sub_scope, &feed_targets,
+                                &fetch_targets, false /*create_local_scope*/);
+  }
+  auto stop_ms = GetCurrentMs();
+  scope->DeleteScope(&sub_scope);
+  LOG(INFO) << "Tid: " << tid << ", process " << inputs.size()
+            << " samples, avg time per sample: "
+            << (stop_ms - start_ms) / inputs.size() << " ms";
+}
+
+TEST(inference, nlp) {
+  if (FLAGS_model_path.empty()) {
+    LOG(FATAL) << "Usage: ./example --model_path=path/to/your/model";
+  }
+  if (FLAGS_data_file.empty()) {
+    LOG(WARNING) << "No data file provided, will use dummy data!"
+                 << "Note: if you use an nlp model, please provide the data file.";
+  }
+  LOG(INFO) << "Model Path: " << FLAGS_model_path;
+  LOG(INFO) << "Data File: " << FLAGS_data_file;
+
+  std::vector<paddle::framework::LoDTensor> datasets;
+  size_t num_total_words = LoadData(&datasets, FLAGS_data_file);
+  LOG(INFO) << "Number of samples (seq_len<1024): " << datasets.size();
+  LOG(INFO) << "Total number of words: " << num_total_words;
+
+  // 0. Call `paddle::framework::InitDevices()` initialize all the devices
+  std::unique_ptr<paddle::framework::Scope> scope(
+      new paddle::framework::Scope());
+
+  paddle::platform::SetNumThreads(FLAGS_paddle_num_threads);
+
+  double start_ms = 0, stop_ms = 0;
+  if (FLAGS_num_threads > 1) {
+    std::vector<std::vector<const paddle::framework::LoDTensor*>> jobs;
+    SplitData(datasets, &jobs, FLAGS_num_threads);
+    std::vector<std::unique_ptr<std::thread>> threads;
+    start_ms = GetCurrentMs();
+    for (int i = 0; i < FLAGS_num_threads; ++i) {
+      threads.emplace_back(
+          new std::thread(ThreadRunInfer, i, scope.get(), std::ref(jobs)));
+    }
+    for (int i = 0; i < FLAGS_num_threads; ++i) {
+      threads[i]->join();
+    }
+    stop_ms = GetCurrentMs();
+  } else {
+    // 1. Define place, executor, scope
+    paddle::platform::CPUPlace place;
+    paddle::framework::Executor executor(place);
+
+    // 2. Initialize the inference_program and load parameters
+    std::unique_ptr<paddle::framework::ProgramDesc> inference_program;
+    inference_program = InitProgram(&executor, scope.get(), FLAGS_model_path,
+                                    /*model combined*/ false);
+    // always prepare context
+    std::unique_ptr<paddle::framework::ExecutorPrepareContext> ctx;
+    ctx = executor.Prepare(*inference_program, 0);
+    if (FLAGS_prepare_vars) {
+      executor.CreateVariables(*inference_program, scope.get(), 0);
+    }
+    // prepare fetch
+    const std::vector<std::string>& fetch_target_names =
+        inference_program->GetFetchTargetNames();
+    PADDLE_ENFORCE_EQ(fetch_target_names.size(), 1UL);
+    std::map<std::string, paddle::framework::LoDTensor*> fetch_targets;
+    paddle::framework::LoDTensor outtensor;
+    fetch_targets[fetch_target_names[0]] = &outtensor;
+
+    // prepare feed
+    const std::vector<std::string>& feed_target_names =
+        inference_program->GetFeedTargetNames();
+    PADDLE_ENFORCE_EQ(feed_target_names.size(), 1UL);
+    std::map<std::string, const paddle::framework::LoDTensor*> feed_targets;
+
+    // feed data and run
+    start_ms = GetCurrentMs();
+    for (size_t i = 0; i < datasets.size(); ++i) {
+      feed_targets[feed_target_names[0]] = &(datasets[i]);
+      executor.RunPreparedContext(ctx.get(), scope.get(), &feed_targets,
+                                  &fetch_targets, !FLAGS_prepare_vars);
+    }
+    stop_ms = GetCurrentMs();
+    LOG(INFO) << "Tid: 0, process " << datasets.size()
+              << " samples, avg time per sample: "
+              << (stop_ms - start_ms) / datasets.size() << " ms";
+  }
+  LOG(INFO) << "Total inference time with " << FLAGS_num_threads
+            << " threads : " << (stop_ms - start_ms) / 1000.0
+            << " sec, QPS: " << datasets.size() / ((stop_ms - start_ms) / 1000);
+}
diff --git a/paddle/fluid/inference/tests/test_helper.h b/paddle/fluid/inference/tests/test_helper.h
index af2a7a5620..695790a37d 100644
--- a/paddle/fluid/inference/tests/test_helper.h
+++ b/paddle/fluid/inference/tests/test_helper.h
@@ -22,6 +22,8 @@ limitations under the License. */
 #include "paddle/fluid/inference/io.h"
 #include "paddle/fluid/platform/profiler.h"
+DECLARE_bool(use_mkldnn);
+
 template <typename T>
 void SetupTensor(paddle::framework::LoDTensor* input,
                  paddle::framework::DDim dims, T lower, T upper) {
@@ -89,6 +91,50 @@ void CheckError(const paddle::framework::LoDTensor& output1,
   EXPECT_EQ(count, 0U) << "There are " << count << " different elements.";
 }
+std::unique_ptr<paddle::framework::ProgramDesc> InitProgram(
+    paddle::framework::Executor* executor, paddle::framework::Scope* scope,
+    const std::string& dirname, const bool is_combined = false) {
+  std::unique_ptr<paddle::framework::ProgramDesc> inference_program;
+  if (is_combined) {
+    // All parameters are saved in a single file.
+    // Hard-coding the file names of the program and parameters in unittest.
+    // The file names should be consistent with those used in the Python API
+    // `fluid.io.save_inference_model`.
+    std::string prog_filename = "__model_combined__";
+    std::string param_filename = "__params_combined__";
+    inference_program =
+        paddle::inference::Load(executor, scope, dirname + "/" + prog_filename,
+                                dirname + "/" + param_filename);
+  } else {
+    // Parameters are saved in separate files located in the specified
+    // `dirname`.
+    inference_program = paddle::inference::Load(executor, scope, dirname);
+  }
+  return inference_program;
+}
+
+std::vector<std::vector<int64_t>> GetFeedTargetShapes(
+    const std::string& dirname, const bool is_combined = false) {
+  auto place = paddle::platform::CPUPlace();
+  auto executor = paddle::framework::Executor(place);
+  auto* scope = new paddle::framework::Scope();
+
+  auto inference_program = InitProgram(&executor, scope, dirname, is_combined);
+  auto& global_block = inference_program->Block(0);
+
+  const std::vector<std::string>& feed_target_names =
+      inference_program->GetFeedTargetNames();
+  std::vector<std::vector<int64_t>> feed_target_shapes;
+  for (size_t i = 0; i < feed_target_names.size(); ++i) {
+    auto* var = global_block.FindVar(feed_target_names[i]);
+    std::vector<int64_t> var_shape = var->GetShape();
+    feed_target_shapes.push_back(var_shape);
+  }
+
+  delete scope;
+  return feed_target_shapes;
+}
+
 template <typename Place, bool CreateVars = true, bool PrepareContext = false>
 void TestInference(const std::string& dirname,
                    const std::vector<paddle::framework::LoDTensor*>& cpu_feeds,
@@ -105,7 +151,7 @@ void TestInference(const std::string& dirname,
     state = paddle::platform::ProfilerState::kCPU;
   } else {
 #ifdef PADDLE_WITH_CUDA
-    state = paddle::platform::ProfilerState::kCUDA;
+    state = paddle::platform::ProfilerState::kAll;
     // The default device_id of paddle::platform::CUDAPlace is 0.
     // Users can get the device_id using:
     // int device_id = place.GetDeviceId();
@@ -124,26 +170,11 @@ void TestInference(const std::string& dirname,
     paddle::platform::RecordEvent record_event(
         "init_program",
         paddle::platform::DeviceContextPool::Instance().Get(place));
-
-    if (is_combined) {
-      // All parameters are saved in a single file.
-      // Hard-coding the file names of program and parameters in unittest.
-      // The file names should be consistent with that used in Python API
-      // `fluid.io.save_inference_model`.
-      std::string prog_filename = "__model_combined__";
-      std::string param_filename = "__params_combined__";
-      inference_program = paddle::inference::Load(
-          &executor, scope, dirname + "/" + prog_filename,
-          dirname + "/" + param_filename);
-    } else {
-      // Parameters are saved in separate files sited in the specified
-      // `dirname`.
-      inference_program = paddle::inference::Load(&executor, scope, dirname);
-    }
+    inference_program = InitProgram(&executor, scope, dirname, is_combined);
   }
   // Disable the profiler and print the timing information
   paddle::platform::DisableProfiler(paddle::platform::EventSortingKey::kDefault,
-                                    "load_program_profiler.txt");
+                                    "load_program_profiler");
   paddle::platform::ResetProfiler();
   // 3. Get the feed_target_names and fetch_target_names
@@ -165,7 +196,10 @@ void TestInference(const std::string& dirname,
     fetch_targets[fetch_target_names[i]] = cpu_fetchs[i];
   }
-  // 6. Run the inference program
+  // 6. If FLAGS_use_mkldnn is exported as True, use mkldnn related ops.
+  if (FLAGS_use_mkldnn) executor.EnableMKLDNN(*inference_program);
+
+  // 7. Run the inference program
   {
     if (!CreateVars) {
      // If users don't want to create and destroy variables every time they
@@ -176,13 +210,14 @@
     // Ignore the profiling results of the first run
     std::unique_ptr<paddle::framework::ExecutorPrepareContext> ctx;
+    bool CreateLocalScope = CreateVars;
     if (PrepareContext) {
       ctx = executor.Prepare(*inference_program, 0);
       executor.RunPreparedContext(ctx.get(), scope, &feed_targets,
-                                  &fetch_targets, CreateVars);
+                                  &fetch_targets, CreateLocalScope, CreateVars);
     } else {
       executor.Run(*inference_program, scope, &feed_targets, &fetch_targets,
-                   CreateVars);
+                   CreateLocalScope, CreateVars);
     }
     // Enable the profiler
@@ -198,17 +233,17 @@
         // Note: if you change the inference_program, you need to call
         // executor.Prepare() again to get a new ExecutorPrepareContext.
         executor.RunPreparedContext(ctx.get(), scope, &feed_targets,
-                                    &fetch_targets, CreateVars);
+                                    &fetch_targets, CreateLocalScope,
+                                    CreateVars);
       } else {
         executor.Run(*inference_program, scope, &feed_targets, &fetch_targets,
-                     CreateVars);
+                     CreateLocalScope, CreateVars);
       }
     }
     // Disable the profiler and print the timing information
     paddle::platform::DisableProfiler(
-        paddle::platform::EventSortingKey::kDefault,
-        "run_inference_profiler.txt");
+        paddle::platform::EventSortingKey::kDefault, "run_inference_profiler");
     paddle::platform::ResetProfiler();
   }
diff --git a/paddle/fluid/memory/detail/buddy_allocator.cc b/paddle/fluid/memory/detail/buddy_allocator.cc
index 4194ba1979..c2f45fdc99 100644
--- a/paddle/fluid/memory/detail/buddy_allocator.cc
+++ b/paddle/fluid/memory/detail/buddy_allocator.cc
@@ -15,12 +15,17 @@ limitations under the License. */
 #include "paddle/fluid/memory/detail/buddy_allocator.h"
 #include "glog/logging.h"
+DEFINE_bool(free_idle_memory, false,
+            "If it is true, Paddle will try to free idle memory chunks during "
+            "running time.");
+
 namespace paddle {
 namespace memory {
 namespace detail {
-BuddyAllocator::BuddyAllocator(SystemAllocator* system_allocator,
-                               size_t min_chunk_size, size_t max_chunk_size)
+BuddyAllocator::BuddyAllocator(
+    std::unique_ptr<SystemAllocator> system_allocator, size_t min_chunk_size,
+    size_t max_chunk_size)
     : min_chunk_size_(min_chunk_size),
       max_chunk_size_(max_chunk_size),
       cache_(system_allocator->UseGpu()),
@@ -151,13 +156,14 @@ void BuddyAllocator::Free(void* p) {
   pool_.insert(
       IndexSizeAddress(block->index(cache_), block->total_size(cache_), block));
-  // Clean up if existing too much free memory
-
-  // Prefer freeing fallback allocation first
-  CleanIdleFallBackAlloc();
+  if (FLAGS_free_idle_memory) {
+    // Clean up if there is too much free memory
+    // Prefer freeing fallback allocation first
+    CleanIdleFallBackAlloc();
-  // Free normal allocation
-  CleanIdleNormalAlloc();
+    // Free normal allocation
+    CleanIdleNormalAlloc();
+  }
 }
 size_t BuddyAllocator::Used() { return total_used_; }
diff --git a/paddle/fluid/memory/detail/buddy_allocator.h b/paddle/fluid/memory/detail/buddy_allocator.h
index 2f39d774d6..f0c83efc23 100644
--- a/paddle/fluid/memory/detail/buddy_allocator.h
+++ b/paddle/fluid/memory/detail/buddy_allocator.h
@@ -14,6 +14,7 @@ limitations under the License. */
diff --git a/paddle/fluid/memory/detail/buddy_allocator.h b/paddle/fluid/memory/detail/buddy_allocator.h
index 2f39d774d6..f0c83efc23 100644
--- a/paddle/fluid/memory/detail/buddy_allocator.h
+++ b/paddle/fluid/memory/detail/buddy_allocator.h
@@ -14,6 +14,7 @@ limitations under the License. */
 #pragma once

+#include <memory>
 #include <mutex>  // NOLINT
 #include <set>
 #include <unordered_map>
@@ -32,8 +33,8 @@ namespace detail {

 class BuddyAllocator {
  public:
-  BuddyAllocator(SystemAllocator* system_allocator, size_t min_chunk_size,
-                 size_t max_chunk_size);
+  BuddyAllocator(std::unique_ptr<SystemAllocator> system_allocator,
+                 size_t min_chunk_size, size_t max_chunk_size);

   ~BuddyAllocator();

@@ -103,7 +104,7 @@ class BuddyAllocator {

  private:
   /*! Allocate CPU/GPU memory from system */
-  SystemAllocator* system_allocator_;
+  std::unique_ptr<SystemAllocator> system_allocator_;
   std::mutex mutex_;
 };

diff --git a/paddle/fluid/memory/detail/system_allocator.cc b/paddle/fluid/memory/detail/system_allocator.cc
index d539052916..9b1ab1e228 100644
--- a/paddle/fluid/memory/detail/system_allocator.cc
+++ b/paddle/fluid/memory/detail/system_allocator.cc
@@ -43,14 +43,16 @@ void* CPUAllocator::Alloc(size_t* index, size_t size) {

   *index = 0;  // unlock memory

-  void* p;
+  void* p = nullptr;

 #ifdef PADDLE_WITH_MKLDNN
   // refer to https://github.com/01org/mkl-dnn/blob/master/include/mkldnn.hpp
   // memory alignment
-  PADDLE_ENFORCE_EQ(posix_memalign(&p, 4096ul, size), 0);
+  PADDLE_ENFORCE_EQ(posix_memalign(&p, 4096ul, size), 0, "Alloc %ld error!",
+                    size);
 #else
-  PADDLE_ENFORCE_EQ(posix_memalign(&p, 32ul, size), 0);
+  PADDLE_ENFORCE_EQ(posix_memalign(&p, 32ul, size), 0, "Alloc %ld error!",
+                    size);
 #endif
   PADDLE_ENFORCE(p, "Fail to allocate CPU memory: size = %d .", size);

diff --git a/paddle/fluid/memory/malloc.cc b/paddle/fluid/memory/malloc.cc
index 0c74f62de5..7c800b3c16 100644
--- a/paddle/fluid/memory/malloc.cc
+++ b/paddle/fluid/memory/malloc.cc
@@ -12,6 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License. */

+#include <mutex>
+
 #include "paddle/fluid/memory/malloc.h"

 #include "glog/logging.h"
@@ -20,6 +22,12 @@ limitations under the License. */
 #include "paddle/fluid/memory/detail/system_allocator.h"
 #include "paddle/fluid/platform/gpu_info.h"

+DEFINE_bool(init_allocated_mem, false,
+            "Some op implementations mistakenly assume that memory "
+            "allocated by BuddyAllocator is always zeroed. "
+            "To catch such errors in time, init_allocated_mem makes "
+            "every new allocation be filled with a small garbage value "
+            "during unit testing.");
 DECLARE_double(fraction_of_gpu_memory_to_use);

 namespace paddle {
@@ -28,12 +36,15 @@ namespace memory {
 using BuddyAllocator = detail::BuddyAllocator;

 BuddyAllocator* GetCPUBuddyAllocator() {
+  static std::once_flag init_flag;
   static detail::BuddyAllocator* a = nullptr;
-  if (a == nullptr) {
-    a = new detail::BuddyAllocator(new detail::CPUAllocator,
-                                   platform::CpuMinChunkSize(),
-                                   platform::CpuMaxChunkSize());
-  }
+
+  std::call_once(init_flag, []() {
+    a = new detail::BuddyAllocator(
+        std::unique_ptr<detail::SystemAllocator>(new detail::CPUAllocator),
+        platform::CpuMinChunkSize(), platform::CpuMaxChunkSize());
+  });
+
   return a;
 }

@@ -41,6 +52,9 @@ template <>
 void* Alloc<platform::CPUPlace>(platform::CPUPlace place, size_t size) {
   VLOG(10) << "Allocate " << size << " bytes on " << platform::Place(place);
   void* p = GetCPUBuddyAllocator()->Alloc(size);
+  if (FLAGS_init_allocated_mem) {
+    memset(p, 0xEF, size);
+  }
   VLOG(10) << "  pointer=" << p;
   return p;
 }
@@ -59,27 +73,33 @@ size_t Used(platform::CPUPlace place) {

 #ifdef PADDLE_WITH_CUDA

 BuddyAllocator* GetGPUBuddyAllocator(int gpu_id) {
-  static BuddyAllocator** as = NULL;
-  if (as == NULL) {
+  static std::once_flag init_flag;
+  static detail::BuddyAllocator** a_arr = nullptr;
+
+  std::call_once(init_flag, [gpu_id]() {
     int gpu_num = platform::GetCUDADeviceCount();
-    as = new BuddyAllocator*[gpu_num];
-    for (int gpu = 0; gpu < gpu_num; gpu++) {
-      as[gpu] = nullptr;
+    PADDLE_ENFORCE(gpu_id < gpu_num, "gpu_id:%d should be less than gpu_num:%d",
+                   gpu_id, gpu_num);
+
+    a_arr = new BuddyAllocator*[gpu_num];
+    for (int i = 0; i < gpu_num; i++) {
+      a_arr[i] = nullptr;
+      platform::SetDeviceId(i);
+      a_arr[i] = new BuddyAllocator(
+          std::unique_ptr<detail::SystemAllocator>(new detail::GPUAllocator(i)),
+          platform::GpuMinChunkSize(), platform::GpuMaxChunkSize());
+
+      VLOG(10) << "\n\nNOTE: each GPU device uses "
+               << FLAGS_fraction_of_gpu_memory_to_use * 100
+               << "% of GPU memory.\n"
+               << "You can set GFlags environment variable '"
+               << "FLAGS_fraction_of_gpu_memory_to_use"
+               << "' to change the fraction of GPU usage.\n\n";
     }
-  }
+  });
+
   platform::SetDeviceId(gpu_id);
-  if (!as[gpu_id]) {
-    as[gpu_id] = new BuddyAllocator(new detail::GPUAllocator(gpu_id),
-                                    platform::GpuMinChunkSize(),
-                                    platform::GpuMaxChunkSize());
-    VLOG(10) << "\n\nNOTE: each GPU device use "
-             << FLAGS_fraction_of_gpu_memory_to_use * 100
-             << "% of GPU memory.\n"
-             << "You can set GFlags environment variable '"
-             << "FLAGS_fraction_of_gpu_memory_to_use"
-             << "' to change the fraction of GPU usage.\n\n";
-  }
-  return as[gpu_id];
+  return a_arr[gpu_id];
 }

 template <>
@@ -104,6 +124,9 @@ void* Alloc<platform::CUDAPlace>(platform::CUDAPlace place, size_t size) {
     LOG(WARNING) << "GPU memory used: " << Used(place);
     platform::SetDeviceId(cur_dev);
   }
+  if (FLAGS_init_allocated_mem) {
+    cudaMemset(ptr, 0xEF, size);
+  }
   return ptr;
 }

@@ -113,12 +136,16 @@ void Free<platform::CUDAPlace>(platform::CUDAPlace place, void* p) {
 }

 BuddyAllocator* GetCUDAPinnedBuddyAllocator() {
-  static BuddyAllocator* ba = NULL;
-  if (ba == NULL) {
-    ba = new BuddyAllocator(new detail::CUDAPinnedAllocator,
+  static std::once_flag init_flag;
+  static BuddyAllocator* ba = nullptr;
+
+  std::call_once(init_flag, []() {
+    ba = new BuddyAllocator(std::unique_ptr<detail::SystemAllocator>(
+                                new detail::CUDAPinnedAllocator),
                             platform::CUDAPinnedMinChunkSize(),
                             platform::CUDAPinnedMaxChunkSize());
-  }
+  });
+
   return ba;
 }
"cudaMallocHost Cannot allocate " << size << " bytes in CUDAPinnedPlace"; } + if (FLAGS_init_allocated_mem) { + memset(ptr, 0xEF, size); + } return ptr; } diff --git a/paddle/fluid/operators/.flatten_op.cc.swp b/paddle/fluid/operators/.flatten_op.cc.swp new file mode 100644 index 0000000000..3395b6074b Binary files /dev/null and b/paddle/fluid/operators/.flatten_op.cc.swp differ diff --git a/paddle/fluid/operators/CMakeLists.txt b/paddle/fluid/operators/CMakeLists.txt index 256aded8ca..ff0e989464 100644 --- a/paddle/fluid/operators/CMakeLists.txt +++ b/paddle/fluid/operators/CMakeLists.txt @@ -166,6 +166,10 @@ function(op_library TARGET) # NOTE(*): activation use macro to regist the kernels, set use_op manually. if(${TARGET} STREQUAL "activation") file(APPEND ${pybind_file} "USE_OP(relu);\n") + elseif(${TARGET} STREQUAL "fake_dequantize") + file(APPEND ${pybind_file} "USE_OP(fake_dequantize_max_abs);\n") + elseif(${TARGET} STREQUAL "tensorrt_engine_op") + message(STATUS "Pybind skips [tensorrt_engine_op], for this OP is only used in inference") else() file(APPEND ${pybind_file} "USE_OP(${TARGET});\n") endif() @@ -182,32 +186,71 @@ else() set(DEPS_OPS ${DEPS_OPS} nccl_op) endif() -add_subdirectory(detail) +set(DISTRIBUTE_DEPS "") if(WITH_DISTRIBUTE) - set(DISTRIBUTE_DEPS sendrecvop_grpc grpc++_unsecure grpc_unsecure gpr cares zlib protobuf) + add_subdirectory(distributed) + + set(DISTRIBUTE_DEPS "") + if(WITH_GRPC) + set(DISTRIBUTE_DEPS sendrecvop_grpc grpc++_unsecure grpc_unsecure gpr cares zlib protobuf node) + else() + set(DISTRIBUTE_DEPS sendrecvop_brpc brpc leveldb snappystream snappy protobuf ssl crypto zlib node) + if(WITH_BRPC_RDMA) + find_library(IBVERBS_LIBRARY NAMES ibverbs) + ADD_LIBRARY(ibverbs SHARED IMPORTED GLOBAL) + SET_PROPERTY(TARGET ibverbs PROPERTY IMPORTED_LOCATION ${IBVERBS_LIBRARY}) + + + find_library(RDMACM_LIBRARY NAMES rdmacm) + ADD_LIBRARY(rdmacm SHARED IMPORTED GLOBAL) + SET_PROPERTY(TARGET rdmacm PROPERTY IMPORTED_LOCATION ${RDMACM_LIBRARY}) + + set(DISTRIBUTE_DEPS ${DISTRIBUTE_DEPS} ibverbs rdmacm) + endif() + endif() + set(DISTRIBUTE_COMPILE_FLAGS "-Wno-non-virtual-dtor -Wno-error=non-virtual-dtor -Wno-error=delete-non-virtual-dtor") - op_library(send_op DEPS ${DISTRIBUTE_DEPS}) - set_source_files_properties(send_op.cc PROPERTIES COMPILE_FLAGS ${DISTRIBUTE_COMPILE_FLAGS}) - op_library(prefetch_op DEPS ${DISTRIBUTE_DEPS}) - set_source_files_properties(prefetch_op.cc PROPERTIES COMPILE_FLAGS ${DISTRIBUTE_COMPILE_FLAGS}) - op_library(recv_op DEPS ${DISTRIBUTE_DEPS}) - set_source_files_properties(recv_op.cc PROPERTIES COMPILE_FLAGS ${DISTRIBUTE_COMPILE_FLAGS}) - op_library(listen_and_serv_op DEPS ${DISTRIBUTE_DEPS}) - set_source_files_properties(listen_and_serv_op.cc PROPERTIES COMPILE_FLAGS ${DISTRIBUTE_COMPILE_FLAGS}) - op_library(send_vars_op DEPS ${DISTRIBUTE_DEPS}) - set_source_files_properties(send_vars_op.cc PROPERTIES COMPILE_FLAGS ${DISTRIBUTE_COMPILE_FLAGS}) - op_library(send_barrier_op DEPS ${DISTRIBUTE_DEPS}) - set_source_files_properties(send_barrier_op.cc PROPERTIES COMPILE_FLAGS ${DISTRIBUTE_COMPILE_FLAGS}) - set_source_files_properties(send_recv_op_test.cc PROPERTIES COMPILE_FLAGS ${DISTRIBUTE_COMPILE_FLAGS}) - cc_test(test_send_recv SRCS send_recv_op_test.cc DEPS prefetch_op send_op listen_and_serv_op sum_op executor) + foreach(dist_op "prefetch_op" "checkpoint_notify_op" "listen_and_serv_op" "send_op" "recv_op" "send_barrier_op" "fetch_barrier_op") + op_library(${dist_op} DEPS ${DISTRIBUTE_DEPS}) + set_source_files_properties(${dist_op}.cc 
PROPERTIES COMPILE_FLAGS ${DISTRIBUTE_COMPILE_FLAGS}) + endforeach() + + #set_source_files_properties(send_recv_op_test.cc PROPERTIES COMPILE_FLAGS ${DISTRIBUTE_COMPILE_FLAGS}) + #cc_test(test_send_recv SRCS send_recv_op_test.cc DEPS prefetch_op send_op + # listen_and_serv_op sum_op executor SERIAL) + if(WITH_GPU) + set_source_files_properties(test_send_nccl_id.cc PROPERTIES COMPILE_FLAGS ${DISTRIBUTE_COMPILE_FLAGS}) + cc_test(test_send_nccl_id SRCS test_send_nccl_id.cc DEPS listen_and_serv_op ${DISTRIBUTE_DEPS} executor SERIAL) + if(WITH_GRPC) + op_library(gen_nccl_id_op DEPS nccl_common sendrecvop_grpc) + else() + op_library(gen_nccl_id_op DEPS nccl_common sendrecvop_brpc) + endif() + set_source_files_properties(gen_nccl_id_op.cc PROPERTIES COMPILE_FLAGS ${DISTRIBUTE_COMPILE_FLAGS}) + else() + set(DEPS_OPS ${DEPS_OPS} gen_nccl_id_op) + endif() else() - set(DEPS_OPS ${DEPS_OPS} send_op prefetch_op recv_op listen_and_serv_op send_vars_op send_barrier_op) + set(DEPS_OPS ${DEPS_OPS} checkpoint_notify_op prefetch_op recv_op listen_and_serv_op send_op send_barrier_op fetch_barrier_op gen_nccl_id_op) endif() op_library(cross_entropy_op DEPS cross_entropy) -op_library(softmax_with_cross_entropy_op DEPS cross_entropy softmax) +if(WITH_GPU) + op_library(softmax_with_cross_entropy_op DEPS cross_entropy softmax cub) +else() + op_library(softmax_with_cross_entropy_op DEPS cross_entropy softmax) +endif() + op_library(softmax_op DEPS softmax) op_library(sequence_softmax_op DEPS softmax) +if (WITH_GPU AND TENSORRT_FOUND) + op_library(tensorrt_engine_op DEPS tensorrt_engine tensorrt_converter) + nv_test(test_tensorrt_engine_op SRCS tensorrt_engine_op_test.cc + DEPS tensorrt_engine_op + analysis) +else() + set(DEPS_OPS ${DEPS_OPS} tensorrt_engine_op) +endif() op_library(sum_op DEPS selected_rows_functor) op_library(sgd_op DEPS selected_rows_functor) op_library(print_op DEPS lod_tensor) @@ -223,15 +266,21 @@ op_library(max_sequence_len_op DEPS lod_rank_table) op_library(sequence_conv_op DEPS context_project) op_library(sequence_pool_op DEPS sequence_pooling) op_library(lstm_op DEPS sequence2batch lstm_compute) +op_library(hierarchical_sigmoid_op DEPS matrix_bit_code) op_library(lstmp_op DEPS sequence2batch lstm_compute) op_library(gru_op DEPS sequence2batch gru_compute) op_library(recurrent_op DEPS executor) op_library(warpctc_op DEPS dynload_warpctc sequence_padding sequence_scale) op_library(cos_sim_op DEPS cos_sim_functor) op_library(parallel_do_op DEPS executor) +op_library(unsqueeze_op DEPS reshape_op) +op_library(squeeze_op DEPS reshape_op) +op_library(extract_rows_op DEPS memory) +op_library(flatten_op DEPS reshape_op) if (WITH_GPU) op_library(conv_op DEPS vol2col depthwise_conv im2col) + op_library(layer_norm_op DEPS cub) else() op_library(conv_op DEPS vol2col im2col) endif() @@ -268,7 +317,13 @@ foreach(src ${READER_LIBRARY}) set(OP_LIBRARY ${src} ${OP_LIBRARY}) endforeach() +add_subdirectory(detection) +foreach(src ${DETECTION_LIBRARY}) + set(OP_LIBRARY ${src} ${OP_LIBRARY}) +endforeach() + set(GLOB_OP_LIB ${OP_LIBRARY} CACHE INTERNAL "Global OP library") +set(GLOB_DISTRIBUTE_DEPS ${DISTRIBUTE_DEPS} CACHE INTERNAL "distributed dependency") cc_test(gather_test SRCS gather_test.cc DEPS tensor) cc_test(scatter_test SRCS scatter_test.cc DEPS tensor) diff --git a/paddle/fluid/operators/accuracy_op.cc b/paddle/fluid/operators/accuracy_op.cc index ac10d759fe..42fcace179 100644 --- a/paddle/fluid/operators/accuracy_op.cc +++ b/paddle/fluid/operators/accuracy_op.cc @@ -63,8 +63,7 @@ class AccuracyOp : 
public framework::OperatorWithKernel {

 class AccuracyOpMaker : public framework::OpProtoAndCheckerMaker {
  public:
-  AccuracyOpMaker(OpProto *proto, OpAttrChecker *op_checker)
-      : OpProtoAndCheckerMaker(proto, op_checker) {
+  void Make() override {
     // TODO(typhoonzero): support both inference value and indices.
     AddInput("Out", "The network output of topk (inferences)");
     AddInput("Indices", "The the network output of topk (indices)");
diff --git a/paddle/fluid/operators/activation_mkldnn_op.cc b/paddle/fluid/operators/activation_mkldnn_op.cc
index ab7c612271..137bca5e2b 100644
--- a/paddle/fluid/operators/activation_mkldnn_op.cc
+++ b/paddle/fluid/operators/activation_mkldnn_op.cc
@@ -12,177 +12,321 @@ See the License for the specific language governing permissions and
 limitations under the License. */

-#include "mkldnn.hpp"
 #include "paddle/fluid/operators/activation_op.h"
-#include "paddle/fluid/operators/mkldnn_activation_op.h"
+#include "paddle/fluid/platform/mkldnn_helper.h"

 namespace paddle {
 namespace operators {

-using paddle::framework::Tensor;
-using paddle::platform::MKLDNNDeviceContext;
+using framework::DataLayout;
+using framework::Tensor;
+using mkldnn::memory;
+using mkldnn::primitive;
+using mkldnn::stream;
+using platform::GetMKLDNNFormat;
+using platform::MKLDNNDeviceContext;
+using platform::to_void_cast;

 namespace {
-template <typename T, typename ExecContext>
-void eltwise_forward(const ExecContext &ctx, mkldnn::algorithm algorithm,
-                     const T alpha = 0, const T beta = 0) {
+std::string gethash(const mkldnn::memory::dims &operand_dims,
+                    const mkldnn::algorithm algorithm) {
+  auto dim2str = [](const mkldnn::memory::dims &operand_dims) {
+    std::string dstr = "";
+    for (size_t i = 0; i < operand_dims.size(); ++i) {
+      dstr += std::to_string(operand_dims[i]) + "-";
+    }
+    return dstr;
+  };
+  return dim2str(operand_dims) + std::to_string(algorithm);
+}
+}  // namespace
+
+template <typename Functor>
+class MKLDNNActivationKernel
+    : public framework::OpKernel<typename Functor::ELEMENT_TYPE> {
+ public:
+  void Compute(const framework::ExecutionContext &ctx) const override {
+    const auto *x = ctx.Input<Tensor>("X");
+    PADDLE_ENFORCE(x->layout() == DataLayout::kMKLDNN &&
+                       x->format() != memory::format::format_undef,
+                   "Wrong layout/format set for Input x tensor");
+
+    Functor functor;
+
+    auto attrs = functor.GetAttrs();
+    for (auto &attr : attrs) {
+      *attr.second = ctx.Attr<float>(attr.first);
+    }
+    functor(ctx);
+  }
+};
+
+template <typename Functor>
+class MKLDNNActivationGradKernel
+    : public framework::OpKernel<typename Functor::ELEMENT_TYPE> {
+ public:
+  void Compute(const framework::ExecutionContext &ctx) const override {
+    const auto *diff_y = ctx.Input<Tensor>(framework::GradVarName("Out"));
+    PADDLE_ENFORCE(diff_y->layout() == DataLayout::kMKLDNN &&
+                       diff_y->format() != memory::format::format_undef,
+                   "Wrong layout/format set for Input OutGrad tensor");
+
+    Functor functor;
+
+    auto attrs = functor.GetAttrs();
+    for (auto &attr : attrs) {
+      *attr.second = ctx.Attr<float>(attr.first);
+    }
+    functor(ctx);
+  }
+};
+
+template <typename T>
+void eltwise_forward(const framework::ExecutionContext &ctx,
+                     mkldnn::algorithm algorithm, const T alpha = 0,
+                     const T beta = 0) {
   PADDLE_ENFORCE(paddle::platform::is_cpu_place(ctx.GetPlace()),
                  "It must use CPUPlace.");
-
   auto &dev_ctx = ctx.template device_context<MKLDNNDeviceContext>();
   const auto &mkldnn_engine = dev_ctx.GetEngine();

-  // get buffers
-  const auto *src = ctx.template Input<Tensor>("X");
-  const auto *src_data = src->template data<T>();
+  const auto *x = ctx.Input<Tensor>("X");
+  auto *y = ctx.Output<Tensor>("Out");

-  auto *dst = ctx.template Output<Tensor>("Out");
-  const T *dst_data = dst->template mutable_data<T>(ctx.GetPlace());
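+  // Note on the caching used below: MKLDNN memories and primitives are
+  // stashed in the device context via SetBlob()/GetBlob(), keyed by the
+  // input dims, the algorithm, and the source memory format, so repeated
+  // runs reuse primitives instead of rebuilding them on every call.
+  const T *x_data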
= x->data(); + T *y_data = y->mutable_data(ctx.GetPlace()); - // get memory dim - PADDLE_ENFORCE(src->dims().size() == 2 || src->dims().size() == 4, + PADDLE_ENFORCE(x->dims().size() == 2 || x->dims().size() == 4, "Input dim must be with 2 or 4"); - std::vector src_tz = framework::vectorize2int(src->dims()); - - // create memory description - auto data_md = src_tz.size() == 2 - ? platform::MKLDNNMemDesc(src_tz, mkldnn::memory::f32, - mkldnn::memory::format::nc) - : platform::MKLDNNMemDesc(src_tz, mkldnn::memory::f32, - mkldnn::memory::format::nchw); - - // create memory primitives - auto src_memory = - mkldnn::memory({data_md, mkldnn_engine}, - static_cast(const_cast(src_data))); - auto dst_memory = - mkldnn::memory({data_md, mkldnn_engine}, - static_cast(const_cast(dst_data))); - auto forward_desc = mkldnn::eltwise_forward::desc( - mkldnn::prop_kind::forward_training, algorithm, data_md, alpha, beta); - - // save prim desc into global device context to be referred in backward path - const std::string key = ctx.op().Output("Out"); - const std::string key_eltwise_pd = key + "@eltwise_pd"; - auto forward_pd = std::make_shared( - forward_desc, mkldnn_engine); - dev_ctx.SetBlob(key_eltwise_pd, forward_pd); + std::vector src_tz = framework::vectorize2int(x->dims()); - auto eltwise = mkldnn::eltwise_forward(*forward_pd, src_memory, dst_memory); + auto src_format = + src_tz.size() == 2 ? mkldnn::memory::format::nc : x->format(); + + const std::string key = gethash(src_tz, algorithm); + const std::string key_src_data = + key + ctx.op().Output("Out") + "@eltwise_fwd_src_data"; + const std::string key_src_layout = + key + ctx.op().Output("Out") + "@eltwise_fwd_src_layout"; + const std::string key_with_layout = key + std::to_string(src_format); + const std::string key_src_mem = key_with_layout + "@eltwise_fwd_src_mem"; + const std::string key_dst_mem = key_with_layout + "@eltwise_fwd_dst_mem"; + const std::string key_fwd = key_with_layout + "@eltwise_fwd"; + const std::string key_fwd_pd = key_with_layout + "@eltwise_fwd_pd"; + + // save input data and layout to be referred in backward path + auto p_src_data = std::make_shared(x_data); + dev_ctx.SetBlob(key_src_data, p_src_data); + auto p_src_layout = std::make_shared(src_format); + dev_ctx.SetBlob(key_src_layout, p_src_layout); + + auto p_fwd = std::static_pointer_cast( + dev_ctx.GetBlob(key_fwd)); + + std::shared_ptr dst_memory; + + if (p_fwd == nullptr) { + // create mkldnn memory for input X + auto src_md = platform::MKLDNNMemDesc( + src_tz, platform::MKLDNNGetDataType(), src_format); + auto src_memory = std::shared_ptr( + new memory({src_md, mkldnn_engine}, to_void_cast(x_data))); + // save src_memory to be referred in backward path + dev_ctx.SetBlob(key_src_mem, src_memory); + + // create primitive descriptor for activation forward and save it + auto forward_desc = mkldnn::eltwise_forward::desc( + mkldnn::prop_kind::forward_training, algorithm, + src_memory->get_primitive_desc().desc(), alpha, beta); + auto forward_pd = std::make_shared( + forward_desc, mkldnn_engine); + + // save prim desc into global device context to be referred in backward path + dev_ctx.SetBlob(key_fwd_pd, forward_pd); + + // create mkldnn memory for output y + dst_memory = + std::make_shared(forward_pd->dst_primitive_desc(), y_data); + + dev_ctx.SetBlob(key_dst_mem, dst_memory); + + // create activation primitive + p_fwd = std::make_shared(*forward_pd, *src_memory, + *dst_memory); + dev_ctx.SetBlob(key_fwd, p_fwd); + } else { + // primitives already exist + auto src_memory 
= + std::static_pointer_cast(dev_ctx.GetBlob(key_src_mem)); + PADDLE_ENFORCE(src_memory != nullptr, + "Fail to find eltwise src_memory in device context."); + dst_memory = + std::static_pointer_cast(dev_ctx.GetBlob(key_dst_mem)); + PADDLE_ENFORCE(dst_memory != nullptr, + "Fail to find eltwise dst_memory in device context."); + + src_memory->set_data_handle(platform::to_void_cast(x_data)); + dst_memory->set_data_handle(y_data); + } // push primitive to stream and wait until it's executed - std::vector pipeline = {eltwise}; - mkldnn::stream(mkldnn::stream::kind::eager).submit(pipeline).wait(); + std::vector pipeline; + pipeline.push_back(*p_fwd); + stream(stream::kind::eager).submit(pipeline).wait(); + + y->set_layout(DataLayout::kMKLDNN); + y->set_format(GetMKLDNNFormat(*dst_memory)); } -template -void eltwise_grad(const ExecContext &ctx, mkldnn::algorithm algorithm, - const T alpha = 0, const T beta = 0) { +template +void eltwise_grad(const framework::ExecutionContext &ctx, + mkldnn::algorithm algorithm, const T alpha = 0, + const T beta = 0) { auto &dev_ctx = ctx.template device_context(); const auto &mkldnn_engine = dev_ctx.GetEngine(); - // get buffers - const auto *x = ctx.template Input("X"); - const auto *src = x->template data(); - - auto *dout = ctx.template Input(framework::GradVarName("Out")); - const auto *diff_dst = dout->template data(); + const auto *diff_y = ctx.Input(framework::GradVarName("Out")); + auto *diff_x = ctx.Output(framework::GradVarName("X")); + + const T *diff_y_data = diff_y->data(); + T *diff_x_data = diff_x->mutable_data(ctx.GetPlace()); + + std::vector diff_dst_tz = framework::vectorize2int(diff_y->dims()); + + auto diff_y_format = + diff_dst_tz.size() == 2 ? mkldnn::memory::format::nc : diff_y->format(); + + const std::string key = gethash(diff_dst_tz, algorithm); + const std::string key_src_data = + key + ctx.op().Input("Out") + "@eltwise_fwd_src_data"; + const std::string key_src_layout = + key + ctx.op().Input("Out") + "@eltwise_fwd_src_layout"; + const auto p_src_layout = + std::static_pointer_cast(dev_ctx.GetBlob(key_src_layout)); + const std::string key_src_mem = + key + std::to_string(*p_src_layout) + "@eltwise_fwd_src_mem"; + const std::string key_fwd_pd = + key + std::to_string(*p_src_layout) + "@eltwise_fwd_pd"; + const std::string key_with_layouts = + key + std::to_string(*p_src_layout) + "-" + std::to_string(diff_y_format); + const std::string key_diff_src_mem = + key_with_layouts + "@eltwise_diff_src_mem"; + const std::string key_diff_dst_mem = + key_with_layouts + "@eltwise_diff_dst_mem"; + const std::string key_grad = key_with_layouts + "@eltwise_grad"; + + const auto p_src_data = + std::static_pointer_cast(dev_ctx.GetBlob(key_src_data)); - auto *dx = - ctx.template Output(framework::GradVarName("X")); - const T *diff_src = dx->template mutable_data(ctx.GetPlace()); - - // get memory dim - std::vector src_tz = framework::vectorize2int(x->dims()); - - // create memory description - auto data_md = src_tz.size() == 2 - ? 
platform::MKLDNNMemDesc(src_tz, mkldnn::memory::f32,
-                                          mkldnn::memory::format::nc)
-                : platform::MKLDNNMemDesc(src_tz, mkldnn::memory::f32,
-                                          mkldnn::memory::format::nchw);
-
-  // create memory primitives
-  auto src_memory = mkldnn::memory(
-      {data_md, mkldnn_engine}, static_cast<void *>(const_cast<T *>(src)));
-  auto diff_src_memory =
-      mkldnn::memory({data_md, mkldnn_engine},
-                     static_cast<void *>(const_cast<T *>(diff_src)));
-  auto diff_dst_memory =
-      mkldnn::memory({data_md, mkldnn_engine},
-                     static_cast<void *>(const_cast<T *>(diff_dst)));
-
-  auto backward_desc =
-      mkldnn::eltwise_backward::desc(algorithm, data_md, data_md, alpha, beta);
-
-  // retrieve eltwise primitive desc from device context
-  const std::string key = ctx.op().Input("Out");
-  const std::string key_eltwise_pd = key + "@eltwise_pd";
-  const std::shared_ptr<void> forward_pd = dev_ctx.GetBlob(key_eltwise_pd);
-  PADDLE_ENFORCE(forward_pd != nullptr,
-                 "Fail to find eltwise_pd in device context");
-  auto *p_forward_pd =
-      static_cast<mkldnn::eltwise_forward::primitive_desc *>(forward_pd.get());
-
-  auto eltwise_bwd_prim_desc = mkldnn::eltwise_backward::primitive_desc(
-      backward_desc, mkldnn_engine, *p_forward_pd);
-
-  auto eltwise_bwd = mkldnn::eltwise_backward(eltwise_bwd_prim_desc, src_memory,
-                                              diff_dst_memory, diff_src_memory);
+  auto src_memory =
+      std::static_pointer_cast<mkldnn::memory>(dev_ctx.GetBlob(key_src_mem));
+  PADDLE_ENFORCE(src_memory != nullptr,
+                 "Fail to find src_memory in device context");
+  src_memory->set_data_handle(*p_src_data.get());
+
+  std::shared_ptr<mkldnn::memory> diff_src_memory;
+
+  auto p_grad = std::static_pointer_cast<mkldnn::eltwise_backward>(
+      dev_ctx.GetBlob(key_grad));
+
+  if (p_grad == nullptr) {
+    // create mkldnn memory for input diff_y
+    auto diff_dst_md = platform::MKLDNNMemDesc(
+        diff_dst_tz, platform::MKLDNNGetDataType<T>(), diff_y_format);
+    auto diff_dst_memory = std::shared_ptr<memory>(
+        new memory({diff_dst_md, mkldnn_engine}, to_void_cast(diff_y_data)));
+    dev_ctx.SetBlob(key_diff_dst_mem, diff_dst_memory);
+
+    // retrieve eltwise primitive desc from device context
+    auto forward_pd =
+        std::static_pointer_cast<mkldnn::eltwise_forward::primitive_desc>(
+            dev_ctx.GetBlob(key_fwd_pd));
+    PADDLE_ENFORCE(forward_pd != nullptr,
+                   "Fail to find eltwise_fwd_pd in device context");
+
+    // create primitive descriptor for activation backward
+    auto backward_desc = mkldnn::eltwise_backward::desc(
+        algorithm, diff_dst_memory->get_primitive_desc().desc(),
+        src_memory->get_primitive_desc().desc(), alpha, beta);
+    auto backward_pd = mkldnn::eltwise_backward::primitive_desc(
+        backward_desc, mkldnn_engine, *forward_pd);
+
+    // create mkldnn memory for output diff_src
+    diff_src_memory = std::make_shared<memory>(
+        backward_pd.diff_src_primitive_desc(), diff_x_data);
+    dev_ctx.SetBlob(key_diff_src_mem, diff_src_memory);
+
+    // create activation backward primitive
+    p_grad = std::make_shared<mkldnn::eltwise_backward>(
+        backward_pd, *src_memory, *diff_dst_memory, *diff_src_memory);
+    dev_ctx.SetBlob(key_grad, p_grad);
+  } else {
+    // primitives already exist
+    diff_src_memory = std::static_pointer_cast<mkldnn::memory>(
+        dev_ctx.GetBlob(key_diff_src_mem));
+    auto diff_dst_memory = std::static_pointer_cast<mkldnn::memory>(
+        dev_ctx.GetBlob(key_diff_dst_mem));
+
+    diff_src_memory->set_data_handle(
+        platform::to_void_reinterpret_cast(diff_x_data));
+    diff_dst_memory->set_data_handle(
+        platform::to_void_reinterpret_cast(diff_y_data));
+  }

   // push primitive to stream and wait until it's executed
-  std::vector<mkldnn::primitive> pipeline = {eltwise_bwd};
-  mkldnn::stream(mkldnn::stream::kind::eager).submit(pipeline).wait();
+  std::vector<primitive> pipeline;
+  pipeline.push_back(*p_grad);
+  stream(stream::kind::eager).submit(pipeline).wait();
+
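+  // Record the MKLDNN layout/format on the output gradient tensor so the
+  // next operator knows how to interpret diff_x's memory, which may not be
+  // in plain NCHW order.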
+  diff_x->set_layout(DataLayout::kMKLDNN);
+  diff_x->set_format(GetMKLDNNFormat(*diff_src_memory));
 }
-}  // anonymous namespace

 template <typename T, mkldnn::algorithm algorithm>
 struct MKLDNNActivationFunc : public BaseActivationFunctor<T> {
-  template <typename ExecContext>
-  void operator()(const ExecContext &ctx) const {
+  void operator()(const framework::ExecutionContext &ctx) const {
     eltwise_forward<T>(ctx, algorithm);
   }
 };

 template <typename T, mkldnn::algorithm algorithm>
 struct MKLDNNActivationGradFunc : public BaseActivationFunctor<T> {
-  template <typename ExecContext>
-  void operator()(const ExecContext &ctx) const {
+  void operator()(const framework::ExecutionContext &ctx) const {
     eltwise_grad<T>(ctx, algorithm);
   }
 };

 template <typename T>
-using ReluMkldnnFunctor =
+using ReluMKLDNNFunctor =
     MKLDNNActivationFunc<T, mkldnn::algorithm::eltwise_relu>;

 template <typename T>
-using TanhMkldnnFunctor =
+using TanhMKLDNNFunctor =
     MKLDNNActivationFunc<T, mkldnn::algorithm::eltwise_tanh>;

 template <typename T>
-using SqrtMkldnnFunctor =
+using SqrtMKLDNNFunctor =
     MKLDNNActivationFunc<T, mkldnn::algorithm::eltwise_sqrt>;

 template <typename T>
-using AbsMkldnnFunctor =
+using AbsMKLDNNFunctor =
     MKLDNNActivationFunc<T, mkldnn::algorithm::eltwise_abs>;

 template <typename T>
-using ReluMkldnnGradFunctor =
+using ReluMKLDNNGradFunctor =
     MKLDNNActivationGradFunc<T, mkldnn::algorithm::eltwise_relu>;

 template <typename T>
-using TanhMkldnnGradFunctor =
+using TanhMKLDNNGradFunctor =
     MKLDNNActivationGradFunc<T, mkldnn::algorithm::eltwise_tanh>;

 template <typename T>
-using SqrtMkldnnGradFunctor =
+using SqrtMKLDNNGradFunctor =
     MKLDNNActivationGradFunc<T, mkldnn::algorithm::eltwise_sqrt>;

 template <typename T>
-using AbsMkldnnGradFunctor =
+using AbsMKLDNNGradFunctor =
     MKLDNNActivationGradFunc<T, mkldnn::algorithm::eltwise_abs>;
 }  // namespace operators
 }  // namespace paddle

@@ -197,9 +341,9 @@ namespace ops = paddle::operators;
                    ops::MKLDNNActivationGradKernel<ops::grad_functor<float>>);

 #define FOR_EACH_MKLDNN_KERNEL_FUNCTOR(__macro)            \
-  __macro(relu, ReluMkldnnFunctor, ReluMkldnnGradFunctor); \
-  __macro(tanh, TanhMkldnnFunctor, TanhMkldnnGradFunctor); \
-  __macro(sqrt, SqrtMkldnnFunctor, SqrtMkldnnGradFunctor); \
-  __macro(abs, AbsMkldnnFunctor, AbsMkldnnGradFunctor);
+  __macro(relu, ReluMKLDNNFunctor, ReluMKLDNNGradFunctor); \
+  __macro(tanh, TanhMKLDNNFunctor, TanhMKLDNNGradFunctor); \
+  __macro(sqrt, SqrtMKLDNNFunctor, SqrtMKLDNNGradFunctor); \
+  __macro(abs, AbsMKLDNNFunctor, AbsMKLDNNGradFunctor);

 FOR_EACH_MKLDNN_KERNEL_FUNCTOR(REGISTER_ACTIVATION_MKLDNN_KERNEL);
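To make the renamed aliases concrete: each one merely pins the MKLDNN algorithm enum, so a compile-time check such as the following (illustrative only, assuming <type_traits> is included in this translation unit) would hold:

static_assert(
    std::is_same<ops::ReluMKLDNNFunctor<float>,
                 ops::MKLDNNActivationFunc<
                     float, mkldnn::algorithm::eltwise_relu>>::value,
    "ReluMKLDNNFunctor<float> pins the eltwise_relu algorithm");

diff --git a/paddle/fluid/operators/activation_op.cc b/paddle/fluid/operators/activation_op.cc
index 87ef55c50b..286b03d7b7 100644
--- a/paddle/fluid/operators/activation_op.cc
+++ b/paddle/fluid/operators/activation_op.cc
@@ -19,19 +19,20 @@ limitations under the License.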
*/ namespace paddle { namespace operators { -#define REGISTER_ACTIVATION_OP_MAKER(OP_NAME, OP_COMMENT) \ - class OP_NAME##OpMaker \ - : public ::paddle::framework::OpProtoAndCheckerMaker { \ - public: \ - OP_NAME##OpMaker(OpProto *proto, OpAttrChecker *op_checker) \ - : ::paddle::framework::OpProtoAndCheckerMaker(proto, op_checker) { \ - AddInput("X", "Input of " #OP_NAME "operator"); \ - AddOutput("Out", "Output of" #OP_NAME "operator"); \ - AddAttr("use_mkldnn", \ - "(bool, default false) Only used in mkldnn kernel") \ - .SetDefault(false); \ - AddComment(#OP_COMMENT); \ - } \ +using paddle::framework::Tensor; + +#define REGISTER_ACTIVATION_OP_MAKER(OP_NAME, OP_COMMENT) \ + class OP_NAME##OpMaker \ + : public ::paddle::framework::OpProtoAndCheckerMaker { \ + public: \ + void Make() override { \ + AddInput("X", "Input of " #OP_NAME " operator"); \ + AddOutput("Out", "Output of " #OP_NAME " operator").Reuse("X"); \ + AddAttr("use_mkldnn", \ + "(bool, default false) Only used in mkldnn kernel") \ + .SetDefault(false); \ + AddComment(#OP_COMMENT); \ + } \ } #define REGISTER_ACTIVATION_OP_GRAD_MAKER(OP_NAME, KERNEL_TYPE) \ @@ -42,7 +43,7 @@ namespace operators { \ protected: \ std::unique_ptr<::paddle::framework::OpDesc> Apply() const override { \ - auto *op = new ::paddle::framework::OpDesc(); \ + auto* op = new ::paddle::framework::OpDesc(); \ op->SetType(#KERNEL_TYPE "_grad"); \ op->SetInput("Out", Output("Out")); \ op->SetInput(::paddle::framework::GradVarName("Out"), \ @@ -55,23 +56,53 @@ namespace operators { } \ } +framework::OpKernelType GetKernelType(const framework::ExecutionContext& ctx, + const framework::OperatorWithKernel& oper, + const std::string& name) { + framework::LibraryType library{framework::LibraryType::kPlain}; + framework::DataLayout layout = framework::DataLayout::kAnyLayout; +#ifdef PADDLE_WITH_MKLDNN + auto it = oper.Attrs().find("use_mkldnn"); + if (library == framework::LibraryType::kPlain && it != oper.Attrs().end() && + platform::CanMKLDNNBeUsed(ctx)) { + library = framework::LibraryType::kMKLDNN; + layout = framework::DataLayout::kMKLDNN; + } +#endif + return framework::OpKernelType( + framework::ToDataType(ctx.Input(name)->type()), + ctx.GetPlace(), layout, library); +} + class ActivationOp : public framework::OperatorWithKernel { public: using framework::OperatorWithKernel::OperatorWithKernel; - void InferShape(framework::InferShapeContext *ctx) const override { + void InferShape(framework::InferShapeContext* ctx) const override { ctx->SetOutputDim("Out", ctx->GetInputDim("X")); ctx->ShareLoD("X", /*->*/ "Out"); } + + protected: + framework::OpKernelType GetExpectedKernelType( + const framework::ExecutionContext& ctx) const override { + return GetKernelType(ctx, *this, "X"); + } }; class ActivationOpGrad : public framework::OperatorWithKernel { public: using framework::OperatorWithKernel::OperatorWithKernel; - void InferShape(framework::InferShapeContext *ctx) const override { + void InferShape(framework::InferShapeContext* ctx) const override { ctx->SetOutputDim(framework::GradVarName("X"), ctx->GetInputDim("Out")); } + + protected: + framework::OpKernelType GetExpectedKernelType( + const framework::ExecutionContext& ctx) const override { + return GetKernelType(ctx, *this, "Out"); + } }; __attribute__((unused)) constexpr char SigmoidDoc[] = R"DOC( @@ -84,7 +115,7 @@ $$out = \frac{1}{1 + e^{-x}}$$ __attribute__((unused)) constexpr char LogSigmoidDoc[] = R"DOC( Logsigmoid Activation Operator -$$out = \log \frac{1}{1 + e^{-x}}$$ +$$out = \\log \\frac{1}{1 + 
e^{-x}}$$ )DOC"; @@ -105,14 +136,14 @@ $out = \max(x, 0)$ __attribute__((unused)) constexpr char TanhDoc[] = R"DOC( Tanh Activation Operator. -$$out = \frac{e^{x} - e^{-x}}{e^{x} + e^{-x}}$$ +$$out = \\frac{e^{x} - e^{-x}}{e^{x} + e^{-x}}$$ )DOC"; __attribute__((unused)) constexpr char TanhShrinkDoc[] = R"DOC( TanhShrink Activation Operator. -$$out = x - \frac{e^{x} - e^{-x}}{e^{x} + e^{-x}}$$ +$$out = x - \\frac{e^{x} - e^{-x}}{e^{x} + e^{-x}}$$ )DOC"; @@ -168,7 +199,7 @@ $out = [x]$ __attribute__((unused)) constexpr char ReciprocalDoc[] = R"DOC( Reciprocal Activation Operator. -$$out = \frac{1}{x}$$ +$$out = \\frac{1}{x}$$ )DOC"; @@ -204,8 +235,7 @@ $$out = \frac{x}{1 + |x|}$$ class LeakyReluOpMaker : public framework::OpProtoAndCheckerMaker { public: - LeakyReluOpMaker(OpProto *proto, OpAttrChecker *op_checker) - : framework::OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("X", "Input of LeakyRelu operator"); AddOutput("Out", "Output of LeakyRelu operator"); AddAttr("alpha", "The small negative slope").SetDefault(0.02f); @@ -220,21 +250,19 @@ $out = \max(x, \alpha * x)$ class SoftShrinkOpMaker : public framework::OpProtoAndCheckerMaker { public: - SoftShrinkOpMaker(OpProto *proto, OpAttrChecker *op_checker) - : framework::OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("X", "Input of Softshrink operator"); AddOutput("Out", "Output of Softshrink operator"); AddAttr("lambda", "non-negative offset").SetDefault(0.5f); AddComment(R"DOC( -Softshrink Activation Operator. +:strong:`Softshrink Activation Operator` -$$ -out = \begin{cases} - x - \lambda, \text{if } x > \lambda \\ - x + \lambda, \text{if } x < -\lambda \\ - 0, \text{otherwise} - \end{cases} -$$ +.. math:: + out = \begin{cases} + x - \lambda, \text{if } x > \lambda \\ + x + \lambda, \text{if } x < -\lambda \\ + 0, \text{otherwise} + \end{cases} )DOC"); } @@ -242,22 +270,21 @@ $$ class HardShrinkOpMaker : public framework::OpProtoAndCheckerMaker { public: - HardShrinkOpMaker(OpProto *proto, OpAttrChecker *op_checker) - : framework::OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("X", "Input of HardShrink operator"); AddOutput("Out", "Output of HardShrink operator"); - AddAttr("threshold", "The value of threshold for HardShrink") + AddAttr("threshold", + "The value of threshold for HardShrink. [default: 0.5]") .SetDefault(0.5f); AddComment(R"DOC( -HardShrink Activation Operator. +:strong:`HardShrink activation operator` -$$ -out = \begin{cases} - x, \text{if } x > \lambda \\ - x, \text{if } x < -\lambda \\ - 0, \text{otherwise} - \end{cases} -$$ +.. 
math:: + out = \begin{cases} + x, \text{if } x > \lambda \\ + x, \text{if } x < -\lambda \\ + 0, \text{otherwise} + \end{cases} )DOC"); } @@ -265,8 +292,7 @@ $$ class BReluOpMaker : public framework::OpProtoAndCheckerMaker { public: - BReluOpMaker(OpProto *proto, OpAttrChecker *op_checker) - : framework::OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("X", "Input of BRelu operator"); AddOutput("Out", "Output of BRelu operator"); AddAttr("t_min", "The min marginal value of BRelu") @@ -284,8 +310,7 @@ $out = \max(\min(x, t_{min}), t_{max})$ class SoftReluOpMaker : public framework::OpProtoAndCheckerMaker { public: - SoftReluOpMaker(OpProto *proto, OpAttrChecker *op_checker) - : framework::OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("X", "Input of SoftRelu operator"); AddOutput("Out", "Output of SoftRelu operator"); AddAttr("threshold", "The threshold value of SoftRelu") @@ -301,8 +326,7 @@ $out = \ln(1 + \exp(\max(\min(x, threshold), threshold))$ class ELUOpMaker : public framework::OpProtoAndCheckerMaker { public: - ELUOpMaker(OpProto *proto, OpAttrChecker *op_checker) - : framework::OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("X", "Input of ELU operator"); AddOutput("Out", "Output of ELU operator"); AddAttr("alpha", "The alpha value of ELU").SetDefault(1.0f); @@ -320,8 +344,7 @@ $out = \max(0, x) + \min(0, \alpha * (e^x - 1))$ class Relu6OpMaker : public framework::OpProtoAndCheckerMaker { public: - Relu6OpMaker(OpProto *proto, OpAttrChecker *op_checker) - : framework::OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("X", "Input of Relu6 operator"); AddOutput("Out", "Output of Relu6 operator"); AddAttr("threshold", "The threshold value of Relu6") @@ -337,8 +360,7 @@ $out = \min(\max(0, x), 6)$ class PowOpMaker : public framework::OpProtoAndCheckerMaker { public: - PowOpMaker(OpProto *proto, OpAttrChecker *op_checker) - : framework::OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("X", "Input of Pow operator"); AddOutput("Out", "Output of Pow operator"); AddAttr("factor", "The exponential factor of Pow").SetDefault(1.0f); @@ -353,8 +375,7 @@ $out = x^{factor}$ class STanhOpMaker : public framework::OpProtoAndCheckerMaker { public: - STanhOpMaker(OpProto *proto, OpAttrChecker *op_checker) - : framework::OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("X", "Input of STanh operator"); AddOutput("Out", "Output of STanh operator"); AddAttr("scale_a", "The scale parameter of a for the input") @@ -364,7 +385,7 @@ class STanhOpMaker : public framework::OpProtoAndCheckerMaker { AddComment(R"DOC( STanh Activation Operator. -$$out = b * \frac{e^{a * x} - e^{-a * x}}{e^{a * x} + e^{-a * x}}$$ +$$out = b * \\frac{e^{a * x} - e^{-a * x}}{e^{a * x} + e^{-a * x}}$$ )DOC"); } @@ -372,30 +393,28 @@ $$out = b * \frac{e^{a * x} - e^{-a * x}}{e^{a * x} + e^{-a * x}}$$ class ThresholdedReluOpMaker : public framework::OpProtoAndCheckerMaker { public: - ThresholdedReluOpMaker(OpProto *proto, OpAttrChecker *op_checker) - : framework::OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("X", "Input of ThresholdedRelu operator"); AddOutput("Out", "Output of ThresholdedRelu operator"); - AddAttr("threshold", "The threshold location of activation") + AddAttr("threshold", + "The threshold location of activation. [default 1.0].") .SetDefault(1.0f); AddComment(R"DOC( -ThresholdedRelu Activation Operator. 
+:strong:`ThresholdedRelu activation operator` -$$ -out = \begin{cases} - x, \text{if } x > threshold \\ - 0, \text{otherwise} - \end{cases} -$$ +.. math:: + out = \begin{cases} + x, \text{if } x > threshold \\ + 0, \text{otherwise} + \end{cases} )DOC"); } }; class HardSigmoidOpMaker : public framework::OpProtoAndCheckerMaker { public: - HardSigmoidOpMaker(OpProto *proto, OpAttrChecker *op_checker) - : framework::OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("X", "Input of HardSigmoid operator"); AddOutput("Out", "Output of HardSigmoid operator"); AddAttr("slope", "Slope for linear approximation of sigmoid") @@ -420,15 +439,14 @@ It is recommended to use the defaults for this activation. class SwishOpMaker : public framework::OpProtoAndCheckerMaker { public: - SwishOpMaker(OpProto *proto, OpAttrChecker *op_checker) - : framework::OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("X", "Input of Swish operator"); AddOutput("Out", "Output of Swish operator"); AddAttr("beta", "Constant beta of swish operator").SetDefault(1.0f); AddComment(R"DOC( Swish Activation Operator. -$$out = \frac{x}{1 + e^{- \beta x}}$$ +$$out = \\frac{x}{1 + e^{- \beta x}}$$ )DOC"); } diff --git a/paddle/fluid/operators/adadelta_op.cc b/paddle/fluid/operators/adadelta_op.cc index 7bdb3f274a..d1970515f5 100644 --- a/paddle/fluid/operators/adadelta_op.cc +++ b/paddle/fluid/operators/adadelta_op.cc @@ -66,8 +66,7 @@ class AdadeltaOp : public framework::OperatorWithKernel { class AdadeltaOpMaker : public framework::OpProtoAndCheckerMaker { public: - AdadeltaOpMaker(OpProto *proto, OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("Param", "(Tensor) Input parameter"); AddInput("Grad", "(Tensor) Input gradient"); AddInput("AvgSquaredGrad", "(Tensor) Input average of squared gradient"); diff --git a/paddle/fluid/operators/adagrad_op.cc b/paddle/fluid/operators/adagrad_op.cc index 1227129429..a3ef9ad9f9 100644 --- a/paddle/fluid/operators/adagrad_op.cc +++ b/paddle/fluid/operators/adagrad_op.cc @@ -67,8 +67,7 @@ class AdagradOp : public framework::OperatorWithKernel { class AdagradOpMaker : public framework::OpProtoAndCheckerMaker { public: - AdagradOpMaker(OpProto* proto, OpAttrChecker* op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("Param", "(Tensor) Input parameter"); AddInput("Grad", "(Tensor) Input gradient"); AddInput("Moment", "(Tensor) Second moment"); diff --git a/paddle/fluid/operators/adam_op.cc b/paddle/fluid/operators/adam_op.cc index f12f0c6663..5d670fe3b9 100644 --- a/paddle/fluid/operators/adam_op.cc +++ b/paddle/fluid/operators/adam_op.cc @@ -56,9 +56,12 @@ class AdamOp : public framework::OperatorWithKernel { "Beta2 power accumulator should have 1 dimension"); auto param_dims = ctx->GetInputDim("Param"); - PADDLE_ENFORCE_EQ( - param_dims, ctx->GetInputDim("Grad"), - "Param and Grad input of AdamOp should have same dimension"); + if (ctx->GetInputsVarType("Grad")[0] == + framework::proto::VarType::LOD_TENSOR) { + PADDLE_ENFORCE_EQ( + param_dims, ctx->GetInputDim("Grad"), + "Param and Grad input of AdamOp should have same dimension"); + } PADDLE_ENFORCE_EQ( param_dims, ctx->GetInputDim("Moment1"), "Param and Moment1 input of AdamOp should have same dimension"); @@ -80,8 +83,7 @@ class AdamOp : public framework::OperatorWithKernel { class AdamOpMaker : public framework::OpProtoAndCheckerMaker { public: - AdamOpMaker(OpProto *proto, OpAttrChecker 
*op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("Param", "(Tensor) Input parameter"); AddInput("Grad", "(Tensor) Input gradient"); AddInput("LearningRate", "(Tensor) Learning rate"); @@ -90,9 +92,9 @@ class AdamOpMaker : public framework::OpProtoAndCheckerMaker { AddInput("Beta1Pow", "(Tensor) Input beta1 power accumulator"); AddInput("Beta2Pow", "(Tensor) Input beta2 power accumulator"); - AddOutput("ParamOut", "(Tensor) Output parameter"); - AddOutput("Moment1Out", "(Tensor) Output first moment"); - AddOutput("Moment2Out", "(Tensor) Output second moment"); + AddOutput("ParamOut", "(Tensor) Output parameter").Reuse("Param"); + AddOutput("Moment1Out", "(Tensor) Output first moment").Reuse("Moment1"); + AddOutput("Moment2Out", "(Tensor) Output second moment").Reuse("Moment2"); AddAttr("beta1", "(float, default 0.9) " diff --git a/paddle/fluid/operators/adam_op.h b/paddle/fluid/operators/adam_op.h index f82ff47b52..84a584f424 100644 --- a/paddle/fluid/operators/adam_op.h +++ b/paddle/fluid/operators/adam_op.h @@ -282,6 +282,10 @@ class AdamOpKernel : public framework::OpKernel { } else if (grad_var->IsType()) { auto& grad = Ref(ctx.Input("Grad"), "Must set Grad"); + if (grad.rows().size() == 0) { + VLOG(3) << "grad row size is 0!!"; + return; + } // merge duplicated rows if any. scatter::MergeAdd merge_func; auto grad_merge = @@ -289,11 +293,18 @@ class AdamOpKernel : public framework::OpKernel { auto& grad_tensor = grad_merge.value(); const T* grad_data = grad_tensor.template data(); int64_t* rows = nullptr; +// When compiled without CUDA, the CUDAMutableData() interface should not be +// provided. +#if defined(PADDLE_WITH_CUDA) if (platform::is_gpu_place(ctx.GetPlace())) { rows = grad_merge.mutable_rows()->CUDAMutableData(ctx.GetPlace()); } else { +#endif rows = grad_merge.mutable_rows()->data(); + +#if defined(PADDLE_WITH_CUDA) } +#endif auto row_numel = grad_tensor.numel() / grad_merge.rows().size(); SparseAdamFunctor functor( diff --git a/paddle/fluid/operators/adamax_op.cc b/paddle/fluid/operators/adamax_op.cc index 608b855d58..32062574bc 100644 --- a/paddle/fluid/operators/adamax_op.cc +++ b/paddle/fluid/operators/adamax_op.cc @@ -74,8 +74,7 @@ class AdamaxOp : public framework::OperatorWithKernel { class AdamaxOpMaker : public framework::OpProtoAndCheckerMaker { public: - AdamaxOpMaker(OpProto *proto, OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("Param", "(Tensor) Input parameter"); AddInput("Grad", "(Tensor) Input gradient"); AddInput("LearningRate", "(Tensor) Learning rate"); diff --git a/paddle/fluid/operators/arg_max_op.cc b/paddle/fluid/operators/arg_max_op.cc new file mode 100644 index 0000000000..8174d37358 --- /dev/null +++ b/paddle/fluid/operators/arg_max_op.cc @@ -0,0 +1,33 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ + +#include "paddle/fluid/operators/arg_min_max_op_base.h" + +REGISTER_OPERATOR(arg_max, paddle::operators::ArgMinMaxOp, + paddle::operators::ArgMaxOpMaker, + paddle::framework::EmptyGradOpMaker); + +REGISTER_OP_CPU_KERNEL( + arg_max, + paddle::operators::ArgMaxKernel, + paddle::operators::ArgMaxKernel, + paddle::operators::ArgMaxKernel, + paddle::operators::ArgMaxKernel, + paddle::operators::ArgMaxKernel, + paddle::operators::ArgMaxKernel, + paddle::operators::ArgMaxKernel); diff --git a/paddle/fluid/operators/arg_max_op.cu b/paddle/fluid/operators/arg_max_op.cu new file mode 100644 index 0000000000..a147d77a9e --- /dev/null +++ b/paddle/fluid/operators/arg_max_op.cu @@ -0,0 +1,31 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include "paddle/fluid/operators/arg_min_max_op_base.h" + +REGISTER_OP_CUDA_KERNEL( + arg_max, + paddle::operators::ArgMaxKernel, + paddle::operators::ArgMaxKernel, + paddle::operators::ArgMaxKernel, + paddle::operators::ArgMaxKernel, + paddle::operators::ArgMaxKernel, + paddle::operators::ArgMaxKernel, + paddle::operators::ArgMaxKernel); diff --git a/paddle/fluid/operators/arg_min_max_op_base.h b/paddle/fluid/operators/arg_min_max_op_base.h new file mode 100644 index 0000000000..6cbdaefeda --- /dev/null +++ b/paddle/fluid/operators/arg_min_max_op_base.h @@ -0,0 +1,160 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/
+
+#pragma once
+#include <string>
+#include <type_traits>
+#include <vector>
+#include "paddle/fluid/framework/ddim.h"
+#include "paddle/fluid/framework/eigen.h"
+#include "paddle/fluid/framework/lod_tensor.h"
+#include "paddle/fluid/framework/op_registry.h"
+#include "paddle/fluid/framework/operator.h"
+#include "paddle/fluid/platform/enforce.h"
+#include "paddle/fluid/string/printf.h"
+
+namespace paddle {
+namespace operators {
+
+enum ArgMinMaxType { kArgMin, kArgMax };
+
+template <typename DeviceContext, typename T, typename Tout, int64_t Rank,
+          ArgMinMaxType argMinMaxValue>
+struct ArgMinMaxFunctor {};
+
+#define DECLARE_ARG_MIN_MAX_FUNCTOR(eigen_op_type, enum_argminmax_value)      \
+  template <typename DeviceContext, typename T, typename Tout, int64_t Rank>  \
+  struct ArgMinMaxFunctor<DeviceContext, T, Tout, Rank,                       \
+                          enum_argminmax_value> {                             \
+    void operator()(const DeviceContext& ctx, const framework::LoDTensor& in, \
+                    framework::LoDTensor* out, int64_t axis) {                \
+      auto in_eigen = framework::EigenTensor<T, Rank>::From(in);              \
+      auto out_eigen = framework::EigenTensor<Tout, Rank - 1>::From(*out);    \
+      out_eigen.device(*(ctx.eigen_device())) =                               \
+          in_eigen.eigen_op_type(axis).template cast<Tout>();                 \
+    }                                                                         \
+  }
+
+DECLARE_ARG_MIN_MAX_FUNCTOR(argmin, ArgMinMaxType::kArgMin);
+DECLARE_ARG_MIN_MAX_FUNCTOR(argmax, ArgMinMaxType::kArgMax);
+
+template <typename DeviceContext, typename T, typename Tout,
+          ArgMinMaxType EnumArgMinMaxValue>
+class ArgMinMaxKernel : public framework::OpKernel<T> {
+ public:
+  void Compute(const framework::ExecutionContext& ctx) const override {
+    auto& x = *(ctx.Input<framework::LoDTensor>("X"));
+    auto& out = *(ctx.Output<framework::LoDTensor>("Out"));
+    out.mutable_data<Tout>(ctx.GetPlace());
+    auto axis = ctx.Attr<int64_t>("axis");
+    auto& dev_ctx = ctx.template device_context<DeviceContext>();
+
+#define CALL_ARG_MINMAX_FUNCTOR(rank)                                \
+  ArgMinMaxFunctor<DeviceContext, T, Tout, rank, EnumArgMinMaxValue> \
+      functor##rank;                                                 \
+  functor##rank(dev_ctx, x, &out, axis)
+
+    switch (x.dims().size()) {
+      case 1:
+        CALL_ARG_MINMAX_FUNCTOR(1);
+        break;
+      case 2:
+        CALL_ARG_MINMAX_FUNCTOR(2);
+        break;
+      case 3:
+        CALL_ARG_MINMAX_FUNCTOR(3);
+        break;
+      case 4:
+        CALL_ARG_MINMAX_FUNCTOR(4);
+        break;
+      case 5:
+        CALL_ARG_MINMAX_FUNCTOR(5);
+        break;
+      case 6:
+        CALL_ARG_MINMAX_FUNCTOR(6);
+        break;
+      default:
+        PADDLE_THROW(
+            "%s operator doesn't support tensors whose ranks are greater "
+            "than 6.",
+            (EnumArgMinMaxValue == kArgMin ? "argmin" : "argmax"));
+        break;
+#undef CALL_ARG_MINMAX_FUNCTOR
+    }
+  }
+};
+
+template <typename DeviceContext, typename T>
+using ArgMinKernel =
+    ArgMinMaxKernel<DeviceContext, T, int64_t, ArgMinMaxType::kArgMin>;
+
+template <typename DeviceContext, typename T>
+using ArgMaxKernel =
+    ArgMinMaxKernel<DeviceContext, T, int64_t, ArgMinMaxType::kArgMax>;
+
+class ArgMinMaxOp : public framework::OperatorWithKernel {
+ public:
+  using framework::OperatorWithKernel::OperatorWithKernel;
+
+  void InferShape(framework::InferShapeContext* ctx) const override {
+    PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) should not be null");
+    PADDLE_ENFORCE(ctx->HasOutput("Out"), "Output(Out) should not be null");
+    const auto& x_dims = ctx->GetInputDim("X");
+    int64_t axis = ctx->Attrs().Get<int64_t>("axis");
+    PADDLE_ENFORCE(axis >= -x_dims.size() && axis < x_dims.size(),
+                   "'axis' must be inside [-Rank(X), Rank(X))");
+
+    auto x_rank = x_dims.size();
+    if (axis < 0) axis += x_rank;
+
+    std::vector<int64_t> vec;
+    for (int64_t i = 0; i < axis; i++) vec.push_back(x_dims[i]);
+    for (int64_t i = axis + 1; i < x_rank; i++) vec.push_back(x_dims[i]);
+    ctx->SetOutputDim("Out", framework::make_ddim(vec));
+  }
+};
+
+class BaseArgMinMaxOpMaker : public framework::OpProtoAndCheckerMaker {
+ protected:
+  virtual const char* OpName() const = 0;
+  virtual const char* Name() const = 0;
+
+ public:
+  void Make() override {
+    AddInput("X", "Input tensor.");
+    AddOutput("Out", "Output tensor.");
+    AddAttr<int64_t>("axis", "The axis in which to compute the arg indices.");
+    AddComment(string::Sprintf(R"DOC(
+      %s Operator.
+
+      Computes the indices of the %s elements of the input tensor
+      along the provided axis.
+)DOC", + OpName(), Name())); + } +}; + +class ArgMinOpMaker : public BaseArgMinMaxOpMaker { + protected: + const char* OpName() const override { return "ArgMin"; } + const char* Name() const override { return "min"; } +}; + +class ArgMaxOpMaker : public BaseArgMinMaxOpMaker { + protected: + const char* OpName() const override { return "ArgMax"; } + const char* Name() const override { return "max"; } +}; +} // namespace operators +} // namespace paddle diff --git a/paddle/fluid/operators/arg_min_op.cc b/paddle/fluid/operators/arg_min_op.cc new file mode 100644 index 0000000000..41f188029f --- /dev/null +++ b/paddle/fluid/operators/arg_min_op.cc @@ -0,0 +1,33 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include "paddle/fluid/operators/arg_min_max_op_base.h" + +REGISTER_OPERATOR(arg_min, paddle::operators::ArgMinMaxOp, + paddle::operators::ArgMinOpMaker, + paddle::framework::EmptyGradOpMaker); + +REGISTER_OP_CPU_KERNEL( + arg_min, + paddle::operators::ArgMinKernel, + paddle::operators::ArgMinKernel, + paddle::operators::ArgMinKernel, + paddle::operators::ArgMinKernel, + paddle::operators::ArgMinKernel, + paddle::operators::ArgMinKernel, + paddle::operators::ArgMinKernel); diff --git a/paddle/fluid/operators/arg_min_op.cu b/paddle/fluid/operators/arg_min_op.cu new file mode 100644 index 0000000000..4d02050850 --- /dev/null +++ b/paddle/fluid/operators/arg_min_op.cu @@ -0,0 +1,31 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include "paddle/fluid/operators/arg_min_max_op_base.h" + +REGISTER_OP_CUDA_KERNEL( + arg_min, + paddle::operators::ArgMinKernel, + paddle::operators::ArgMinKernel, + paddle::operators::ArgMinKernel, + paddle::operators::ArgMinKernel, + paddle::operators::ArgMinKernel, + paddle::operators::ArgMinKernel, + paddle::operators::ArgMinKernel); diff --git a/paddle/fluid/operators/argsort_op.cc b/paddle/fluid/operators/argsort_op.cc new file mode 100644 index 0000000000..a2f5a25457 --- /dev/null +++ b/paddle/fluid/operators/argsort_op.cc @@ -0,0 +1,87 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+#include "paddle/fluid/operators/argsort_op.h"
+
+namespace paddle {
+namespace operators {
+
+class ArgsortOp : public framework::OperatorWithKernel {
+ public:
+  using framework::OperatorWithKernel::OperatorWithKernel;
+
+  void InferShape(framework::InferShapeContext *ctx) const override {
+    PADDLE_ENFORCE(ctx->HasInput("X"),
+                   "Input(X) of ArgsortOp should not be null.");
+    PADDLE_ENFORCE(ctx->HasOutput("Out"),
+                   "Output(Out) of ArgsortOp should not be null.");
+    PADDLE_ENFORCE(ctx->HasOutput("Indices"),
+                   "Output(Indices) of ArgsortOp should not be null.");
+
+    auto in_dims = ctx->GetInputDim("X");
+    int axis = ctx->Attrs().Get<int>("axis");
+
+    auto num_dims = in_dims.size();
+    PADDLE_ENFORCE(axis < num_dims,
+                   "Attr(axis) %d of ArgsortOp is out of bounds for Input(X)'s "
+                   "rank %d.",
+                   axis, num_dims);
+    PADDLE_ENFORCE(axis >= -num_dims,
+                   "Attr(axis) %d of ArgsortOp must not be less than "
+                   "-rank(Input(X)) (%d).",
+                   axis, num_dims);
+
+    ctx->SetOutputDim("Out", in_dims);
+    ctx->SetOutputDim("Indices", in_dims);
+    ctx->ShareLoD("X", "Out");
+    ctx->ShareLoD("X", "Indices");
+  }
+};
+
+class ArgsortOpMaker : public framework::OpProtoAndCheckerMaker {
+ public:
+  void Make() override {
+    AddInput("X", "(Tensor) The input of Argsort op.");
+    AddOutput("Out",
+              "(Tensor) The sorted tensor of Argsort op, with the same "
+              "shape as Input(X).");
+    AddOutput("Indices",
+              "(Tensor) The indices of a tensor giving the sorted order, with "
+              "the same shape as Input(X).");
+    AddComment(R"DOC(
+Argsort operator
+
+Performs sorting on the input tensor along the given axis and outputs two
+tensors, Output(Out) and Output(Indices). They preserve the same shape
+as Input(X), and Output(Out) represents the sorted tensor while
+Output(Indices) gives the sorted order along the given axis Attr(axis).
+
+    )DOC");
+    AddAttr<int>("axis",
+                 "(int, default -1) The axis along which to sort the tensor. "
+                 "When axis < 0, the actual axis is axis + rank(Input(X)), "
+                 "i.e. it counts from the last dimension. Default: -1.")
+        .SetDefault(-1);
+  }
+};
+
+}  // namespace operators
+}  // namespace paddle
+
+namespace ops = paddle::operators;
+REGISTER_OPERATOR(argsort, ops::ArgsortOp, ops::ArgsortOpMaker,
+                  paddle::framework::EmptyGradOpMaker);
+REGISTER_OP_CPU_KERNEL(argsort,
+                       ops::ArgsortKernel<paddle::platform::CPUDeviceContext, float>,
+                       ops::ArgsortKernel<paddle::platform::CPUDeviceContext, double>);
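To make the Out/Indices contract above concrete, a worked 1-D example (values chosen arbitrarily):

// X       = {3.0f, 1.0f, 2.0f}, axis = -1 (the last and only dimension)
// Out     = {1.0f, 2.0f, 3.0f}   (X sorted along the axis)
// Indices = {1, 2, 0}            (Out[i] == X[Indices[i]])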
diff --git a/paddle/fluid/operators/argsort_op.cu b/paddle/fluid/operators/argsort_op.cu
new file mode 100644
index 0000000000..7d5199aae7
--- /dev/null
+++ b/paddle/fluid/operators/argsort_op.cu
@@ -0,0 +1,151 @@
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+#include <thrust/execution_policy.h>
+#include <thrust/sort.h>
+#include "paddle/fluid/framework/op_registry.h"
+#include "paddle/fluid/operators/argsort_op.h"
+#include "paddle/fluid/platform/assert.h"
+#include "paddle/fluid/platform/cuda_device_function.h"
+#include "paddle/fluid/platform/cuda_primitives.h"
+
+namespace paddle {
+namespace operators {
+
+using Tensor = framework::Tensor;
+using platform::PADDLE_CUDA_NUM_THREADS;
+
+const int kMaxRank = 9;  // The max rank of a tensor allowed in Fluid
+
+__global__ void ComputeTargetIdx(const int64_t* in_dims, int dims_size,
+                                 int axis, int64_t n, int64_t* trg_idx,
+                                 int64_t* med_ids) {
+  int64_t index = threadIdx.x + blockDim.x * blockIdx.x;
+  if (index < n) {
+    int64_t shape_out_axis[kMaxRank - 1] = {0};
+    int64_t dims_out_axis[kMaxRank - 1] = {0};
+    int64_t tmp = index;
+    int64_t pos_in_axis = 0;
+    int64_t i = dims_size - 2;
+    int64_t dim_axis = 0;
+    for (int64_t j = dims_size - 1; j >= 0; --j) {
+      int64_t dim = in_dims[j];
+      if (j != axis) {
+        shape_out_axis[i] = tmp % dim;
+        dims_out_axis[i] = dim;
+        i--;
+      } else {
+        dim_axis = dim;
+        pos_in_axis = tmp % dim_axis;
+      }
+      tmp /= dim;
+    }
+    int64_t group = (dims_size > 1) ? shape_out_axis[0] : 0;
+    for (int64_t j = 0; j < dims_size - 2; ++j) {
+      group = group * dims_out_axis[j + 1] + shape_out_axis[j + 1];
+    }
+
+    int64_t target_idx = group * dim_axis + pos_in_axis;
+    trg_idx[index] = target_idx;
+    med_ids[target_idx] = pos_in_axis;
+  }
+}
+
+template <typename T>
+__global__ void PermuteInData(const T* in, const int64_t* trg_idx, int64_t n,
+                              T* med_out) {
+  int index = threadIdx.x + blockDim.x * blockIdx.x;
+  if (index < n) {
+    med_out[trg_idx[index]] = in[index];
+  }
+}
+
+template <typename T>
+__global__ void Sort(int64_t axis_dim, int64_t groups, T* med_out,
+                     int64_t* med_ids) {
+  int index = threadIdx.x + blockDim.x * blockIdx.x;
+  if (index < groups) {
+    thrust::sort_by_key(thrust::device, med_out + index * axis_dim,
+                        med_out + axis_dim * (1 + index),
+                        med_ids + index * axis_dim);
+  }
+}
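+// Worked example of the index mapping computed by ComputeTargetIdx: for
+// in_dims = {2, 3, 4} and axis = 1, the element at linear index 17 has
+// coordinates (1, 1, 1). Its non-axis coordinates (1, 1) over dims {2, 4}
+// flatten to group = 1 * 4 + 1 = 5; with pos_in_axis = 1 and dim_axis = 3,
+// this gives trg_idx[17] = 5 * 3 + 1 = 16. After PermuteInData, every slice
+// along `axis` is therefore contiguous in med_out, so Sort can dedicate one
+// thread per group and run thrust::sort_by_key over that group's slice,
+// carrying the original positions along in med_ids.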
+
+template <typename T>
+__global__ void PermuteMediateData(const T* med_out, const int64_t* med_ids,
+                                   const int64_t* trg_idx, int64_t n, T* out,
+                                   int64_t* indices) {
+  int index = threadIdx.x + blockDim.x * blockIdx.x;
+  if (index < n) {
+    out[index] = med_out[trg_idx[index]];
+    indices[index] = med_ids[trg_idx[index]];
+  }
+}
+
+template <typename T>
+class ArgsortOpCUDAKernel : public framework::OpKernel<T> {
+ public:
+  void Compute(const framework::ExecutionContext& ctx) const override {
+    auto* input = ctx.Input<Tensor>("X");
+    auto* output = ctx.Output<Tensor>("Out");
+    auto* indices = ctx.Output<Tensor>("Indices");
+    int axis = ctx.Attr<int>("axis");
+
+    auto in_dims = input->dims();
+    axis = (axis < 0) ? (in_dims.size() + axis) : axis;
+
+    const T* in_data = input->data<T>();
+    T* out_data = output->mutable_data<T>(ctx.GetPlace());
+    int64_t* ids_data = indices->mutable_data<int64_t>(ctx.GetPlace());
+
+    int64_t numel = input->numel();
+    int64_t groups = numel / in_dims[axis];
+
+    std::vector<int64_t> in_dims_vec = vectorize(in_dims);
+    thrust::device_vector<int64_t> in_dims_dev(in_dims_vec.begin(),
+                                               in_dims_vec.end());
+    int64_t* in_dims_data = thrust::raw_pointer_cast(in_dims_dev.data());
+    // Intermediate tensors for sorting data and indices
+    Tensor mediate_output, mediate_indices;
+    T* med_out_data =
+        mediate_output.mutable_data<T>(input->dims(), ctx.GetPlace());
+    int64_t* med_ids_data =
+        mediate_indices.mutable_data<int64_t>(in_dims, ctx.GetPlace());
+    // Target index of each element along the given axis in the intermediate
+    // tensors
+    Tensor trg_idx_t;
+    int64_t* trg_idx = trg_idx_t.mutable_data<int64_t>(in_dims, ctx.GetPlace());
+
+    auto stream = ctx.cuda_device_context().stream();
+    const int num_threads = PADDLE_CUDA_NUM_THREADS;
+
+    ComputeTargetIdx<<<(numel - 1) / num_threads + 1, num_threads, 0, stream>>>(
+        in_dims_data, in_dims.size(), axis, numel, trg_idx, med_ids_data);
+
+    PermuteInData<<<(numel - 1) / num_threads + 1, num_threads, 0, stream>>>(
+        in_data, trg_idx, numel, med_out_data);
+
+    Sort<<<(groups - 1) / num_threads + 1, num_threads, 0, stream>>>(
+        in_dims[axis], groups, med_out_data, med_ids_data);
+
+    PermuteMediateData<<<(numel - 1) / num_threads + 1, num_threads, 0,
+                         stream>>>(med_out_data, med_ids_data, trg_idx, numel,
+                                   out_data, ids_data);
+  }
+};
+
+}  // namespace operators
+}  // namespace paddle
+
+REGISTER_OP_CUDA_KERNEL(argsort, paddle::operators::ArgsortOpCUDAKernel<float>,
+                        paddle::operators::ArgsortOpCUDAKernel<double>);
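For reference, the Out/Indices contract implemented by the CUDA kernel above
and by the CPU kernel that follows, sketched for a single 1-D slice in plain
C++ (the helper name is ours, not part of the patch):

#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <vector>

// argsort over one 1-D slice: `out` receives the sorted values and
// `indices` the positions they came from, matching Output(Out)/Output(Indices).
void ArgsortSlice(const std::vector<float>& x, std::vector<float>* out,
                  std::vector<int64_t>* indices) {
  indices->resize(x.size());
  for (std::size_t i = 0; i < x.size(); ++i) (*indices)[i] = i;
  // Sort the index list by the values it points to, then gather the values.
  std::sort(indices->begin(), indices->end(),
            [&x](int64_t a, int64_t b) { return x[a] < x[b]; });
  out->resize(x.size());
  for (std::size_t i = 0; i < x.size(); ++i) (*out)[i] = x[(*indices)[i]];
}
// For x = {3, 1, 2}: *out = {1, 2, 3} and *indices = {1, 2, 0}.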
diff --git a/paddle/fluid/operators/argsort_op.h b/paddle/fluid/operators/argsort_op.h
new file mode 100644
index 0000000000..7e9112cfb7
--- /dev/null
+++ b/paddle/fluid/operators/argsort_op.h
@@ -0,0 +1,81 @@
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+#pragma once
+#include <algorithm>
+#include <utility>
+#include <vector>
+#include "paddle/fluid/framework/op_registry.h"
+
+namespace paddle {
+namespace operators {
+
+template <typename DeviceContext, typename T>
+class ArgsortKernel : public framework::OpKernel<T> {
+ public:
+  void Compute(const framework::ExecutionContext& ctx) const override {
+    auto* input = ctx.Input<framework::Tensor>("X");
+    auto* output = ctx.Output<framework::Tensor>("Out");
+    auto* indices = ctx.Output<framework::Tensor>("Indices");
+    int axis = ctx.Attr<int>("axis");
+
+    auto in_dims = input->dims();
+    axis = (axis < 0) ? (in_dims.size() + axis) : axis;
+
+    const T* in_data = input->data<T>();
+    T* out_data = output->mutable_data<T>(ctx.GetPlace());
+    int64_t* ids_data = indices->mutable_data<int64_t>(ctx.GetPlace());
+
+    int64_t groups = input->numel() / in_dims[axis];
+    int64_t stride = (axis == in_dims.size() - 1)
+                         ? 1
+                         : framework::product(framework::slice_ddim(
+                               in_dims, axis + 1, in_dims.size()));
+
+    for (int64_t i = 0; i < groups; ++i) {
+      int64_t idx = i;
+      std::vector<int64_t> shape_vec(in_dims.size(), 0);
+      for (int64_t dim = in_dims.size() - 1; dim >= 0; --dim) {
+        if (dim != axis) {
+          shape_vec[dim] = idx % in_dims[dim];
+          idx /= in_dims[dim];
+        }
+      }
+
+      int64_t start_index = shape_vec[0];
+      for (int64_t dim = 0; dim < in_dims.size() - 1; ++dim) {
+        start_index = start_index * in_dims[dim + 1] + shape_vec[dim + 1];
+      }
+
+      std::vector<int64_t> org_index_vec(in_dims[axis], start_index);
+      for (int64_t j = 1; j < in_dims[axis]; ++j) {
+        org_index_vec[j] += j * stride;
+      }
+
+      std::sort(org_index_vec.begin(), org_index_vec.end(),
+                [in_data](const int64_t v1, const int64_t v2) {
+                  return in_data[v1] < in_data[v2];
+                });
+
+      for (size_t j = 0; j < org_index_vec.size(); ++j) {
+        int64_t index = start_index + j * stride;
+        out_data[index] = in_data[org_index_vec[j]];
+        ids_data[index] = (org_index_vec[j] - start_index) / stride;
+      }
+    }
+  }
+};
+
+}  // namespace operators
+}  // namespace paddle
diff --git a/paddle/fluid/operators/array_to_lod_tensor_op.cc b/paddle/fluid/operators/array_to_lod_tensor_op.cc
index 5db2e4540e..149226e92d 100644
--- a/paddle/fluid/operators/array_to_lod_tensor_op.cc
+++ b/paddle/fluid/operators/array_to_lod_tensor_op.cc
@@ -123,8 +123,7 @@ class ArrayToLoDTensorOp : public framework::OperatorBase {
 class ArrayToLoDTensorOpProtoMaker : public framework::OpProtoAndCheckerMaker {
  public:
-  ArrayToLoDTensorOpProtoMaker(OpProto *proto, OpAttrChecker *op_checker)
-      : OpProtoAndCheckerMaker(proto, op_checker) {
+  void Make() override {
     AddInput("X",
              "(std::vector<LodTensor>) A vector of tensors that is going to "
              "be casted to a big LoDTensor.");
diff --git a/paddle/fluid/operators/assign_op.cc b/paddle/fluid/operators/assign_op.cc
index d372213e1b..d9294048a9 100644
--- a/paddle/fluid/operators/assign_op.cc
+++ b/paddle/fluid/operators/assign_op.cc
@@ -94,8 +94,7 @@ class AssignOp : public framework::OperatorBase {
 class AssignOpProtoMaker : public framework::OpProtoAndCheckerMaker {
  public:
-  AssignOpProtoMaker(OpProto *proto, OpAttrChecker *op_checker)
-      : OpProtoAndCheckerMaker(proto, op_checker) {
+  void Make() override {
     AddInput("X",
              "(LoDTensor, SelectedRows or LoDTensorArray) The input variable "
              "could be LoDTensor, SelectedRows or LoDTensorArray.")
diff --git a/paddle/fluid/operators/assign_value_op.cc b/paddle/fluid/operators/assign_value_op.cc
index 993610fded..a757916be7 100644
--- a/paddle/fluid/operators/assign_value_op.cc
+++ b/paddle/fluid/operators/assign_value_op.cc
@@ -45,8 +45,7 @@ class AssignValueOp : public framework::OperatorWithKernel {
 class AssignValueOpMaker : public framework::OpProtoAndCheckerMaker {
  public:
-  AssignValueOpMaker(OpProto *proto, OpAttrChecker *op_checker)
-      : OpProtoAndCheckerMaker(proto, op_checker) {
+  void Make() override {
     AddOutput("Out", "(Tensor) Output tensor of assign_value operator.");
     AddAttr<std::vector<int>>("shape",
                               "(vector<int>) "
@@ -71,6 +70,7 @@ $$Out = values$$
 
 namespace ops = paddle::operators;
 
-REGISTER_OPERATOR(assign_value, ops::AssignValueOp, ops::AssignValueOpMaker);
+REGISTER_OPERATOR(assign_value, ops::AssignValueOp, ops::AssignValueOpMaker,
+                  paddle::framework::EmptyGradOpMaker);
 REGISTER_OP_CPU_KERNEL(assign_value, ops::AssignValueKernel<int>,
                        ops::AssignValueKernel<float>);
diff --git a/paddle/fluid/operators/auc_op.cc b/paddle/fluid/operators/auc_op.cc
index a168eaeab5..5edecd18e6 100644
--- a/paddle/fluid/operators/auc_op.cc
+++
b/paddle/fluid/operators/auc_op.cc @@ -24,50 +24,60 @@ class AucOp : public framework::OperatorWithKernel { protected: void InferShape(framework::InferShapeContext *ctx) const override { - PADDLE_ENFORCE(ctx->HasInput("Out"), "Input of Out should not be null."); - PADDLE_ENFORCE(ctx->HasInput("Indices"), - "Input of Indices should not be null."); + PADDLE_ENFORCE(ctx->HasInput("Predict"), + "Input of Out should not be null."); PADDLE_ENFORCE(ctx->HasInput("Label"), "Input of Label should not be null."); - auto inference_height = ctx->GetInputDim("Out")[0]; + auto predict_width = ctx->GetInputDim("Predict")[1]; + PADDLE_ENFORCE_EQ(predict_width, 2, "Only support binary classification"); + auto predict_height = ctx->GetInputDim("Predict")[0]; auto label_height = ctx->GetInputDim("Label")[0]; - PADDLE_ENFORCE_EQ(inference_height, label_height, + PADDLE_ENFORCE_EQ(predict_height, label_height, "Out and Label should have same height."); + int num_thres = ctx->Attrs().Get("num_thresholds"); + ctx->SetOutputDim("AUC", {1}); - ctx->ShareLoD("Out", /*->*/ "AUC"); + ctx->SetOutputDim("TPOut", {num_thres}); + ctx->SetOutputDim("TNOut", {num_thres}); + ctx->SetOutputDim("FPOut", {num_thres}); + ctx->SetOutputDim("FNOut", {num_thres}); + + ctx->ShareLoD("Predict", /*->*/ "AUC"); } protected: framework::OpKernelType GetExpectedKernelType( const framework::ExecutionContext &ctx) const override { return framework::OpKernelType( - framework::ToDataType(ctx.Input("Out")->type()), + framework::ToDataType(ctx.Input("Predict")->type()), ctx.device_context()); } }; class AucOpMaker : public framework::OpProtoAndCheckerMaker { public: - AucOpMaker(OpProto *proto, OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { - AddInput("Out", - "A floating point 2D tensor, values are in the range [0, 1]." - "Each row is sorted in descending order. This input should be the" - "output of topk." + void Make() override { + AddInput("Predict", + "A floating point 2D tensor with shape [batch_size, 2], values " + "are in the range [0, 1]." "Typically, this tensor indicates the probability of each label"); - AddInput("Indices", - "An int 2D tensor, indicating the indices of original" - "tensor before sorting. Typically, this tensor indicates which " - "label the probability stands for."); AddInput("Label", - "A 2D int tensor indicating the label of the training data." - "The height is batch size and width is always 1."); + "A 2D int tensor indicating the label of the training data. 
" + "shape: [batch_size, 1]"); + AddInput("TP", "True-Positive value."); + AddInput("FP", "False-Positive value."); + AddInput("TN", "True-Negative value."); + AddInput("FN", "False-Negative value."); // TODO(typhoonzero): support weight input AddOutput("AUC", "A scalar representing the " "current area-under-the-curve."); + AddOutput("TPOut", "True-Positive value."); + AddOutput("FPOut", "False-Positive value."); + AddOutput("TNOut", "True-Negative value."); + AddOutput("FNOut", "False-Negative value."); AddAttr("curve", "Curve type, can be 'ROC' or 'PR'.") .SetDefault("ROC"); diff --git a/paddle/fluid/operators/auc_op.h b/paddle/fluid/operators/auc_op.h index 8b016c3d31..0a18585edb 100644 --- a/paddle/fluid/operators/auc_op.h +++ b/paddle/fluid/operators/auc_op.h @@ -31,58 +31,54 @@ template class AucKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& ctx) const override { - auto* inference = ctx.Input("Out"); + auto* predict = ctx.Input("Predict"); auto* label = ctx.Input("Label"); auto* auc = ctx.Output("AUC"); + // Only use output var for now, make sure it's persistable and + // not cleaned up for each batch. + auto* true_positive = ctx.Output("TPOut"); + auto* false_positive = ctx.Output("FPOut"); + auto* true_negative = ctx.Output("TNOut"); + auto* false_negative = ctx.Output("FNOut"); - float* auc_data = auc->mutable_data(ctx.GetPlace()); + auto* auc_data = auc->mutable_data(ctx.GetPlace()); std::string curve = ctx.Attr("curve"); int num_thresholds = ctx.Attr("num_thresholds"); - std::vector thresholds_list; + std::vector thresholds_list; thresholds_list.reserve(num_thresholds); for (int i = 1; i < num_thresholds - 1; i++) { - thresholds_list[i] = static_cast(i) / (num_thresholds - 1); + thresholds_list[i] = static_cast(i) / (num_thresholds - 1); } - const float kEpsilon = 1e-7; + const double kEpsilon = 1e-7; thresholds_list[0] = 0.0f - kEpsilon; thresholds_list[num_thresholds - 1] = 1.0f + kEpsilon; - size_t batch_size = inference->dims()[0]; - size_t inference_width = inference->dims()[1]; + size_t batch_size = predict->dims()[0]; + size_t inference_width = predict->dims()[1]; - const T* inference_data = inference->data(); - const int64_t* label_data = label->data(); + const T* inference_data = predict->data(); + const auto* label_data = label->data(); - // Create local tensor for storing the curve: TP, FN, TN, FP - // TODO(typhoonzero): use eigen op to caculate these values. - Tensor true_positive, false_positive, true_negative, false_negative; - - true_positive.Resize({num_thresholds}); - false_negative.Resize({num_thresholds}); - true_negative.Resize({num_thresholds}); - false_positive.Resize({num_thresholds}); - - int64_t* tp_data = true_positive.mutable_data(ctx.GetPlace()); - int64_t* fn_data = false_negative.mutable_data(ctx.GetPlace()); - int64_t* tn_data = true_negative.mutable_data(ctx.GetPlace()); - int64_t* fp_data = false_positive.mutable_data(ctx.GetPlace()); + auto* tp_data = true_positive->mutable_data(ctx.GetPlace()); + auto* fn_data = false_negative->mutable_data(ctx.GetPlace()); + auto* tn_data = true_negative->mutable_data(ctx.GetPlace()); + auto* fp_data = false_positive->mutable_data(ctx.GetPlace()); for (int idx_thresh = 0; idx_thresh < num_thresholds; idx_thresh++) { - // caculate TP, FN, TN, FP for current thresh + // calculate TP, FN, TN, FP for current thresh int64_t tp = 0, fn = 0, tn = 0, fp = 0; for (size_t i = 0; i < batch_size; i++) { - // NOTE: label_data used as bool, labels >0 will be treated as true. 
+ // NOTE: label_data used as bool, labels > 0 will be treated as true. if (label_data[i]) { - // use first(max) data in each row - if (inference_data[i * inference_width] >= + if (inference_data[i * inference_width + 1] >= (thresholds_list[idx_thresh])) { tp++; } else { fn++; } } else { - if (inference_data[i * inference_width] >= + if (inference_data[i * inference_width + 1] >= (thresholds_list[idx_thresh])) { fp++; } else { @@ -91,27 +87,27 @@ class AucKernel : public framework::OpKernel { } } // store rates - tp_data[idx_thresh] = tp; - fn_data[idx_thresh] = fn; - tn_data[idx_thresh] = tn; - fp_data[idx_thresh] = fp; + tp_data[idx_thresh] += tp; + fn_data[idx_thresh] += fn; + tn_data[idx_thresh] += tn; + fp_data[idx_thresh] += fp; } // epsilon to avoid divide by zero. - float epsilon = 1e-6; + double epsilon = 1e-6; // Riemann sum to caculate auc. Tensor tp_rate, fp_rate, rec_rate; tp_rate.Resize({num_thresholds}); fp_rate.Resize({num_thresholds}); rec_rate.Resize({num_thresholds}); - float* tp_rate_data = tp_rate.mutable_data(ctx.GetPlace()); - float* fp_rate_data = fp_rate.mutable_data(ctx.GetPlace()); - float* rec_rate_data = rec_rate.mutable_data(ctx.GetPlace()); + auto* tp_rate_data = tp_rate.mutable_data(ctx.GetPlace()); + auto* fp_rate_data = fp_rate.mutable_data(ctx.GetPlace()); + auto* rec_rate_data = rec_rate.mutable_data(ctx.GetPlace()); for (int i = 0; i < num_thresholds; i++) { - tp_rate_data[i] = (static_cast(tp_data[i]) + epsilon) / + tp_rate_data[i] = (static_cast(tp_data[i]) + epsilon) / (tp_data[i] + fn_data[i] + epsilon); fp_rate_data[i] = - static_cast(fp_data[i]) / (fp_data[i] + tn_data[i] + epsilon); - rec_rate_data[i] = (static_cast(tp_data[i]) + epsilon) / + static_cast(fp_data[i]) / (fp_data[i] + tn_data[i] + epsilon); + rec_rate_data[i] = (static_cast(tp_data[i]) + epsilon) / (tp_data[i] + fp_data[i] + epsilon); } *auc_data = 0.0f; diff --git a/paddle/fluid/operators/average_accumulates_op.cc b/paddle/fluid/operators/average_accumulates_op.cc index b21deaf925..f389eab605 100644 --- a/paddle/fluid/operators/average_accumulates_op.cc +++ b/paddle/fluid/operators/average_accumulates_op.cc @@ -19,28 +19,28 @@ namespace operators { template <> void GetAccumulators( - const framework::ExecutionContext& ctx, int64_t* num_updates_, - int64_t* num_accumulates_, int64_t* old_num_accumulates_) { + const framework::ExecutionContext& ctx, int64_t* num_updates, + int64_t* num_accumulates, int64_t* old_num_accumulates) { auto* in_old_num_accumulates = ctx.Input("in_old_num_accumulates"); auto* in_num_accumulates = ctx.Input("in_num_accumulates"); auto* in_num_updates = ctx.Input("in_num_updates"); - *old_num_accumulates_ = in_old_num_accumulates->data()[0]; - *num_accumulates_ = in_num_accumulates->data()[0]; - *num_updates_ = in_num_updates->data()[0]; + *old_num_accumulates = in_old_num_accumulates->data()[0]; + *num_accumulates = in_num_accumulates->data()[0]; + *num_updates = in_num_updates->data()[0]; } template <> void SetAccumulators( - const framework::ExecutionContext& ctx, int64_t num_updates_, - int64_t num_accumulates_, int64_t old_num_accumulates_) { + const framework::ExecutionContext& ctx, int64_t num_updates, + int64_t num_accumulates, int64_t old_num_accumulates) { auto* out_old_num_accumulates = ctx.Output("out_old_num_accumulates"); auto* out_num_accumulates = ctx.Output("out_num_accumulates"); auto* out_num_updates = ctx.Output("out_num_updates"); - out_old_num_accumulates->data()[0] = old_num_accumulates_; - out_num_accumulates->data()[0] = 
num_accumulates_; - out_num_updates->data()[0] = num_updates_; + out_old_num_accumulates->data()[0] = old_num_accumulates; + out_num_accumulates->data()[0] = num_accumulates; + out_num_updates->data()[0] = num_updates; } class AverageAccumulatesOp : public framework::OperatorWithKernel { @@ -111,8 +111,7 @@ class AverageAccumulatesOp : public framework::OperatorWithKernel { class AverageAccumulatesOpMaker : public framework::OpProtoAndCheckerMaker { public: - AverageAccumulatesOpMaker(OpProto* proto, OpAttrChecker* op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("param", "(Tensor), The parameter to be accumulated."); AddInput("in_sum_1", "(Tensor), A tensor used to store the parameter " @@ -178,7 +177,7 @@ class AverageAccumulatesOpMaker : public framework::OpProtoAndCheckerMaker { AddComment(R"DOC( AverageAccumulates Operator. -Accumulate the sum of parameter whtin sliding window. The size of sliding window is +Accumulate the sum of parameter within sliding window. The size of sliding window is determined by 'average_window', 'max_average_window' and 'min_average_window'. Memory was shared by Input(in_sum_1) and Output(out_sum_1) which acts as an accumulator 'sum_1'. 'sum_2', 'sum_3', 'num_accumulates', 'old_num_accumulates' and 'num_updates' were the same as 'sum_1'. diff --git a/paddle/fluid/operators/average_accumulates_op.h b/paddle/fluid/operators/average_accumulates_op.h index 07ac5ced11..3958d3f685 100644 --- a/paddle/fluid/operators/average_accumulates_op.h +++ b/paddle/fluid/operators/average_accumulates_op.h @@ -54,8 +54,9 @@ class AverageAccumulatesKernel : public framework::OpKernel { float average_window = ctx.Attr("average_window"); int64_t max_average_window = ctx.Attr("max_average_window"); int64_t min_average_window = ctx.Attr("min_average_window"); - min_average_window = - std::min(min_average_window, max_average_window); + PADDLE_ENFORCE_LE(min_average_window, max_average_window, + "min_average_window shouldn't be larger than " + "max_average_window"); // Get inputs auto* param = ctx.Input("param"); diff --git a/paddle/fluid/operators/batch_norm_mkldnn_op.cc b/paddle/fluid/operators/batch_norm_mkldnn_op.cc index 0e4a56d4a4..9ab2179b5f 100644 --- a/paddle/fluid/operators/batch_norm_mkldnn_op.cc +++ b/paddle/fluid/operators/batch_norm_mkldnn_op.cc @@ -19,22 +19,15 @@ limitations under the License. */ namespace paddle { namespace operators { -using Tensor = framework::Tensor; +using batch_norm_bwd = mkldnn::batch_normalization_backward; +using batch_norm_fwd = mkldnn::batch_normalization_forward; +using mkldnn::memory; +using mkldnn::primitive; +using mkldnn::reorder; +using mkldnn::stream; using paddle::platform::MKLDNNDeviceContext; using paddle::platform::MKLDNNMemDesc; -using mkldnn::memory; - -template -using EigenArrayMap = - Eigen::Map>; -template -using ConstEigenArrayMap = - Eigen::Map>; -template -using EigenVectorArrayMap = Eigen::Map>; -template -using ConstEigenVectorArrayMap = - Eigen::Map>; +using platform::to_void_cast; namespace { template @@ -64,24 +57,16 @@ void run_batch_norm_op(Args &&... 
args) { mkldnn::stream(mkldnn::stream::kind::eager).submit(pipeline).wait(); } -template -inline void *cast_const_to_void(const T *t) { - return static_cast(const_cast(t)); -} } // namespace template class BatchNormMKLDNNOpKernel : public paddle::framework::OpKernel { public: void Compute(const framework::ExecutionContext &ctx) const override { - auto data_layout_str = ctx.Attr("data_layout"); - auto data_layout = framework::StringToDataLayout(data_layout_str); - PADDLE_ENFORCE(data_layout == framework::DataLayout::kNCHW, - "MKLDNN batch normalization handles only NCHW data layout"); - const float epsilon = ctx.Attr("epsilon"); const float momentum = ctx.Attr("momentum"); const bool is_test = ctx.Attr("is_test"); + const bool fuse_with_relu = ctx.Attr("fuse_with_relu"); const auto *x = ctx.Input("X"); const auto *mean = ctx.Input("Mean"); @@ -99,41 +84,57 @@ class BatchNormMKLDNNOpKernel : public paddle::framework::OpKernel { const auto *scale = ctx.Input("Scale"); const auto *shift = ctx.Input("Bias"); - y->mutable_data(ctx.GetPlace()); - mean_out->mutable_data(ctx.GetPlace()); - variance_out->mutable_data(ctx.GetPlace()); + PADDLE_ENFORCE(x->layout() == DataLayout::kMKLDNN && + x->format() != memory::format::format_undef, + "Wrong layout/format set for Input x tensor"); + + const T *x_data = x->data(); + const T *mean_data = mean->data(); + const T *variance_data = variance->data(); + T *y_data = y->mutable_data(ctx.GetPlace()); + T *mean_out_data = mean_out->mutable_data(ctx.GetPlace()); + T *variance_out_data = variance_out->mutable_data(ctx.GetPlace()); + T *batch_mean_data = nullptr; + T *batch_variance_data = nullptr; if (!is_test) { - batch_mean->mutable_data(ctx.GetPlace()); - batch_variance->mutable_data(ctx.GetPlace()); + batch_mean_data = batch_mean->mutable_data(ctx.GetPlace()); + batch_variance_data = batch_variance->mutable_data(ctx.GetPlace()); } auto propagation = is_test == true ? 
mkldnn::prop_kind::forward_scoring : mkldnn::prop_kind::forward_training; - auto dims = paddle::framework::vectorize2int(x->dims()); - - auto src_md = - MKLDNNMemDesc(dims, memory::data_type::f32, memory::format::nchw); - auto dst_md = - MKLDNNMemDesc(dims, memory::data_type::f32, memory::format::nchw); - - auto src_pd = mkldnn::memory::primitive_desc{src_md, mkldnn_engine}; - auto dst_pd = mkldnn::memory::primitive_desc{dst_md, mkldnn_engine}; - - auto src = mkldnn::memory{src_pd, cast_const_to_void(x->data())}; - auto dst = mkldnn::memory{dst_pd, y->data()}; + auto src_tz = paddle::framework::vectorize2int(x->dims()); + auto scale_tz = paddle::framework::vectorize2int(scale->dims()); + PADDLE_ENFORCE(scale_tz.size() == 1, "Dims of scale tensor is NOT 1"); + const unsigned int ic = scale_tz[0]; unsigned flags = mkldnn::use_scale_shift; if (is_test) flags |= mkldnn::use_global_stats; + if (fuse_with_relu) flags |= mkldnn::fuse_bn_relu; + + // create mkldnn memory from input x tensor + mkldnn::memory::format input_format = + platform::MKLDNNFormatForSize(src_tz.size(), x->format()); + auto src_memory = memory( + {{{src_tz}, memory::data_type::f32, input_format}, mkldnn_engine}, + to_void_cast(x_data)); + + // create primitive descriptor for batch norm forward using bn_fwd_types = bn_type_traits; - auto batch_norm_fwd_desc = - bn_fwd_types::op_desc{propagation, src_md, epsilon, flags}; - auto batch_norm_fwd_pd = - bn_fwd_types::op_prim{batch_norm_fwd_desc, mkldnn_engine}; + auto batch_norm_fwd_desc = bn_fwd_types::op_desc{ + propagation, src_memory.get_primitive_desc().desc(), epsilon, flags}; + std::shared_ptr batch_norm_fwd_pd = + std::shared_ptr( + new batch_norm_fwd::primitive_desc(batch_norm_fwd_desc, + mkldnn_engine)); - const unsigned int ic = dims[1]; + // Save the pd to be used in backward pass + const std::string key = ctx.op().Output("SavedMean"); + const std::string key_batch_norm_fwd_pd = key + "@bn_fwd_pd"; + dev_ctx.SetBlob(key_batch_norm_fwd_pd, batch_norm_fwd_pd); // MKLDNN requires a single piece of memory for scale and shift/bias data const size_t scaleshift_size = 2 * ic; @@ -143,73 +144,58 @@ class BatchNormMKLDNNOpKernel : public paddle::framework::OpKernel { copy_to_weights(scale->data(), scale->data() + ic, shift->data(), shift->data() + ic, &scaleshift_data); - auto scaleshift_memory = mkldnn::memory{ - batch_norm_fwd_pd.weights_primitive_desc(), scaleshift_data.data()}; + // crate mkldnn memory for weights(scale/shift) + auto scaleshift_memory = memory(batch_norm_fwd_pd->weights_primitive_desc(), + scaleshift_data.data()); - if (is_test) { - auto mean_memory = mkldnn::memory{batch_norm_fwd_pd.mean_primitive_desc(), - cast_const_to_void(mean->data())}; + // create mkldnn memory for output y tensor + auto dst_memory = memory(batch_norm_fwd_pd->dst_primitive_desc(), y_data); + if (is_test) { + // create mkldnn memory for stats (as input) + auto mean_memory = memory(batch_norm_fwd_pd->mean_primitive_desc(), + to_void_cast(mean_data)); auto variance_memory = - mkldnn::memory{batch_norm_fwd_pd.variance_primitive_desc(), - cast_const_to_void(variance->data())}; + memory(batch_norm_fwd_pd->variance_primitive_desc(), + to_void_cast(variance_data)); run_batch_norm_op( - batch_norm_fwd_pd, src, (const mkldnn::primitive::at &)mean_memory, + *batch_norm_fwd_pd, src_memory, + (const mkldnn::primitive::at &)mean_memory, (const mkldnn::primitive::at &)variance_memory, scaleshift_memory, - dst); + dst_memory); } else { + // create mkldnn memory for stats (as output) auto mean_memory 
= - mkldnn::memory{batch_norm_fwd_pd.mean_primitive_desc(), - cast_const_to_void(batch_mean->data())}; + memory(batch_norm_fwd_pd->mean_primitive_desc(), batch_mean_data); + auto variance_memory = memory( + batch_norm_fwd_pd->variance_primitive_desc(), batch_variance_data); - auto variance_memory = - mkldnn::memory{batch_norm_fwd_pd.variance_primitive_desc(), - cast_const_to_void(batch_variance->data())}; - - run_batch_norm_op(batch_norm_fwd_pd, src, - scaleshift_memory, dst, + run_batch_norm_op(*batch_norm_fwd_pd, src_memory, + scaleshift_memory, dst_memory, mean_memory, variance_memory); } if (!is_test) { - const unsigned int in = dims[0]; - const unsigned int sample_size = x->numel() / in / ic; - - // saved_xx is use just in this batch of data - EigenVectorArrayMap saved_mean_e( - batch_mean->mutable_data(ctx.GetPlace()), ic); - EigenVectorArrayMap saved_variance_e( - batch_variance->mutable_data(ctx.GetPlace()), ic); - saved_mean_e.setZero(); - saved_variance_e.setZero(); - - const unsigned int x_arr_size = in * ic; - ConstEigenArrayMap x_arr(x->data(), sample_size, x_arr_size); - for (unsigned int nc = 0; nc < x_arr_size; ++nc) { - saved_mean_e(nc % ic) += x_arr.col(nc).sum(); - } - saved_mean_e /= in * sample_size; - for (unsigned int nc = 0; nc < x_arr_size; ++nc) { - saved_variance_e(nc % ic) += - (x_arr.col(nc) - saved_mean_e(nc % ic)).matrix().squaredNorm(); - } - saved_variance_e /= in * sample_size; - - ConstEigenVectorArrayMap mean_arr{mean->data(), ic}; - ConstEigenVectorArrayMap variance_arr{variance->data(), ic}; - - EigenVectorArrayMap running_mean_arr( - mean_out->mutable_data(ctx.GetPlace()), ic); - EigenVectorArrayMap running_var_arr( - variance_out->mutable_data(ctx.GetPlace()), ic); + // mkldnn only compute stats for current batch + // so we need compute momentum stats via Eigen lib + EigenVectorArrayMap batch_mean_e(batch_mean_data, ic); + EigenVectorArrayMap batch_variance_e(batch_variance_data, ic); + ConstEigenVectorArrayMap mean_e(mean_data, ic); + ConstEigenVectorArrayMap variance_e{variance_data, ic}; + + EigenVectorArrayMap running_mean_e(mean_out_data, ic); + EigenVectorArrayMap running_variance_e(variance_out_data, ic); auto one_minus_momentum = 1. 
- momentum; - running_mean_arr = - mean_arr * momentum + saved_mean_e * one_minus_momentum; - running_var_arr = - variance_arr * momentum + saved_variance_e * one_minus_momentum; + running_mean_e = mean_e * momentum + batch_mean_e * one_minus_momentum; + running_variance_e = + variance_e * momentum + batch_variance_e * one_minus_momentum; } + + y->set_layout(DataLayout::kMKLDNN); + y->set_format( + (memory::format)dst_memory.get_primitive_desc().desc().data.format); } }; @@ -217,11 +203,6 @@ template class BatchNormMKLDNNGradOpKernel : public paddle::framework::OpKernel { public: void Compute(const paddle::framework::ExecutionContext &ctx) const override { - auto data_layout_str = ctx.Attr("data_layout"); - auto data_layout = framework::StringToDataLayout(data_layout_str); - PADDLE_ENFORCE(data_layout == framework::DataLayout::kNCHW, - "MKLDNN batch normalization handles only NCHW data layout"); - auto &dev_ctx = ctx.template device_context(); auto mkldnn_engine = dev_ctx.GetEngine(); @@ -238,88 +219,138 @@ class BatchNormMKLDNNGradOpKernel : public paddle::framework::OpKernel { auto *diff_scale = ctx.Output(framework::GradVarName("Scale")); auto *diff_shift = ctx.Output(framework::GradVarName("Bias")); - diff_x->mutable_data(ctx.GetPlace()); - diff_scale->mutable_data(ctx.GetPlace()); - diff_shift->mutable_data(ctx.GetPlace()); - - auto dims = paddle::framework::vectorize2int(x->dims()); - unsigned flags = mkldnn::use_scale_shift | !mkldnn::use_global_stats; - - auto src_md = - MKLDNNMemDesc(dims, memory::data_type::f32, memory::format::nchw); - auto dst_md = - MKLDNNMemDesc(dims, memory::data_type::f32, memory::format::nchw); - auto diff_src_md = - MKLDNNMemDesc(dims, memory::data_type::f32, memory::format::nchw); - auto diff_dst_md = - MKLDNNMemDesc(dims, memory::data_type::f32, memory::format::nchw); + PADDLE_ENFORCE(diff_y->layout() == DataLayout::kMKLDNN && + diff_y->format() != memory::format::format_undef, + "Wrong layout/format set for Input diff_y tensor"); + + const T *x_data = x->data(); + const T *diff_y_data = diff_y->data(); + const T *batch_mean_data = batch_mean->data(); + const T *batch_variance_data = batch_variance->data(); + const T *scale_data = scale->data(); + const T *shift_data = shift->data(); + T *diff_x_data = diff_x->mutable_data(ctx.GetPlace()); + T *diff_scale_data = diff_scale->mutable_data(ctx.GetPlace()); + T *diff_shift_data = diff_shift->mutable_data(ctx.GetPlace()); + + auto src_tz = paddle::framework::vectorize2int(x->dims()); + auto diff_src_tz = src_tz; + auto dst_tz = src_tz; + auto diff_dst_tz = dst_tz; + auto scale_tz = paddle::framework::vectorize2int(scale->dims()); + PADDLE_ENFORCE(scale_tz.size() == 1, "Dims of scale tensor is NOT 1"); + + const unsigned int ic = scale_tz[0]; + + // Retrieve bn_fwd_pd from device context + const std::string key = ctx.op().Input("SavedMean"); + const std::string key_batch_norm_fwd_pd = key + "@bn_fwd_pd"; + auto batch_norm_fwd_pd = + std::static_pointer_cast( + dev_ctx.GetBlob(key_batch_norm_fwd_pd)); + PADDLE_ENFORCE(batch_norm_fwd_pd != nullptr, + "Fail to find batch_norm_fwd_pd in device context"); using bn_bwd_types = bn_type_traits; - using bn_fwd_types = bn_type_traits; - auto batch_norm_fwd_desc = bn_fwd_types::op_desc{ - mkldnn::prop_kind::forward_training, src_md, epsilon, flags}; - auto batch_norm_fwd_pd = - bn_fwd_types::op_prim{batch_norm_fwd_desc, mkldnn_engine}; + // create mkldnn memory from input diff_y tensor - auto batch_norm_bwd_desc = bn_bwd_types::op_desc{ - mkldnn::prop_kind::backward, 
diff_dst_md, dst_md, epsilon, flags}; - auto batch_norm_bwd_pd = bn_bwd_types::op_prim{ - batch_norm_bwd_desc, mkldnn_engine, batch_norm_fwd_pd}; + mkldnn::memory::format dst_format = + platform::MKLDNNFormatForSize(src_tz.size(), diff_y->format()); - auto src = mkldnn::memory{{src_md, mkldnn_engine}, - cast_const_to_void(x->data())}; + auto user_diff_dst_memory = memory( + {{{diff_dst_tz}, memory::data_type::f32, dst_format}, mkldnn_engine}, + to_void_cast(diff_y_data)); - auto mean = mkldnn::memory{batch_norm_bwd_pd.mean_primitive_desc(), - cast_const_to_void(batch_mean->data())}; + // create mkldnn memory from input x tensor + mkldnn::memory::format input_format = + platform::MKLDNNFormatForSize(src_tz.size(), x->format()); - auto variance = - mkldnn::memory{batch_norm_bwd_pd.variance_primitive_desc(), - cast_const_to_void(batch_variance->data())}; + auto src_memory = memory( + {{{src_tz}, memory::data_type::f32, input_format}, mkldnn_engine}, + to_void_cast(x_data)); - auto diff_dst = mkldnn::memory{{diff_dst_md, mkldnn_engine}, - cast_const_to_void(diff_y->data())}; + // for diff_dst, try to use same format as dst in forward pass + auto diff_dst_pd = batch_norm_fwd_pd.get()->dst_primitive_desc(); + auto diff_dst_md = diff_dst_pd.desc(); - const unsigned int ic = dims[1]; + // create primitive descriptor for batch norm backward + unsigned flags = mkldnn::use_scale_shift; + auto batch_norm_bwd_desc = bn_bwd_types::op_desc{ + mkldnn::prop_kind::backward, diff_dst_md, + src_memory.get_primitive_desc().desc(), epsilon, flags}; + auto batch_norm_bwd_pd = bn_bwd_types::op_prim{ + batch_norm_bwd_desc, mkldnn_engine, *batch_norm_fwd_pd}; + + // reorder user_diff_dst if it's not in preferred format + auto diff_dst_memory = user_diff_dst_memory; + primitive reorder_diff_dst; + bool is_diff_dst_reordered = false; + if (diff_dst_pd != user_diff_dst_memory.get_primitive_desc()) { + diff_dst_memory = memory(diff_dst_pd); + reorder_diff_dst = reorder(user_diff_dst_memory, diff_dst_memory); + is_diff_dst_reordered = true; + } + + // create mkldnn memory for input tensors (src/mean/variance) + auto mean_memory = memory(batch_norm_bwd_pd.mean_primitive_desc(), + to_void_cast(batch_mean_data)); + auto variance_memory = memory(batch_norm_bwd_pd.variance_primitive_desc(), + to_void_cast(batch_variance_data)); + // MKLDNN requires a single piece of memory for scale and shift/bias data const size_t scaleshift_size = 2 * ic; std::vector scaleshift_data; scaleshift_data.reserve(scaleshift_size); - copy_to_weights(scale->data(), scale->data() + ic, shift->data(), - shift->data() + ic, &scaleshift_data); + copy_to_weights(scale_data, scale_data + ic, shift_data, shift_data + ic, + &scaleshift_data); - auto scaleshift_memory = mkldnn::memory{ - batch_norm_bwd_pd.weights_primitive_desc(), scaleshift_data.data()}; + // create mkldnn memory for input tensors (scale/shift) + auto scaleshift_memory = memory(batch_norm_bwd_pd.weights_primitive_desc(), + scaleshift_data.data()); + // create mkldnn memory for output diff weights (combined scale/shift) std::vector diff_scaleshift_data; diff_scaleshift_data.reserve(scaleshift_size); - copy_to_weights(diff_scale->data(), diff_scale->data() + ic, - diff_shift->data(), diff_shift->data() + ic, - &diff_scaleshift_data); - auto diff_scaleshift_memory = - mkldnn::memory{batch_norm_bwd_pd.diff_weights_primitive_desc(), - diff_scaleshift_data.data()}; - - auto diff_src = mkldnn::memory{{diff_src_md, mkldnn_engine}, - static_cast(diff_x->data())}; - - run_batch_norm_op( - 
batch_norm_bwd_pd, src, mean, variance, diff_dst, scaleshift_memory,
-        diff_src, diff_scaleshift_memory);
-
+        memory(batch_norm_bwd_pd.diff_weights_primitive_desc(),
+               diff_scaleshift_data.data());
+
+    // here assume diff_src is in the same format as src
+    auto diff_src_memory = memory(src_memory.get_primitive_desc(), diff_x_data);
+
+    // finally create batch_norm backward primitive
+    auto batch_norm_bwd_prim =
+        batch_norm_bwd(batch_norm_bwd_pd, src_memory, mean_memory,
+                       variance_memory, diff_dst_memory, scaleshift_memory,
+                       diff_src_memory, diff_scaleshift_memory);
+
+    // execute optional reorder and batch_norm backward primitive
+    std::vector<primitive> pipeline;
+    if (is_diff_dst_reordered) pipeline.push_back(reorder_diff_dst);
+    pipeline.push_back(batch_norm_bwd_prim);
+    stream(stream::kind::eager).submit(pipeline).wait();
+
+    // copy back diff scale/shift to output tensors (diff scale/shift)
+    diff_scaleshift_data.resize(scaleshift_size);
     auto it = std::begin(diff_scaleshift_data);
-    std::copy(it, std::next(it, ic), diff_scale->data<T>());
+    std::copy(it, std::next(it, ic), diff_scale_data);
     std::copy(std::next(it, ic), std::end(diff_scaleshift_data),
-              diff_shift->data<T>());
+              diff_shift_data);
+
+    // set layout/format of output tensors
+    diff_x->set_layout(DataLayout::kMKLDNN);
+    diff_x->set_format((memory::format)diff_src_memory.get_primitive_desc()
+                           .desc()
+                           .data.format);
   }
 };
 }  // namespace operators
 }  // namespace paddle
 namespace ops = paddle::operators;
-REGISTER_OP_KERNEL(batch_norm, MKLDNN, paddle::platform::CPUPlace,
+REGISTER_OP_KERNEL(batch_norm, MKLDNN, ::paddle::platform::CPUPlace,
                    ops::BatchNormMKLDNNOpKernel<float>);
-REGISTER_OP_KERNEL(batch_norm_grad, MKLDNN, paddle::platform::CPUPlace,
+REGISTER_OP_KERNEL(batch_norm_grad, MKLDNN, ::paddle::platform::CPUPlace,
                    ops::BatchNormMKLDNNGradOpKernel<float>);
diff --git a/paddle/fluid/operators/batch_norm_op.cc b/paddle/fluid/operators/batch_norm_op.cc
index b4bd40d031..5912a1a17c 100644
--- a/paddle/fluid/operators/batch_norm_op.cc
+++ b/paddle/fluid/operators/batch_norm_op.cc
@@ -22,22 +22,6 @@ limitations under the License.
*/ namespace paddle { namespace operators { -using Tensor = framework::Tensor; -using LoDTensor = framework::LoDTensor; -using DataLayout = framework::DataLayout; - -template -using EigenArrayMap = - Eigen::Map>; -template -using ConstEigenArrayMap = - Eigen::Map>; -template -using EigenVectorArrayMap = Eigen::Map>; -template -using ConstEigenVectorArrayMap = - Eigen::Map>; - class BatchNormOp : public framework::OperatorWithKernel { public: using framework::OperatorWithKernel::OperatorWithKernel; @@ -110,24 +94,25 @@ class BatchNormOp : public framework::OperatorWithKernel { ctx.Input("Variance")->type()), "Variance input should be of float type"); - framework::LibraryType library_{framework::LibraryType::kPlain}; + // TODO(pzelazko-intel): enable MKLDNN layout when it's ready + framework::LibraryType library = framework::LibraryType::kPlain; + framework::DataLayout layout = framework::DataLayout::kAnyLayout; #ifdef PADDLE_WITH_MKLDNN - if (library_ == framework::LibraryType::kPlain && + if (library == framework::LibraryType::kPlain && platform::CanMKLDNNBeUsed(ctx)) { - library_ = framework::LibraryType::kMKLDNN; + library = framework::LibraryType::kMKLDNN; + layout = framework::DataLayout::kMKLDNN; } #endif - // TODO(pzelazko-intel): enable MKLDNN layout when it's ready - framework::DataLayout layout = framework::DataLayout::kAnyLayout; + return framework::OpKernelType(input_data_type, ctx.GetPlace(), layout, - library_); + library); } }; class BatchNormOpMaker : public framework::OpProtoAndCheckerMaker { public: - BatchNormOpMaker(OpProto *proto, OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddAttr("is_test", "").SetDefault(false); AddAttr("momentum", "").SetDefault(0.9); AddAttr("epsilon", "") @@ -150,13 +135,15 @@ class BatchNormOpMaker : public framework::OpProtoAndCheckerMaker { AddInput("Variance", "The global variance (for training) " "or estimated Variance (for testing)"); - AddOutput("Y", "result after normalization"); + AddOutput("Y", "result after normalization").Reuse("X"); AddOutput("MeanOut", "Share memory with Mean. " - "Store the global mean when training"); + "Store the global mean when training") + .Reuse("Mean"); AddOutput("VarianceOut", "Share memory with Variance. " - "Store the global Variance when training"); + "Store the global Variance when training") + .Reuse("Variance"); AddOutput("SavedMean", "Mean of the current mini batch, " "will apply to output when training") @@ -168,6 +155,9 @@ class BatchNormOpMaker : public framework::OpProtoAndCheckerMaker { AddAttr("use_mkldnn", "(bool, default false) Only used in mkldnn kernel") .SetDefault(false); + AddAttr("fuse_with_relu", + "(bool, default false) Only used in mkldnn kernel") + .SetDefault(false); AddComment(R"DOC( Batch Normalization. 
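
The computation below is the standard batch normalization recipe that the
kernels in this file implement, sketched here for reference with the same
momentum convention as the running-statistics update in the code:

$$\mu = \frac{1}{m} \sum_{i=1}^{m} x_i$$
$$\sigma^2 = \frac{1}{m} \sum_{i=1}^{m} (x_i - \mu)^2$$
$$\hat{x}_i = \frac{x_i - \mu}{\sqrt{\sigma^2 + \epsilon}}$$
$$y_i = scale \cdot \hat{x}_i + bias$$
$$running\_stat = momentum \cdot running\_stat + (1 - momentum) \cdot batch\_stat$$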
@@ -226,6 +216,18 @@ class BatchNormKernel saved_mean_e.setZero(); saved_variance_e.setZero(); + EigenVectorArrayMap running_mean_arr( + mean_out->mutable_data(ctx.GetPlace()), C); + EigenVectorArrayMap running_var_arr( + variance_out->mutable_data(ctx.GetPlace()), C); + + if ((N * sample_size) == 1) { + LOG(WARNING) << "Only 1 element in normalization dimension, " + << "we skip the batch norm calculation, let y = x."; + framework::TensorCopySync(*x, ctx.GetPlace(), y); + return; + } + switch (data_layout) { case DataLayout::kNCHW: { ConstEigenArrayMap x_arr(x->data(), sample_size, N * C); @@ -257,10 +259,6 @@ class BatchNormKernel PADDLE_THROW("Unknown storage order: %s", data_layout_str); } - EigenVectorArrayMap running_mean_arr( - mean_out->mutable_data(ctx.GetPlace()), C); - EigenVectorArrayMap running_var_arr( - variance_out->mutable_data(ctx.GetPlace()), C); running_mean_arr = running_mean_arr * momentum + saved_mean_e * (1. - momentum); running_var_arr = @@ -367,18 +365,21 @@ class BatchNormGradOp : public framework::OperatorWithKernel { PADDLE_THROW("can't find Y@GRAD"); } - framework::LibraryType library_{framework::LibraryType::kPlain}; + // TODO(pzelazko-intel): enable MKLDNN layout when it's ready + framework::LibraryType library = framework::LibraryType::kPlain; + framework::DataLayout layout = framework::DataLayout::kAnyLayout; + #ifdef PADDLE_WITH_MKLDNN - if (library_ == framework::LibraryType::kPlain && + if (library == framework::LibraryType::kPlain && platform::CanMKLDNNBeUsed(ctx)) { - library_ = framework::LibraryType::kMKLDNN; + library = framework::LibraryType::kMKLDNN; + layout = framework::DataLayout::kMKLDNN; } #endif - // TODO(pzelazko-intel): enable MKLDNN layout when it's ready - framework::DataLayout layout = framework::DataLayout::kAnyLayout; + return framework::OpKernelType( framework::ToDataType(ctx.Input("X")->type()), ctx.GetPlace(), - layout, library_); + layout, library); } }; @@ -434,6 +435,11 @@ class BatchNormGradKernel d_bias_arr.setZero(); d_scale_arr.setZero(); + if ((N * sample_size) == 1) { + framework::TensorCopySync(*d_y, ctx.GetPlace(), d_x); + return; + } + const auto scale_inv_var_nhw = scale_arr * inv_var_arr / (N * sample_size); switch (data_layout) { diff --git a/paddle/fluid/operators/batch_norm_op.cu.cc b/paddle/fluid/operators/batch_norm_op.cu.cc index 550dd32d36..ca6cd86693 100644 --- a/paddle/fluid/operators/batch_norm_op.cu.cc +++ b/paddle/fluid/operators/batch_norm_op.cu.cc @@ -72,6 +72,9 @@ class BatchNormKernel int N, C, H, W, D; ExtractNCWHD(x_dims, data_layout, &N, &C, &H, &W, &D); + auto *y = ctx.Output("Y"); + y->mutable_data(ctx.GetPlace()); + // ------------------- cudnn descriptors --------------------- cudnnTensorDescriptor_t data_desc_; cudnnTensorDescriptor_t bn_param_desc_; @@ -93,7 +96,7 @@ class BatchNormKernel mode_ = CUDNN_BATCHNORM_SPATIAL; #endif - VLOG(1) << "Setting descriptors."; + VLOG(3) << "Setting descriptors."; std::vector dims; std::vector strides; if (data_layout == DataLayout::kNCHW) { @@ -113,11 +116,6 @@ class BatchNormKernel const auto *scale = ctx.Input("Scale"); const auto *bias = ctx.Input("Bias"); - auto *y = ctx.Output("Y"); - - // alloc memory - y->mutable_data(ctx.GetPlace()); - auto &dev_ctx = ctx.template device_context(); auto handle = dev_ctx.cudnn_handle(); @@ -162,22 +160,28 @@ class BatchNormKernel functor(dev_ctx, saved_mean, static_cast>(0)); functor(dev_ctx, saved_variance, static_cast>(0)); - double this_factor = 1. 
- momentum; - - CUDNN_ENFORCE(platform::dynload::cudnnBatchNormalizationForwardTraining( - handle, mode_, CudnnDataType::kOne(), CudnnDataType::kZero(), - data_desc_, x->template data(), data_desc_, - y->template mutable_data(ctx.GetPlace()), bn_param_desc_, - scale->template data>(), - bias->template data>(), this_factor, - mean_out->template mutable_data>( - ctx.GetPlace()), - variance_out->template mutable_data>( - ctx.GetPlace()), - epsilon, saved_mean->template mutable_data>( - ctx.GetPlace()), - saved_variance->template mutable_data>( - ctx.GetPlace()))); + if ((N * H * W * D) == 1) { + LOG(WARNING) << "Only 1 element in normalization dimension, " + << "we skip the batch norm calculation, let y = x."; + framework::TensorCopySync(*x, ctx.GetPlace(), y); + } else { + double this_factor = 1. - momentum; + + CUDNN_ENFORCE(platform::dynload::cudnnBatchNormalizationForwardTraining( + handle, mode_, CudnnDataType::kOne(), CudnnDataType::kZero(), + data_desc_, x->template data(), data_desc_, + y->template mutable_data(ctx.GetPlace()), bn_param_desc_, + scale->template data>(), + bias->template data>(), this_factor, + mean_out->template mutable_data>( + ctx.GetPlace()), + variance_out->template mutable_data>( + ctx.GetPlace()), + epsilon, saved_mean->template mutable_data>( + ctx.GetPlace()), + saved_variance->template mutable_data>( + ctx.GetPlace()))); + } } // clean when exit. @@ -209,6 +213,25 @@ class BatchNormGradKernel int N, C, H, W, D; ExtractNCWHD(x_dims, data_layout, &N, &C, &H, &W, &D); + // init output + auto *d_x = ctx.Output(framework::GradVarName("X")); + auto *d_scale = ctx.Output(framework::GradVarName("Scale")); + auto *d_bias = ctx.Output(framework::GradVarName("Bias")); + + d_x->mutable_data(ctx.GetPlace()); + d_scale->mutable_data(ctx.GetPlace()); + d_bias->mutable_data(ctx.GetPlace()); + + auto &dev_ctx = ctx.template device_context(); + if ((N * H * W * D) == 1) { + framework::TensorCopySync(*d_y, ctx.GetPlace(), d_x); + math::SetConstant> + functor; + functor(dev_ctx, d_scale, static_cast>(0)); + functor(dev_ctx, d_bias, static_cast>(0)); + return; + } + PADDLE_ENFORCE_EQ(scale->dims().size(), 1UL); PADDLE_ENFORCE_EQ(scale->dims()[0], C); @@ -247,21 +270,11 @@ class BatchNormGradKernel CUDNN_ENFORCE(platform::dynload::cudnnDeriveBNTensorDescriptor( bn_param_desc_, data_desc_, mode_)); - // init output - auto *d_x = ctx.Output(framework::GradVarName("X")); - auto *d_scale = ctx.Output(framework::GradVarName("Scale")); - auto *d_bias = ctx.Output(framework::GradVarName("Bias")); - - d_x->mutable_data(ctx.GetPlace()); - d_scale->mutable_data(ctx.GetPlace()); - d_bias->mutable_data(ctx.GetPlace()); - const auto *saved_mean = ctx.Input("SavedMean"); const auto *saved_var = ctx.Input("SavedVariance"); const void *saved_mean_data = saved_mean->template data(); const void *saved_var_data = saved_var->template data(); - auto &dev_ctx = ctx.template device_context(); CUDNN_ENFORCE(platform::dynload::cudnnBatchNormalizationBackward( dev_ctx.cudnn_handle(), mode_, CudnnDataType::kOne(), CudnnDataType::kZero(), CudnnDataType::kOne(), diff --git a/paddle/fluid/operators/batch_norm_op.h b/paddle/fluid/operators/batch_norm_op.h index 9e5fc41598..5e3d630d68 100644 --- a/paddle/fluid/operators/batch_norm_op.h +++ b/paddle/fluid/operators/batch_norm_op.h @@ -19,6 +19,22 @@ limitations under the License. 
*/ namespace paddle { namespace operators { +using Tensor = framework::Tensor; +using LoDTensor = framework::LoDTensor; +using DataLayout = framework::DataLayout; + +template +using EigenArrayMap = + Eigen::Map>; +template +using ConstEigenArrayMap = + Eigen::Map>; +template +using EigenVectorArrayMap = Eigen::Map>; +template +using ConstEigenVectorArrayMap = + Eigen::Map>; + template class BatchNormKernel : public framework::OpKernel { public: diff --git a/paddle/fluid/operators/batch_size_like.h b/paddle/fluid/operators/batch_size_like.h index dd51a11fbe..fc15d56891 100644 --- a/paddle/fluid/operators/batch_size_like.h +++ b/paddle/fluid/operators/batch_size_like.h @@ -53,22 +53,25 @@ class BatchSizeLikeOp : public framework::OperatorWithKernel { class BatchSizeLikeOpMaker : public framework::OpProtoAndCheckerMaker { public: - BatchSizeLikeOpMaker(OpProto *proto, OpAttrChecker *op_checker) - : framework::OpProtoAndCheckerMaker(proto, op_checker) { - AddInput("Input", - "(Tensor) Tensor " - "whose input_dim_idx'th dimension specifies the batch_size"); + void Make() final { + AddInput( + "Input", + "Tensor whose input_dim_idx'th dimension specifies the batch_size"); AddOutput("Out", - "(Tensor) Tensor of specified shape will be filled " + "Tensor of specified shape will be filled " "with the specified value"); - AddAttr>("shape", "(vector) The shape of the output"); + AddAttr>("shape", "The shape of the output"); AddAttr("input_dim_idx", - "(int, default 0) The index of input's batch size dimension") + "default 0. The index of input's batch size dimension") .SetDefault(0); AddAttr("output_dim_idx", - "(int, default 0) The index of output's batch size dimension") + "default 0. The index of output's batch size dimension") .SetDefault(0); + Apply(); } + + protected: + virtual void Apply() = 0; }; } // namespace operators diff --git a/paddle/fluid/operators/beam_search_decode_op.cc b/paddle/fluid/operators/beam_search_decode_op.cc index 68fb988afd..10d678111f 100644 --- a/paddle/fluid/operators/beam_search_decode_op.cc +++ b/paddle/fluid/operators/beam_search_decode_op.cc @@ -12,8 +12,10 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ -#include "paddle/fluid/operators/beam_search_decode_op.h" +#include #include + +#include "paddle/fluid/operators/beam_search_decode_op.h" #include "paddle/fluid/platform/device_context.h" namespace paddle { @@ -22,8 +24,11 @@ namespace operators { struct BeamSearchDecodeFunctor { BeamSearchDecodeFunctor(const LoDTensorArray& step_ids, const LoDTensorArray& step_scores, - LoDTensor* id_tensor, LoDTensor* score_tensor) - : step_ids_origin_(step_ids), + LoDTensor* id_tensor, LoDTensor* score_tensor, + size_t beam_size, int end_id) + : beam_size_(beam_size), + end_id_(end_id), + step_ids_origin_(step_ids), step_scores_origin_(step_scores), id_tensor_(id_tensor), score_tensor_(score_tensor) { @@ -37,9 +42,11 @@ struct BeamSearchDecodeFunctor { // Copy all tensors in the input tensor array for (auto& step_id : step_ids_origin_) { framework::LoDTensor out; - dev_ctx->Wait(); - framework::TensorCopy(step_id, platform::CPUPlace(), *dev_ctx, &out); - dev_ctx->Wait(); + if (step_id.numel() > 0) { + dev_ctx->Wait(); + framework::TensorCopy(step_id, platform::CPUPlace(), *dev_ctx, &out); + dev_ctx->Wait(); + } out.set_lod(step_id.lod()); step_ids_.push_back(out); @@ -53,9 +60,12 @@ struct BeamSearchDecodeFunctor { // Copy all tensors in the input tensor array for (auto& step_score : step_scores_origin_) { framework::LoDTensor out; - dev_ctx->Wait(); - framework::TensorCopy(step_score, platform::CPUPlace(), *dev_ctx, &out); - dev_ctx->Wait(); + if (step_score.numel() > 0) { + dev_ctx->Wait(); + framework::TensorCopy(step_score, platform::CPUPlace(), *dev_ctx, + &out); + dev_ctx->Wait(); + } out.set_lod(step_score.lod()); step_scores_.push_back(out); @@ -67,6 +77,8 @@ struct BeamSearchDecodeFunctor { void operator()() const; bool tensor_on_gpu_; + size_t beam_size_; + int end_id_; const LoDTensorArray& step_ids_origin_; const LoDTensorArray& step_scores_origin_; LoDTensorArray step_ids_ = LoDTensorArray(); @@ -77,14 +89,14 @@ struct BeamSearchDecodeFunctor { template void BeamSearchDecodeFunctor::operator()() const { - BeamSearchDecoder beam_search_decoder; + BeamSearchDecoder beam_search_decoder(beam_size_, end_id_); // Check if the tensor is on GPU. 
If so, use the CPU copy instead if (tensor_on_gpu_) { - beam_search_decoder.PackAllSteps(step_ids_, step_scores_, id_tensor_, - score_tensor_); + beam_search_decoder.Backtrace(step_ids_, step_scores_, id_tensor_, + score_tensor_); } else { - beam_search_decoder.PackAllSteps(step_ids_origin_, step_scores_origin_, - id_tensor_, score_tensor_); + beam_search_decoder.Backtrace(step_ids_origin_, step_scores_origin_, + id_tensor_, score_tensor_); } } @@ -122,34 +134,51 @@ class BeamSearchDecodeOp : public framework::OperatorBase { "Level of LodTensor should be 2"); } + size_t beam_size = ctx.Attr("beam_size"); + int end_id = ctx.Attr("end_id"); + // prepare output LoDTensor* sentenceIds = ctx.Output("SentenceIds"); LoDTensor* sentenceScores = ctx.Output("SentenceScores"); framework::VisitDataType( framework::ToDataType(scores->at(0).type()), - BeamSearchDecodeFunctor(*ids, *scores, sentenceIds, sentenceScores)); + BeamSearchDecodeFunctor(*ids, *scores, sentenceIds, sentenceScores, + beam_size, end_id)); } }; class BeamSearchDecodeOpProtoMaker : public framework::OpProtoAndCheckerMaker { public: - BeamSearchDecodeOpProtoMaker(OpProto* proto, OpAttrChecker* op_checker) - : framework::OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("Ids", "(LodTensorArray)" - "score of the candidate words in each step"); + "The LodTensorArray containing the selected ids of all steps"); AddInput("Scores", "(LodTensorArray)" - "score of the candidate words in each step"); - AddOutput("SentenceIds", - "(LodTensor)" - "All possible result sentences of word ids"); - AddOutput("SentenceScores", - "(LodTensor)" - "All possible result sentences of word scores"); + "The LodTensorArray containing the selected scores of all steps"); + AddOutput( + "SentenceIds", + "(LodTensor)" + "An LodTensor containing all generated id sequences for all source " + "sentences"); + AddOutput( + "SentenceScores", + "(LodTensor)" + "An LodTensor containing scores corresponding to Output(SentenceIds)"); + AddAttr("beam_size", "beam size for beam search"); + AddAttr("end_id", + "the token id which indicates the end of a sequence"); AddComment(R"DOC( -Pack the result of Beam search op into SentenceIds and SentenceScores. +Beam Search Decode Operator. This Operator constructs the full hypotheses for +each source sentence by walking back along the LoDTensorArray Input(ids) +whose lods can be used to restore the path in the beam search tree. + +The Output(SentenceIds) and Output(SentenceScores) separately contain the +generated id sequences and the corresponding scores. The shapes and lods of the +two LodTensor are same. The lod level is 2 and the two levels separately +indicate how many hypotheses each source sentence has and how many ids each +hypothesis has. 
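+
+For example (with purely illustrative numbers), if there are 2 source
+sentences and each keeps 3 hypotheses, the first-level lod of
+Output(SentenceIds) is [0, 3, 6]; if those hypotheses have lengths
+[2, 3, 3] and [4, 2, 3], the second-level lod is [0, 2, 5, 8, 12, 14, 17],
+marking where each hypothesis' ids start and end in the flattened data.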
)DOC"); } }; @@ -173,10 +202,12 @@ class BeamSearchDecodeInferVarType : public framework::VarTypeInference { void operator()(const framework::OpDesc& op_desc, framework::BlockDesc* block) const override { for (auto& o : op_desc.Output("SentenceIds")) { - block->Var(o)->SetType(framework::proto::VarType::LOD_TENSOR); + auto& sentence_ids = block->FindRecursiveOrCreateVar(o); + sentence_ids.SetType(framework::proto::VarType::LOD_TENSOR); } for (auto& o : op_desc.Output("SentenceScores")) { - block->Var(o)->SetType(framework::proto::VarType::LOD_TENSOR); + auto& sentence_scores = block->FindRecursiveOrCreateVar(o); + sentence_scores.SetType(framework::proto::VarType::LOD_TENSOR); } } }; diff --git a/paddle/fluid/operators/beam_search_decode_op.h b/paddle/fluid/operators/beam_search_decode_op.h index 3c01f81c83..6aefc5446f 100644 --- a/paddle/fluid/operators/beam_search_decode_op.h +++ b/paddle/fluid/operators/beam_search_decode_op.h @@ -14,7 +14,9 @@ limitations under the License. */ #pragma once +#include #include + #include "paddle/fluid/framework/lod_tensor_array.h" #include "paddle/fluid/framework/op_registry.h" @@ -25,42 +27,12 @@ using LoDTensor = framework::LoDTensor; using LoDTensorArray = framework::LoDTensorArray; // all the lod have 2 levels. -// The First is source level, the second is sentence level. -// source level describe how many candidate words for this source. -// sentence level describe these candidates belong to which prefix +// The first is source level, the second is sentence level. +// source level describe how many prefixes (branchs) for each source sentece +// (beam). sentence level describe how these candidates belong to the prefixes. const size_t kSourceLevel = 0; const size_t kSentenceLevel = 1; -template -struct BeamNode { - BeamNode(int64_t word_id, T score) : word_id_(word_id), score_(score) {} - - ~BeamNode() { - if (parent_) { - parent_->DropKid(this); - if (parent_->kids_.size() == 0UL) { - delete parent_; - } - } - VLOG(3) << "Delete BeamNode root with word_id:" << this->word_id_; - } - - void AppendTo(BeamNode* parent) { - parent_ = parent; - parent->kids_.insert(this); - } - - void DropKid(BeamNode* kid) { kids_.erase(kid); } - - BeamNode* parent_ = nullptr; - std::unordered_set kids_; - int64_t word_id_; - T score_; -}; - -template -using BeamNodeVector = std::vector>>; - template struct Sentence { std::vector word_ids; @@ -72,24 +44,8 @@ using SentenceVector = std::vector>; template struct BeamSearchDecoder { - /** - * make a BeamNode and all it's related prefix BeanNode into a Sentence. - */ - Sentence MakeSentence(const BeamNode* node) const; - - /** - * Param: - * cur_ids: LoDTensor of One step for word ID - * cur_scores: LoDTensor of One Step for word score - * prefixes_list: prefixes for each source sentence. - * sentence_vector_list: result sentence_vector for each source sentence. - * Return: - * a new prefixes list for each source of current step - */ - std::vector> PackTwoSteps( - const LoDTensor& cur_ids, const LoDTensor& cur_scores, - std::vector>* prefixes_list, - std::vector>* sentence_vector_list) const; + BeamSearchDecoder(size_t beam_size, int end_id) + : beam_size_(beam_size), end_id_(end_id) {} /** * convert the result sentence_vector for each source sentence into two @@ -100,107 +56,30 @@ struct BeamSearchDecoder { * sentence_vector_list: sentence_vector for each source sentence. * id_tensor: result LoDTensor for sentences of id. * score_tensor: result LoDTensor for sentences of score. 
+ * reverse: whether the ids of each sentence in sentence_vector_list are reversed + * sort_by_score: whether to sort hypotheses of each sentence by scores. */ void ConvertSentenceVectorToLodTensor( std::vector> sentence_vector_list, LoDTensor* id_tensor, - LoDTensor* score_tensor) const; + LoDTensor* score_tensor, bool reverse = true, + bool sort_by_score = true) const; /** - * Pack all steps of id/score LodTensor into sentence LoDTensor - * it's main logic is: - * ```python - * prefix - * result_sentence - * result_lod_tensor - * - * for (step in steps): - * prefix = PackTwoSteps(prefix, step, &result_sentence) - * ConvertSentenceVectorToLodTensor(result_sentence, &result_lod_tensor) - * ``` + * Gather the hypotheses for each source sentence by backtracking through the + * LoDTensorArray step_ids, whose lods preserve the paths in the tree. */ - void PackAllSteps(const LoDTensorArray& step_ids, - const LoDTensorArray& step_scores, LoDTensor* id_tensor, - LoDTensor* score_tensor) const; -}; - -template -Sentence BeamSearchDecoder::MakeSentence(const BeamNode* node) const { - Sentence sentence; - while (node != nullptr) { - sentence.word_ids.emplace_back(node->word_id_); - sentence.scores.emplace_back(node->score_); - node = node->parent_; - } - - std::reverse(std::begin(sentence.word_ids), std::end(sentence.word_ids)); - std::reverse(std::begin(sentence.scores), std::end(sentence.scores)); - - return sentence; -} - -template -std::vector> BeamSearchDecoder::PackTwoSteps( - const LoDTensor& cur_ids, const LoDTensor& cur_scores, - std::vector>* prefixes_list, - std::vector>* sentence_vector_list) const { - std::vector> result; + void Backtrace(const LoDTensorArray& step_ids, + const LoDTensorArray& step_scores, LoDTensor* id_tensor, + LoDTensor* score_tensor) const; - for (size_t src_idx = 0; src_idx < cur_ids.lod()[kSourceLevel].size() - 1; - ++src_idx) { - size_t src_start = cur_ids.lod().at(kSourceLevel)[src_idx]; - size_t src_end = cur_ids.lod().at(kSourceLevel)[src_idx + 1]; - - BeamNodeVector beam_nodes; - - // if prefixes size is 0, it means this is the first step. In this step, - // all candidate id is the start of candidate sentences.
- if (prefixes_list->empty()) { - PADDLE_ENFORCE_EQ(cur_ids.lod().at(kSourceLevel).back(), - cur_ids.lod().at(kSentenceLevel).back(), - "in the first step"); - for (size_t id_idx = src_start; id_idx < src_end; ++id_idx) { - beam_nodes.push_back(std::unique_ptr>(new BeamNode( - cur_ids.data()[id_idx], cur_scores.data()[id_idx]))); - } - } else { - BeamNodeVector& prefixes = prefixes_list->at(src_idx); - SentenceVector& sentence_vector = (*sentence_vector_list)[src_idx]; - - PADDLE_ENFORCE_EQ(src_end - src_start, prefixes.size(), - "prefix and candidate set number should be the same"); - - auto candidate_offset = cur_ids.lod()[kSentenceLevel]; - for (size_t prefix_idx = 0; prefix_idx < prefixes.size(); ++prefix_idx) { - std::unique_ptr>& prefix = prefixes[prefix_idx]; - size_t candidate_start = candidate_offset[src_start + prefix_idx]; - size_t candidate_end = candidate_offset[src_start + prefix_idx + 1]; - if (candidate_start == candidate_end) { - VLOG(3) << "this sentence has no more candidate, " - "add to result sentence and rm it from beam tree"; - sentence_vector.push_back(MakeSentence(prefix.get())); - prefix.reset(); - } else { - for (size_t candidate_idx = candidate_start; - candidate_idx < candidate_end; ++candidate_idx) { - auto* candidate = - new BeamNode(cur_ids.data()[candidate_idx], - cur_scores.data()[candidate_idx]); - candidate->AppendTo(prefix.get()); - beam_nodes.push_back(std::unique_ptr>(candidate)); - } - prefix.release(); - } - } - } - result.push_back(std::move(beam_nodes)); - } - return result; -} + size_t beam_size_; + int end_id_; +}; template void BeamSearchDecoder::ConvertSentenceVectorToLodTensor( std::vector> sentence_vector_list, LoDTensor* id_tensor, - LoDTensor* score_tensor) const { + LoDTensor* score_tensor, bool reverse, bool sort_by_score) const { size_t src_num = sentence_vector_list.size(); PADDLE_ENFORCE_NE(src_num, 0, "src_num should not be 0"); @@ -211,11 +90,29 @@ void BeamSearchDecoder::ConvertSentenceVectorToLodTensor( std::vector score_data; for (size_t src_idx = 0; src_idx < src_num; ++src_idx) { + if (sort_by_score) { + sort(sentence_vector_list[src_idx].begin(), + sentence_vector_list[src_idx].end(), + [reverse](const Sentence& a, const Sentence& b) { + if (reverse) + return a.scores.front() > b.scores.front(); + else + return a.scores.back() > b.scores.back(); + }); + } for (Sentence& sentence : sentence_vector_list[src_idx]) { - id_data.insert(id_data.end(), sentence.word_ids.begin(), - sentence.word_ids.end()); - score_data.insert(score_data.end(), sentence.scores.begin(), - sentence.scores.end()); + if (reverse) { + id_data.insert(id_data.end(), sentence.word_ids.rbegin(), + sentence.word_ids.rend()); + score_data.insert(score_data.end(), sentence.scores.rbegin(), + sentence.scores.rend()); + } else { + id_data.insert(id_data.end(), sentence.word_ids.begin(), + sentence.word_ids.end()); + score_data.insert(score_data.end(), sentence.scores.begin(), + sentence.scores.end()); + } + sentence_level_lod.push_back(sentence_level_lod.back() + sentence.word_ids.size()); } @@ -243,39 +140,75 @@ void BeamSearchDecoder::ConvertSentenceVectorToLodTensor( } template -void BeamSearchDecoder::PackAllSteps(const LoDTensorArray& step_ids, - const LoDTensorArray& step_scores, - LoDTensor* id_tensor, - LoDTensor* score_tensor) const { +void BeamSearchDecoder::Backtrace(const LoDTensorArray& step_ids, + const LoDTensorArray& step_scores, + LoDTensor* id_tensor, + LoDTensor* score_tensor) const { PADDLE_ENFORCE(!step_ids.empty(), "step num should be larger 
than 0"); PADDLE_ENFORCE_EQ(step_ids.size(), step_scores.size(), "step_ids and step_scores should be the same"); const size_t step_num = step_ids.size(); const size_t src_num = step_ids.at(0).lod().at(kSourceLevel).size() - 1; + std::vector> sentence_vector_list( + src_num, SentenceVector(beam_size_)); + std::vector> prefix_idx_vector_list(src_num); + for (int step_id = step_num - 1; step_id >= 0; --step_id) { + auto& cur_ids = step_ids.at(step_id); + auto& cur_scores = step_scores.at(step_id); + for (size_t src_idx = 0; src_idx < src_num; ++src_idx) { + // for each source sentence + auto& sentence_vector = sentence_vector_list.at(src_idx); + auto& prefix_idx_vector = prefix_idx_vector_list.at(src_idx); + size_t src_prefix_start = cur_ids.lod().at(kSourceLevel)[src_idx]; + size_t src_prefix_end = cur_ids.lod().at(kSourceLevel)[src_idx + 1]; + if (prefix_idx_vector.empty()) { // be finished and pruned at this step + // or the last time step + for (size_t prefix_idx = src_prefix_start; prefix_idx < src_prefix_end; + ++prefix_idx) { + size_t candidate_start = cur_ids.lod().at(kSentenceLevel)[prefix_idx]; + size_t candidate_end = + cur_ids.lod().at(kSentenceLevel)[prefix_idx + 1]; + for (size_t candidate_idx = candidate_start; + candidate_idx < candidate_end; ++candidate_idx) { + prefix_idx_vector.push_back(prefix_idx); + size_t idx = prefix_idx_vector.size() - 1; + auto cur_id = cur_ids.data()[candidate_idx]; + auto cur_score = cur_scores.data()[candidate_idx]; + sentence_vector.at(idx).word_ids.push_back(cur_id); + sentence_vector.at(idx).scores.push_back(cur_score); + } + } + } else { // use prefix_idx_vector to backtrace + size_t src_candidate_start = + cur_ids.lod().at(kSentenceLevel)[src_prefix_start]; + size_t prefix_idx = src_prefix_start; + size_t candidate_num = + cur_ids.lod().at(kSentenceLevel)[prefix_idx + 1] - + cur_ids.lod().at(kSentenceLevel)[prefix_idx]; + for (size_t idx = 0; idx < prefix_idx_vector.size(); ++idx) { + auto candidate_idx = prefix_idx_vector.at(idx); + auto cur_id = cur_ids.data()[candidate_idx]; + auto cur_score = cur_scores.data()[candidate_idx]; + if (cur_id != end_id_ || sentence_vector.at(idx).word_ids.empty()) { + // to skip redundant end tokens + sentence_vector.at(idx).word_ids.push_back(cur_id); + sentence_vector.at(idx).scores.push_back(cur_score); + } - PADDLE_ENFORCE_GT(src_num, 0UL, "source num should be larger than 0"); - - // previous prefixes for each step, - // the init length is 0, means this is the first step. 
- std::vector> beamnode_vector_list(0); - std::vector> sentence_vector_list(src_num); - - // pack all steps for one batch first, then another batch - for (size_t step_id = 0; step_id < step_num; ++step_id) { - beamnode_vector_list = - PackTwoSteps(step_ids.at(step_id), step_scores.at(step_id), - &beamnode_vector_list, &sentence_vector_list); - } - // append last beam_node to result - for (size_t src_idx = 0; src_idx < src_num; ++src_idx) { - for (auto& beam_node : beamnode_vector_list.at(src_idx)) { - sentence_vector_list[src_idx].push_back(MakeSentence(beam_node.get())); - beam_node.reset(); + while (src_candidate_start + candidate_num <= + candidate_idx) { // search the corresponding prefix + prefix_idx++; + candidate_num += cur_ids.lod().at(kSentenceLevel)[prefix_idx + 1] - + cur_ids.lod().at(kSentenceLevel)[prefix_idx]; + } + prefix_idx_vector.at(idx) = prefix_idx; + } + } } } ConvertSentenceVectorToLodTensor(sentence_vector_list, id_tensor, - score_tensor); + score_tensor, true, true); } } // namespace operators diff --git a/paddle/fluid/operators/beam_search_decode_op_test.cc b/paddle/fluid/operators/beam_search_decode_op_test.cc index 36f9594969..88339e38d8 100644 --- a/paddle/fluid/operators/beam_search_decode_op_test.cc +++ b/paddle/fluid/operators/beam_search_decode_op_test.cc @@ -20,15 +20,11 @@ using LoD = paddle::framework::LoD; using LoDTensor = paddle::framework::LoDTensor; using LoDTensorArray = paddle::framework::LoDTensorArray; -template -using BeamNode = paddle::operators::BeamNode; template using BeamSearchDecoder = paddle::operators::BeamSearchDecoder; template using Sentence = paddle::operators::Sentence; template -using BeamNodeVector = paddle::operators::BeamNodeVector; -template using SentenceVector = paddle::operators::SentenceVector; namespace paddle { @@ -77,138 +73,50 @@ void GenerateExample(const std::vector& level_0, } // namespace test } // namespace paddle -TEST(BeamSearchDecodeOp, DeleteBeamNode) { - auto* root = new BeamNode(0, 0); - auto* b1 = new BeamNode(1, 1); - auto* b2 = new BeamNode(2, 2); - auto* b3 = new BeamNode(3, 3); - - b1->AppendTo(root); - b2->AppendTo(root); - b3->AppendTo(b1); - - delete b3; - delete b2; -} - -TEST(BeamSearchDecodeOp, MakeSentence) { - auto* root = new BeamNode(0, 0); - auto* b1 = new BeamNode(1, 1); - auto* end = new BeamNode(2, 2); - b1->AppendTo(root); - end->AppendTo(b1); - - BeamSearchDecoder helper; - Sentence sentence = helper.MakeSentence(end); - delete end; - - std::vector expect_ids = {0, 1, 2}; - ASSERT_EQ(sentence.word_ids, expect_ids); - - std::vector expect_scores = {0, 1, 2}; - ASSERT_EQ(sentence.scores, expect_scores); -} - -TEST(BeamSearchDecodeOp, PackTwoStepsFistStep) { - CPUPlace place; - - LoDTensorArray ids; - LoDTensorArray scores; - - paddle::test::GenerateExample( - std::vector{0, 2, 6}, std::vector{0, 1, 2, 3, 4, 5, 6}, - std::vector{1, 2, 3, 4, 5, 6}, &ids, &scores); - - std::vector> beamnode_vector_list; - std::vector> sentence_vector_list( - 2, SentenceVector()); - - BeamSearchDecoder helper; - beamnode_vector_list = helper.PackTwoSteps( - ids[0], scores[0], &beamnode_vector_list, &sentence_vector_list); - ASSERT_EQ(beamnode_vector_list.size(), 2UL); - ASSERT_EQ(beamnode_vector_list[0].size(), 2UL); - ASSERT_EQ(beamnode_vector_list[1].size(), 4UL); -} - -TEST(BeamSearchDecodeOp, PackTwoSteps) { - CPUPlace place; - - // first source has three prefix - BeamNodeVector source0_prefixes; - source0_prefixes.push_back( - std::unique_ptr>(new BeamNode(1, 1))); - source0_prefixes.push_back( - 
std::unique_ptr>(new BeamNode(0, 0))); - source0_prefixes.push_back( - std::unique_ptr>(new BeamNode(3, 3))); - - // second source has two prefix - BeamNodeVector source1_prefixes; - source1_prefixes.push_back( - std::unique_ptr>(new BeamNode(4, 4))); - source1_prefixes.push_back( - std::unique_ptr>(new BeamNode(5, 5))); - - std::vector> beamnode_vector_list; - std::vector> sentence_vector_list( - 2, SentenceVector()); - - beamnode_vector_list.push_back(std::move(source0_prefixes)); - beamnode_vector_list.push_back(std::move(source1_prefixes)); - - // generate data for one step - LoDTensorArray ids; - LoDTensorArray scores; - - paddle::test::GenerateExample(std::vector{0, 3, 5}, - std::vector{0, 1, 1, 3, 4, 5}, - std::vector{0, 1, 2, 3, 4}, &ids, &scores); - - BeamSearchDecoder helper1; - beamnode_vector_list = helper1.PackTwoSteps( - ids[0], scores[0], &beamnode_vector_list, &sentence_vector_list); - - ASSERT_EQ(sentence_vector_list[0].size(), 1UL); - ASSERT_EQ(sentence_vector_list[1].size(), 0UL); - ASSERT_EQ(beamnode_vector_list[0].size(), 3UL); - ASSERT_EQ(beamnode_vector_list[1].size(), 2UL); -} - -TEST(BeamSearchDecodeOp, PackAllSteps) { +TEST(BeamSearchDecodeOp, Backtrace) { CPUPlace place; - // we will constuct a sample data with 3 steps and 2 source sentences + // Construct sample data with 5 steps and 2 source sentences + // beam_size = 2, start_id = 0, end_id = 1 LoDTensorArray ids; LoDTensorArray scores; paddle::test::GenerateExample( - std::vector{0, 3, 6}, std::vector{0, 1, 2, 3, 4, 5, 6}, - std::vector{1, 2, 3, 4, 5, 6}, &ids, &scores); + std::vector{0, 1, 2}, std::vector{0, 1, 2}, + std::vector{0, 0}, &ids, &scores); // start with start_id + paddle::test::GenerateExample(std::vector{0, 1, 2}, + std::vector{0, 2, 4}, + std::vector{2, 3, 4, 5}, &ids, &scores); + paddle::test::GenerateExample(std::vector{0, 2, 4}, + std::vector{0, 2, 2, 4, 4}, + std::vector{3, 1, 5, 4}, &ids, &scores); + paddle::test::GenerateExample(std::vector{0, 2, 4}, + std::vector{0, 1, 2, 3, 4}, + std::vector{1, 1, 3, 5}, &ids, &scores); paddle::test::GenerateExample( - std::vector{0, 3, 6}, std::vector{0, 1, 1, 3, 5, 5, 6}, - std::vector{0, 1, 2, 3, 4, 5}, &ids, &scores); - paddle::test::GenerateExample(std::vector{0, 3, 6}, - std::vector{0, 0, 1, 2, 3, 4, 5}, - std::vector{0, 1, 2, 3, 4}, &ids, &scores); + std::vector{0, 2, 4}, + std::vector{0, 0, 0, 2, + 2}, // the branches of the first source sentence + // are pruned since they finished + std::vector{5, 1}, + &ids, &scores); - ASSERT_EQ(ids.size(), 3UL); - ASSERT_EQ(scores.size(), 3UL); + ASSERT_EQ(ids.size(), 5UL); + ASSERT_EQ(scores.size(), 5UL); - BeamSearchDecoder helper; + BeamSearchDecoder helper(2, 1); // beam_size = 2, end_id = 1 LoDTensor id_tensor; LoDTensor score_tensor; - helper.PackAllSteps(ids, scores, &id_tensor, &score_tensor); + helper.Backtrace(ids, scores, &id_tensor, &score_tensor); LoD lod = id_tensor.lod(); - std::vector expect_source_lod = {0, 4, 8}; + std::vector expect_source_lod = {0, 2, 4}; EXPECT_EQ(lod[0], expect_source_lod); - std::vector expect_sentence_lod = {0, 1, 3, 6, 9, 10, 13, 16, 19}; + std::vector expect_sentence_lod = {0, 4, 7, 12, 17}; EXPECT_EQ(lod[1], expect_sentence_lod); - // 2| 1, 0| 3, 1, 0| 3, 2, 1| 5| 4, 3, 2| 4, 4, 3| 6, 5, 4 - std::vector expect_data = {2, 1, 0, 3, 1, 0, 3, 2, 1, 5, - 4, 3, 2, 4, 4, 3, 6, 5, 4}; + std::vector expect_data = {0, 2, 3, 1, 0, 2, 1, 0, 4, + 5, 3, 5, 0, 4, 5, 3, 1}; ASSERT_EQ(id_tensor.dims()[0], static_cast(expect_data.size())); for (size_t i = 0; i < expect_data.size(); ++i) {
ASSERT_EQ(id_tensor.data()[i], diff --git a/paddle/fluid/operators/beam_search_op.cc b/paddle/fluid/operators/beam_search_op.cc index cff097cca1..62771d09f1 100644 --- a/paddle/fluid/operators/beam_search_op.cc +++ b/paddle/fluid/operators/beam_search_op.cc @@ -12,25 +12,26 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/fluid/operators/beam_search_op.h" - #include #include #include #include + #include "paddle/fluid/framework/lod_tensor.h" #include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/operators/beam_search_op.h" namespace paddle { namespace operators { void BeamSearch::operator()(const framework::LoDTensor &pre_ids, + const framework::LoDTensor &pre_scores, framework::LoDTensor *selected_ids, framework::LoDTensor *selected_scores) { auto abs_lod = framework::ToAbsOffset(ids_->lod()); auto &high_level = abs_lod[lod_level_]; - auto items = SelectTopBeamSizeItems(); + auto items = SelectTopBeamSizeItems(pre_ids, pre_scores); auto selected_items = ToMap(items, high_level.back()); VLOG(3) << "selected_items:"; for (size_t i = 0; i < selected_items.size(); ++i) { @@ -39,7 +40,8 @@ void BeamSearch::operator()(const framework::LoDTensor &pre_ids, VLOG(3) << ItemToString(item); } } - PruneEndidCandidates(pre_ids, &selected_items); + + PruneEndBeams(pre_ids, &selected_items); // calculate the output tensor's height size_t num_instances = std::accumulate( std::begin(selected_items), std::end(selected_items), 0, @@ -61,12 +63,6 @@ void BeamSearch::operator()(const framework::LoDTensor &pre_ids, size_t low_offset = 0; for (auto &items : selected_items) { low_level.push_back(low_offset); - sort(items.begin(), items.end(), [](const Item &a, const Item &b) { - if (a.offset < b.offset) { - return true; - } - return a.id < b.id; - }); for (auto &item : items) { ids_data[low_offset] = item.id; scores_data[low_offset] = item.score; @@ -86,21 +82,31 @@ void BeamSearch::operator()(const framework::LoDTensor &pre_ids, selected_scores->set_lod(lod); } -int BeamSearch::PruneEndidCandidates(const framework::LoDTensor &pre_ids, - std::vector> *items) { +void BeamSearch::PruneEndBeams(const framework::LoDTensor &pre_ids, + std::vector> *items) { auto *pre_ids_data = pre_ids.data(); - - int res = 0; - for (size_t offset = 0; offset < items->size(); offset++) { - auto prefix_id = pre_ids_data[offset]; - if (prefix_id == end_id_) { - items->at(offset).clear(); - } else { - res++; + auto abs_lod = framework::ToAbsOffset(ids_->lod()); + auto &high_level = abs_lod[lod_level_]; + for (size_t src_idx = 0; src_idx < high_level.size() - 1; ++src_idx) { + size_t src_prefix_start = high_level[src_idx]; + size_t src_prefix_end = high_level[src_idx + 1]; + bool finish_flag = true; + for (size_t offset = src_prefix_start; offset < src_prefix_end; offset++) { + for (auto &item : items->at(offset)) { + if (item.id != static_cast(end_id_) || + pre_ids_data[offset] != end_id_) { + finish_flag = false; + break; + } + } + if (!finish_flag) break; + } + if (finish_flag) { // all branches of the beam (source sentence) have + // ended, so prune this beam + for (size_t offset = src_prefix_start; offset < src_prefix_end; offset++) + items->at(offset).clear(); } } - - return res; } std::vector> BeamSearch::ToMap( @@ -115,19 +121,17 @@ std::vector> BeamSearch::ToMap( return result; } -std::vector> -BeamSearch::SelectTopBeamSizeItems() { +std::vector>
BeamSearch::SelectTopBeamSizeItems( + const framework::LoDTensor &pre_ids, + const framework::LoDTensor &pre_scores) { std::vector> result; std::vector items; // for each source sentence, select the top beam_size items across all // candidate sets. - while (NextItemSet(&items)) { - std::nth_element(std::begin(items), std::begin(items) + beam_size_, - std::end(items), [](const Item &a, const Item &b) { - // TODO(superjom) make score's comparation customizable. - // partial sort in descending order - return a.score > b.score; - }); + while (NextItemSet(pre_ids, pre_scores, &items)) { + std::nth_element( + std::begin(items), std::begin(items) + beam_size_, std::end(items), + [](const Item &a, const Item &b) { return a.score > b.score; }); // prune the top beam_size items. if (items.size() > beam_size_) { items.resize(beam_size_); @@ -146,7 +150,9 @@ BeamSearch::SelectTopBeamSizeItems() { } // the candidates of a source -bool BeamSearch::NextItemSet(std::vector *items) { +bool BeamSearch::NextItemSet(const framework::LoDTensor &pre_ids, + const framework::LoDTensor &pre_scores, + std::vector *items) { if (sent_offset_ >= ids_->NumElements(lod_level_)) { return false; } @@ -164,14 +170,24 @@ bool BeamSearch::NextItemSet(std::vector *items) { instance_dim *= ids.dims()[i]; } + auto *pre_ids_data = pre_ids.data(); + auto *pre_scores_data = pre_scores.data(); items->clear(); items->reserve(framework::product(ids.dims())); for (size_t offset = abs_lod[lod_level_][sent_offset_]; offset < abs_lod[lod_level_][sent_offset_ + 1]; offset++) { - for (size_t d = 0; d < instance_dim; d++) { - const size_t dim_offset = offset * instance_dim + d; - items->emplace_back(offset, ids_data[dim_offset], - scores_data[dim_offset]); + auto pre_id = pre_ids_data[offset]; + auto pre_score = pre_scores_data[offset]; + if (pre_id == end_id_) { + // Allocate all probability mass to eos_id for finished branches; the + // other candidate ids can be ignored. + items->emplace_back(offset, end_id_, pre_score); + } else { + for (size_t d = 0; d < instance_dim; d++) { + const size_t dim_offset = offset * instance_dim + d; + items->emplace_back(offset, ids_data[dim_offset], + scores_data[dim_offset]); + } } } @@ -197,18 +213,29 @@ std::string ItemToString(const BeamSearch::Item &item) { class BeamSearchOpMaker : public framework::OpProtoAndCheckerMaker { public: - BeamSearchOpMaker(OpProto *proto, OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { // inputs and outputs stored in proto - AddInput("pre_ids", "ids in previous step"); - AddInput("ids", "a LoDTensor of shape of [None,k]"); + AddInput("pre_ids", + "(LoDTensor) The LoDTensor containing the selected ids at the " + "previous step. It should be a tensor with shape (batch_size, 1) " + "and lod `[[0, 1, ... , batch_size], [0, 1, ..., batch_size]]` at " + "the first step."); + AddInput("pre_scores", + "(LoDTensor) The LoDTensor containing the accumulated " + "scores corresponding to the selected ids at the previous step."); + AddInput("ids", + "(LoDTensor) The LoDTensor containing the candidate ids.
Its " + "shape should be (batch_size * beam_size, K), where K supposed to " + "be beam_size."); AddInput("scores", - "a LoDTensor that has the same shape and LoD with `ids`"); + "(LoDTensor) The LodTensor containing the accumulated scores " + "corresponding to Input(ids) and its shape is the same as the " + "shape of Input(ids)."); AddOutput("selected_ids", - "a LoDTensor that stores the IDs selected by beam search"); - AddOutput( - "selected_scores", - "a LoDTensor that has the same shape and LoD with `selected_ids`"); + "A LodTensor that stores the IDs selected by beam search."); + AddOutput("selected_scores", + "A LoDTensor containing the accumulated scores corresponding to " + "Output(selected_ids)."); // Attributes stored in AttributeMap AddAttr("level", "the level of LoDTensor"); @@ -216,8 +243,21 @@ class BeamSearchOpMaker : public framework::OpProtoAndCheckerMaker { AddAttr("end_id", "the token id which indicates the end of a sequence"); - AddComment( - "This is a beam search operator that help to generate sequences."); + AddComment(R"DOC( +This operator does the search in beams for one time step. +Specifically, it selects the top-K candidate word ids of current step from +Input(ids) according to their Input(scores) for all source sentences, +where K is Attr(beam_size) and Input(ids), Input(scores) are predicted results +from the computation cell. Additionally, Input(pre_ids) and Input(pre_scores) +are the output of beam_search at previous step, they are needed for special use +to handle ended candidate translations. The paths linking prefixes and selected +candidates are organized and reserved in lod. + +Note that the Input(scores) passed in should be accumulated scores, and +length penalty should be done with extra operators before calculating the +accumulated scores if needed, also suggest finding top-K before it and +using the top-K candidates following. +)DOC"); } }; @@ -254,10 +294,12 @@ class BeamSearchInferVarType : public framework::VarTypeInference { void operator()(const framework::OpDesc &op_desc, framework::BlockDesc *block) const override { for (auto &o : op_desc.Output("selected_ids")) { - block->Var(o)->SetType(framework::proto::VarType::LOD_TENSOR); + auto &selected_ids = block->FindRecursiveOrCreateVar(o); + selected_ids.SetType(framework::proto::VarType::LOD_TENSOR); } for (auto &o : op_desc.Output("selected_scores")) { - block->Var(o)->SetType(framework::proto::VarType::LOD_TENSOR); + auto &selected_scores = block->FindRecursiveOrCreateVar(o); + selected_scores.SetType(framework::proto::VarType::LOD_TENSOR); } } }; diff --git a/paddle/fluid/operators/beam_search_op.h b/paddle/fluid/operators/beam_search_op.h index 9b51db8a45..b5e2ed0592 100644 --- a/paddle/fluid/operators/beam_search_op.h +++ b/paddle/fluid/operators/beam_search_op.h @@ -14,10 +14,6 @@ limitations under the License. */ #pragma once -#ifdef PADDLE_WITH_TESTING -#include "gtest/gtest.h" -#endif - #include #include #include "paddle/fluid/framework/lod_tensor.h" @@ -136,6 +132,7 @@ class BeamSearch { * that means no candidates is provided, and the task will stop running. */ void operator()(const framework::LoDTensor& pre_ids, + const framework::LoDTensor& pre_scores, framework::LoDTensor* selected_ids, framework::LoDTensor* selected_scores); /* @@ -157,14 +154,16 @@ class BeamSearch { protected: /* - * Delete all the records that follows the end token. + * Prune the source sentences all branchs finished, and it is optional. 
+ * Pruning must happen one step later than finishing (thus pre_ids is needed + * here), since the end tokens must be written out. */ - int PruneEndidCandidates(const framework::LoDTensor& pre_ids, - std::vector>* items); + void PruneEndBeams(const framework::LoDTensor& pre_ids, + std::vector>* items); /* * Transform the items into a map whose key is offset, value is the items. - * NOTE low performance + * NOTE low performance. */ std::vector> ToMap( const std::vector>& inputs, size_t element_num); @@ -172,12 +171,16 @@ class BeamSearch { /* * For each source, select top beam_size records. */ - std::vector> SelectTopBeamSizeItems(); + std::vector> SelectTopBeamSizeItems( + const framework::LoDTensor& pre_ids, + const framework::LoDTensor& pre_scores); /* * Get the items of the next source sequence, return false if no remaining items. */ - bool NextItemSet(std::vector* items); + bool NextItemSet(const framework::LoDTensor& pre_ids, + const framework::LoDTensor& pre_scores, + std::vector* items); private: size_t beam_size_; @@ -196,24 +199,25 @@ template class BeamSearchOpKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& context) const override { - auto* ids_var = context.Input("ids"); - auto* scores_var = context.Input("scores"); - auto* pre_ids_var = context.Input("pre_ids"); - PADDLE_ENFORCE_NOT_NULL(ids_var); - PADDLE_ENFORCE_NOT_NULL(scores_var); - PADDLE_ENFORCE_NOT_NULL(pre_ids_var); + auto* ids = context.Input("ids"); + auto* scores = context.Input("scores"); + auto* pre_ids = context.Input("pre_ids"); + auto* pre_scores = context.Input("pre_scores"); + PADDLE_ENFORCE_NOT_NULL(ids); + PADDLE_ENFORCE_NOT_NULL(scores); + PADDLE_ENFORCE_NOT_NULL(pre_ids); + PADDLE_ENFORCE_NOT_NULL(pre_scores); size_t level = context.Attr("level"); size_t beam_size = context.Attr("beam_size"); int end_id = context.Attr("end_id"); - BeamSearch alg(*ids_var, *scores_var, level, beam_size, end_id); - auto selected_ids_var = - context.Output("selected_ids"); - auto selected_scores_var = + BeamSearch alg(*ids, *scores, level, beam_size, end_id); + auto selected_ids = context.Output("selected_ids"); + auto selected_scores = context.Output("selected_scores"); - PADDLE_ENFORCE_NOT_NULL(selected_ids_var); - PADDLE_ENFORCE_NOT_NULL(selected_scores_var); - alg(*pre_ids_var, selected_ids_var, selected_scores_var); + PADDLE_ENFORCE_NOT_NULL(selected_ids); + PADDLE_ENFORCE_NOT_NULL(selected_scores); + alg(*pre_ids, *pre_scores, selected_ids, selected_scores); } }; } // namespace operators diff --git a/paddle/fluid/operators/beam_search_op_test.cc b/paddle/fluid/operators/beam_search_op_test.cc index ec666359aa..c4f4b478fb 100644 --- a/paddle/fluid/operators/beam_search_op_test.cc +++ b/paddle/fluid/operators/beam_search_op_test.cc @@ -30,7 +30,7 @@ using std::endl; void CreateInput(LoDTensor* ids, LoDTensor* scores) { LoD lod; - vector level0({0, 1, 4}); + vector level0({0, 2, 4}); vector level1({0, 1, 2, 3, 4}); lod.push_back(level0); lod.push_back(level1); @@ -64,17 +64,22 @@ TEST(beam_search_op, run) { for (int i = 0; i < 4; i++) { pre_ids.mutable_data(place)[i] = i + 1; } + LoDTensor pre_scores; + pre_scores.Resize(framework::make_ddim(vector(4, 1))); + for (int i = 0; i < 4; i++) { + pre_scores.mutable_data(place)[i] = 0.1 * (i + 1); + } - BeamSearch beamsearch(ids, scores, (int64_t)0, (int64_t)2, 0); + BeamSearch beamsearch(ids, scores, (size_t)0, (size_t)2, 0); LoDTensor sids, sscores; - beamsearch(pre_ids, &sids, &sscores); + beamsearch(pre_ids, pre_scores, &sids, &sscores);
LOG(INFO) << "score: " << sscores << endl; ASSERT_EQ(sids.lod(), sscores.lod()); - vector tids({2, 4, 3, 8}); - vector tscores({0.3, 0.5, 0.9, 0.7}); + vector tids({4, 2, 3, 8}); + vector tscores({0.5, 0.6, 0.9, 0.7}); for (int i = 0; i < 4; i++) { ASSERT_EQ(tids[i], sids.data()[i]); diff --git a/paddle/fluid/operators/bilinear_interp_op.cc b/paddle/fluid/operators/bilinear_interp_op.cc index 69f79bf93b..2dc3399da1 100644 --- a/paddle/fluid/operators/bilinear_interp_op.cc +++ b/paddle/fluid/operators/bilinear_interp_op.cc @@ -34,23 +34,38 @@ class BilinearInterpOp : public framework::OperatorWithKernel { int out_w = ctx->Attrs().Get("out_w"); PADDLE_ENFORCE_EQ(dim_x.size(), 4, "X's dimension must be 4"); + if (ctx->HasInput("OutSize")) { + auto out_size_dim = ctx->GetInputDim("OutSize"); + PADDLE_ENFORCE_EQ(out_size_dim.size(), 1, + "OutSize's dimension size must be 1"); + PADDLE_ENFORCE_EQ(out_size_dim[0], 2, "OutSize's dim[0] must be 2"); + } std::vector dim_out({dim_x[0], dim_x[1], out_h, out_w}); ctx->SetOutputDim("Out", framework::make_ddim(dim_out)); } + + protected: + framework::OpKernelType GetExpectedKernelType( + const framework::ExecutionContext& ctx) const override { + return framework::OpKernelType( + framework::ToDataType(ctx.Input("X")->type()), ctx.GetPlace()); + } }; class BilinearInterpOpMaker : public framework::OpProtoAndCheckerMaker { public: - BilinearInterpOpMaker(OpProto* proto, OpAttrChecker* op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("X", - "(Tensor) The input tensor of bilinear interpolation, " + "The input tensor of bilinear interpolation. " "This is a 4-D tensor with shape of (N x C x h x w)"); - AddOutput("Out", - "(Tensor) The dimension of output is (N x C x out_h x out_w]"); + AddInput("OutSize", + "This is a 1-D tensor with two numbers. " + "The first number is height and the second number is width.") + .AsDispensable(); + AddOutput("Out", "The dimension of output is (N x C x out_h x out_w)"); - AddAttr("out_h", "(int) output height of bilinear interpolation op."); - AddAttr("out_w", "(int) output width of bilinear interpolation op."); + AddAttr("out_h", "output height of bilinear interpolation op."); + AddAttr("out_w", "output width of bilinear interpolation op."); AddComment(R"DOC( Bilinear interpolation is an extension of linear interpolation for interpolating functions of two variables (e.g.
H-direction and @@ -79,6 +94,12 @@ class BilinearInterpOpGrad : public framework::OperatorWithKernel { ctx->SetOutputDim(framework::GradVarName("X"), dim_x); } } + + framework::OpKernelType GetExpectedKernelType( + const framework::ExecutionContext& ctx) const override { + return framework::OpKernelType( + framework::ToDataType(ctx.Input("X")->type()), ctx.GetPlace()); + } }; } // namespace operators @@ -89,6 +110,7 @@ REGISTER_OPERATOR(bilinear_interp, ops::BilinearInterpOp, ops::BilinearInterpOpMaker, paddle::framework::DefaultGradOpDescMaker); REGISTER_OPERATOR(bilinear_interp_grad, ops::BilinearInterpOpGrad); -REGISTER_OP_CPU_KERNEL(bilinear_interp, ops::BilinearInterpKernel); +REGISTER_OP_CPU_KERNEL(bilinear_interp, ops::BilinearInterpKernel, + ops::BilinearInterpKernel); REGISTER_OP_CPU_KERNEL(bilinear_interp_grad, ops::BilinearInterpGradKernel); diff --git a/paddle/fluid/operators/bilinear_interp_op.cu b/paddle/fluid/operators/bilinear_interp_op.cu index 510190f1aa..4c19715384 100644 --- a/paddle/fluid/operators/bilinear_interp_op.cu +++ b/paddle/fluid/operators/bilinear_interp_op.cu @@ -102,10 +102,21 @@ class BilinearInterpOpCUDAKernel : public framework::OpKernel { auto* input_t = ctx.Input("X"); // float tensor auto* output_t = ctx.Output("Out"); // float tensor auto* input = input_t->data(); - auto* output = output_t->mutable_data(ctx.GetPlace()); int out_h = ctx.Attr("out_h"); int out_w = ctx.Attr("out_w"); + auto out_dims = output_t->dims(); + auto out_size_t = ctx.Input("OutSize"); + if (out_size_t != nullptr) { + Tensor sizes; + framework::TensorCopy(*out_size_t, platform::CPUPlace(), &sizes); + auto size_data = sizes.data(); + out_h = size_data[0]; + out_w = size_data[1]; + } + auto* output = output_t->mutable_data( + {out_dims[0], out_dims[1], out_h, out_w}, ctx.GetPlace()); + int batch_size = input_t->dims()[0]; int channels = input_t->dims()[1]; int in_h = input_t->dims()[2]; @@ -139,8 +150,8 @@ class BilinearInterpGradOpCUDAKernel : public framework::OpKernel { void Compute(const framework::ExecutionContext& ctx) const override { auto* d_input_t = ctx.Output(framework::GradVarName("X")); auto* d_output_t = ctx.Input(framework::GradVarName("Out")); - auto* d_input = d_input_t->mutable_data(ctx.GetPlace()); auto* d_output = d_output_t->data(); + auto* d_input = d_input_t->mutable_data(ctx.GetPlace()); auto& device_ctx = ctx.template device_context(); @@ -149,6 +160,16 @@ class BilinearInterpGradOpCUDAKernel : public framework::OpKernel { int out_h = ctx.Attr("out_h"); int out_w = ctx.Attr("out_w"); + + auto out_size_t = ctx.Input("OutSize"); + if (out_size_t != nullptr) { + Tensor sizes; + framework::TensorCopy(*out_size_t, platform::CPUPlace(), &sizes); + auto size_data = sizes.data(); + out_h = size_data[0]; + out_w = size_data[1]; + } + int batch_size = d_input_t->dims()[0]; int channels = d_input_t->dims()[1]; int in_h = d_input_t->dims()[2]; diff --git a/paddle/fluid/operators/bilinear_interp_op.h b/paddle/fluid/operators/bilinear_interp_op.h index f6cd77e4d4..70847cb8c1 100644 --- a/paddle/fluid/operators/bilinear_interp_op.h +++ b/paddle/fluid/operators/bilinear_interp_op.h @@ -24,11 +24,18 @@ class BilinearInterpKernel : public framework::OpKernel { void Compute(const framework::ExecutionContext& ctx) const override { auto* input_t = ctx.Input("X"); // float tensor auto* output_t = ctx.Output("Out"); // float tensor + auto out_dims = output_t->dims(); auto* input = input_t->data(); - auto* output = output_t->mutable_data(ctx.GetPlace()); - int out_h = 
ctx.Attr("out_h"); int out_w = ctx.Attr("out_w"); + auto out_size_t = ctx.Input("OutSize"); + if (out_size_t != nullptr) { + auto out_size_data = out_size_t->data(); + out_h = out_size_data[0]; + out_w = out_size_data[1]; + } + auto* output = output_t->mutable_data( + {out_dims[0], out_dims[1], out_h, out_w}, ctx.GetPlace()); int batch_size = input_t->dims()[0]; int channels = input_t->dims()[1]; int in_h = input_t->dims()[2]; @@ -39,8 +46,10 @@ class BilinearInterpKernel : public framework::OpKernel { int in_chw = channels * in_hw; int out_chw = channels * out_hw; - T ratio_h = (out_h > 1) ? static_cast(in_h - 1) / (out_h - 1) : 0.f; - T ratio_w = (out_w > 1) ? static_cast(in_w - 1) / (out_w - 1) : 0.f; + float ratio_h = + (out_h > 1) ? static_cast(in_h - 1) / (out_h - 1) : 0.f; + float ratio_w = + (out_w > 1) ? static_cast(in_w - 1) / (out_w - 1) : 0.f; if (in_h == out_h && in_w == out_w) { memcpy(output, input, input_t->numel() * sizeof(T)); @@ -49,24 +58,24 @@ class BilinearInterpKernel : public framework::OpKernel { for (int i = 0; i < out_h; ++i) { // loop for images int h = ratio_h * i; int hid = (h < in_h - 1) ? 1 : 0; - T h1lambda = ratio_h * i - h; - T h2lambda = 1 - h1lambda; + float h1lambda = ratio_h * i - h; + float h2lambda = 1.f - h1lambda; for (int j = 0; j < out_w; ++j) { int w = ratio_w * j; int wid = (w < in_w - 1) ? 1 : 0; - T w1lambda = ratio_w * j - w; - T w2lambda = 1 - w1lambda; + float w1lambda = ratio_w * j - w; + float w2lambda = 1.f - w1lambda; // calculate four position for bilinear interpolation const T* in_pos = &input[k * in_chw + h * in_w + w]; T* out_pos = &output[k * out_chw + i * out_w + j]; for (int c = 0; c < channels; ++c) { // loop for channels // bilinear interpolation - out_pos[0] = + out_pos[0] = static_cast( h2lambda * (w2lambda * in_pos[0] + w1lambda * in_pos[wid]) + h1lambda * (w2lambda * in_pos[hid * in_w] + - w1lambda * in_pos[hid * in_w + wid]); + w1lambda * in_pos[hid * in_w + wid])); in_pos += in_hw; out_pos += out_hw; } @@ -83,9 +92,8 @@ class BilinearInterpGradKernel : public framework::OpKernel { void Compute(const framework::ExecutionContext& ctx) const override { auto* d_input_t = ctx.Output(framework::GradVarName("X")); auto* d_output_t = ctx.Input(framework::GradVarName("Out")); - auto* d_input = d_input_t->mutable_data(ctx.GetPlace()); auto* d_output = d_output_t->data(); - + auto* d_input = d_input_t->mutable_data(ctx.GetPlace()); auto& device_ctx = ctx.template device_context(); math::SetConstant zero; @@ -93,6 +101,14 @@ class BilinearInterpGradKernel : public framework::OpKernel { int out_h = ctx.Attr("out_h"); int out_w = ctx.Attr("out_w"); + + auto out_size_t = ctx.Input("OutSize"); + if (out_size_t != nullptr) { + auto out_size_data = out_size_t->data(); + out_h = out_size_data[0]; + out_w = out_size_data[1]; + } + int batch_size = d_input_t->dims()[0]; int channels = d_input_t->dims()[1]; int in_h = d_input_t->dims()[2]; @@ -103,8 +119,10 @@ class BilinearInterpGradKernel : public framework::OpKernel { int in_chw = channels * in_hw; int out_chw = channels * out_hw; - T ratio_h = (out_h > 1) ? static_cast(in_h - 1) / (out_h - 1) : 0.f; - T ratio_w = (out_w > 1) ? static_cast(in_w - 1) / (out_w - 1) : 0.f; + float ratio_h = + (out_h > 1) ? static_cast(in_h - 1) / (out_h - 1) : 0.f; + float ratio_w = + (out_w > 1) ? 
static_cast(in_w - 1) / (out_w - 1) : 0.f; if (in_h == out_h && in_w == out_w) { memcpy(d_input, d_output, d_input_t->numel() * sizeof(T)); @@ -113,22 +131,24 @@ class BilinearInterpGradKernel : public framework::OpKernel { for (int i = 0; i < out_h; ++i) { // loop for images int h = ratio_h * i; int hid = (h < in_h - 1) ? 1 : 0; - T h1lambda = ratio_h * i - h; - T h2lambda = 1 - h1lambda; + float h1lambda = ratio_h * i - h; + float h2lambda = 1 - h1lambda; for (int j = 0; j < out_w; ++j) { int w = ratio_w * j; int wid = (w < in_w - 1) ? 1 : 0; - T w1lambda = ratio_w * j - w; - T w2lambda = 1 - w1lambda; + float w1lambda = ratio_w * j - w; + float w2lambda = 1 - w1lambda; T* in_pos = &d_input[k * in_chw + h * in_w + w]; const T* out_pos = &d_output[k * out_chw + i * out_w + j]; for (int c = 0; c < channels; ++c) { // loop for channels - in_pos[0] += h2lambda * w2lambda * out_pos[0]; - in_pos[wid] += h2lambda * w1lambda * out_pos[0]; - in_pos[hid * in_w] += h1lambda * w2lambda * out_pos[0]; - in_pos[hid * in_w + wid] += h1lambda * w1lambda * out_pos[0]; + in_pos[0] += static_cast(h2lambda * w2lambda * out_pos[0]); + in_pos[wid] += static_cast(h2lambda * w1lambda * out_pos[0]); + in_pos[hid * in_w] += + static_cast(h1lambda * w2lambda * out_pos[0]); + in_pos[hid * in_w + wid] += + static_cast(h1lambda * w1lambda * out_pos[0]); in_pos += in_hw; out_pos += out_hw; } diff --git a/paddle/fluid/operators/bilinear_tensor_product_op.cc b/paddle/fluid/operators/bilinear_tensor_product_op.cc index e910ad92d1..8d261a118a 100644 --- a/paddle/fluid/operators/bilinear_tensor_product_op.cc +++ b/paddle/fluid/operators/bilinear_tensor_product_op.cc @@ -65,8 +65,7 @@ class BilinearTensorProductOp : public framework::OperatorWithKernel { class BilinearTensorProductOpMaker : public framework::OpProtoAndCheckerMaker { public: - BilinearTensorProductOpMaker(OpProto* proto, OpAttrChecker* op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("X", "The first input of bilinear_tensor_product operator."); AddInput("Y", "The second input of bilinear_tensor_product operator."); AddInput("Weight", diff --git a/paddle/fluid/operators/cast_op.cc b/paddle/fluid/operators/cast_op.cc index dd0068d571..8d6a498dc9 100644 --- a/paddle/fluid/operators/cast_op.cc +++ b/paddle/fluid/operators/cast_op.cc @@ -21,8 +21,7 @@ namespace operators { class CastOpProtoMaker : public framework::OpProtoAndCheckerMaker { public: - CastOpProtoMaker(OpProto *proto, OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("X", "The input tensor of cast op"); AddOutput("Out", "The output tensor of cast op"); AddAttr("out_dtype", "output data type"); @@ -90,4 +89,5 @@ REGISTER_OP_CPU_KERNEL(cast, ops::CastOpKernel, ops::CastOpKernel, ops::CastOpKernel, ops::CastOpKernel, + ops::CastOpKernel, ops::CastOpKernel); diff --git a/paddle/fluid/operators/cast_op.cu b/paddle/fluid/operators/cast_op.cu index c486c5850e..657d162878 100644 --- a/paddle/fluid/operators/cast_op.cu +++ b/paddle/fluid/operators/cast_op.cu @@ -21,5 +21,5 @@ using CastOpKernel = REGISTER_OP_CUDA_KERNEL(cast, CastOpKernel, CastOpKernel, CastOpKernel, CastOpKernel, - CastOpKernel, + CastOpKernel, CastOpKernel, CastOpKernel); diff --git a/paddle/fluid/operators/channel_close_op.cc b/paddle/fluid/operators/channel_close_op.cc index 5892650c49..8e2db250a0 100644 --- a/paddle/fluid/operators/channel_close_op.cc +++ b/paddle/fluid/operators/channel_close_op.cc @@ -50,8 +50,7 @@ class 
ChannelCloseOpOpInferShape : public framework::InferShapeBase { class ChannelCloseOpMaker : public framework::OpProtoAndCheckerMaker { public: - ChannelCloseOpMaker(OpProto *proto, OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput(kChannel, "The Channel Variable that should be closed by" " the ChannelClose Op."); diff --git a/paddle/fluid/operators/channel_create_op.cc b/paddle/fluid/operators/channel_create_op.cc index b2fdfd0e1f..a7f59e4088 100644 --- a/paddle/fluid/operators/channel_create_op.cc +++ b/paddle/fluid/operators/channel_create_op.cc @@ -91,8 +91,7 @@ class ChannelCreateOpOpInferShape : public framework::InferShapeBase { class ChannelCreateOpMaker : public framework::OpProtoAndCheckerMaker { public: - ChannelCreateOpMaker(OpProto *proto, OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddOutput(kOutput, "The object of a Channel type created by ChannelCreate Op."); AddAttr("capacity", "The size of the buffer of Channel.") diff --git a/paddle/fluid/operators/channel_recv_op.cc b/paddle/fluid/operators/channel_recv_op.cc index 25c5c3c95e..101015e837 100644 --- a/paddle/fluid/operators/channel_recv_op.cc +++ b/paddle/fluid/operators/channel_recv_op.cc @@ -72,8 +72,7 @@ class ChannelRecvOp : public framework::OperatorBase { class ChannelRecvOpMaker : public framework::OpProtoAndCheckerMaker { public: - ChannelRecvOpMaker(OpProto *proto, OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput(Channel, "(Channel) A variable which \"receives\" a value sent " "to it by a channel_send op.") diff --git a/paddle/fluid/operators/channel_send_op.cc b/paddle/fluid/operators/channel_send_op.cc index 66d33617ed..67d6deb511 100644 --- a/paddle/fluid/operators/channel_send_op.cc +++ b/paddle/fluid/operators/channel_send_op.cc @@ -57,8 +57,7 @@ class ChannelSendOp : public framework::OperatorBase { class ChannelSendOpMaker : public framework::OpProtoAndCheckerMaker { public: - ChannelSendOpMaker(OpProto *proto, OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput(Channel, "(Channel) A variable which \"sends\" the passed in value to " "a listening receiver.") diff --git a/paddle/fluid/operators/checkpoint_notify_op.cc b/paddle/fluid/operators/checkpoint_notify_op.cc new file mode 100644 index 0000000000..3a2527e407 --- /dev/null +++ b/paddle/fluid/operators/checkpoint_notify_op.cc @@ -0,0 +1,88 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License.
*/ + +#include // NOLINT +#include + +#include "paddle/fluid/framework/data_type.h" +#include "paddle/fluid/framework/lod_tensor.h" +#include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/operators/detail/macros.h" +#include "paddle/fluid/operators/send_recv_util.h" +#include "paddle/fluid/string/printf.h" + +namespace paddle { +namespace operators { + +class CheckpointNotifyOp : public framework::OperatorBase { + public: + CheckpointNotifyOp(const std::string& type, + const framework::VariableNameMap& inputs, + const framework::VariableNameMap& outputs, + const framework::AttributeMap& attrs) + : OperatorBase(type, inputs, outputs, attrs) {} + + void RunImpl(const framework::Scope& scope, + const platform::Place& place) const override { + std::vector epmap = Attr>("epmap"); + std::string dir = Attr("dir"); + std::string lookup_table_name = Attr("lookup_table"); + + distributed::RPCClient* rpc_client = + distributed::RPCClient::GetInstance(); + for (size_t i = 0; i < epmap.size(); i++) { + auto lookup_table_save_dir = + string::Sprintf("%s/%s_%d", dir, lookup_table_name, i); + rpc_client->AsyncCheckpointNotify(epmap[i], lookup_table_save_dir); + VLOG(3) << "checkpoint notify sending lookup table: " << lookup_table_name + << " and dir:" << dir << " to " << epmap[i]; + } + PADDLE_ENFORCE(rpc_client->Wait(), "internal error in RPCClient"); + } +}; + +class CheckpointNotifyOpMaker : public framework::OpProtoAndCheckerMaker { + public: + void Make() { + AddAttr>("epmap", + "(string vector, default 127.0.0.1:6164) " + "Parameter Server endpoints in the order") + .SetDefault({"127.0.0.1:6164"}); + AddAttr( + "dir", "(string, default '') indicates the folder the checkpoint will use"); + AddAttr("lookup_table", + "(string, default '') the lookup table name"); + AddComment(R"DOC( +CheckpointNotify operator + +This operator will send the lookup table and its checkpoint directory to the listen_and_serve op at +the parameter server. +)DOC"); + } +}; + +class CheckpointNotifyOpShapeInference : public framework::InferShapeBase { + public: + void operator()(framework::InferShapeContext* ctx) const override {} +}; + +} // namespace operators +} // namespace paddle + +namespace ops = paddle::operators; + +REGISTER_OPERATOR(checkpoint_notify, ops::CheckpointNotifyOp, + paddle::framework::EmptyGradOpMaker, + ops::CheckpointNotifyOpMaker, + ops::CheckpointNotifyOpShapeInference); diff --git a/paddle/fluid/operators/chunk_eval_op.cc b/paddle/fluid/operators/chunk_eval_op.cc index 95440ff89e..dc43c69be0 100644 --- a/paddle/fluid/operators/chunk_eval_op.cc +++ b/paddle/fluid/operators/chunk_eval_op.cc @@ -66,8 +66,7 @@ class ChunkEvalOp : public framework::OperatorWithKernel { class ChunkEvalOpMaker : public framework::OpProtoAndCheckerMaker { public: - ChunkEvalOpMaker(OpProto *proto, OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("Inference", "(Tensor, default: Tensor). " "Predictions from the network."); @@ -92,32 +91,31 @@ class ChunkEvalOpMaker : public framework::OpProtoAndCheckerMaker { "(int64_t). The number of chunks both in Inference and Label on the " "given mini-batch."); AddAttr("num_chunk_types", - "(int). The number of chunk type. See below for details."); - AddAttr( - "chunk_scheme", - "(string, default IOB). The labeling scheme indicating " - "how to encode the chunks. Must be IOB, IOE, IOBES or plain. See below " - "for details.") + "The number of chunk types.
See the description for details."); + AddAttr("chunk_scheme", + "The labeling scheme indicating " + "how to encode the chunks. Must be IOB, IOE, IOBES or " + "plain. See the description " + "for details.") .SetDefault("IOB"); AddAttr>("excluded_chunk_types", - "(list) A list including chunk type ids " + "A list including chunk type ids " "indicating chunk types that are not counted. " - "See below for details.") + "See the description for details.") .SetDefault(std::vector{}); AddComment(R"DOC( For some basics of chunking, please refer to -‘Chunking with Support Vector Machines ’. +'Chunking with Support Vector Machines '. - -CheckEvalOp computes the precision, recall, and F1-score of chunk detection, +ChunkEvalOp computes the precision, recall, and F1-score of chunk detection, and supports IOB, IOE, IOBES and IO (also known as plain) tagging schemes. Here is a NER example of labeling for these tagging schemes: - - Li Ming works at Agricultural Bank of China in Beijing. - IO: I-PER I-PER O O I-ORG I-ORG I-ORG I-ORG O I-LOC - IOB: B-PER I-PER O O B-ORG I-ORG I-ORG I-ORG O B-LOC - IOE: I-PER E-PER O O I-ORG I-ORG I-ORG E-ORG O E-LOC - IOBES: B-PER E-PER O O I-ORG I-ORG I-ORG E-ORG O S-LOC + + Li Ming works at Agricultural Bank of China in Beijing. + IO I-PER I-PER O O I-ORG I-ORG I-ORG I-ORG O I-LOC + IOB B-PER I-PER O O B-ORG I-ORG I-ORG I-ORG O B-LOC + IOE I-PER E-PER O O I-ORG I-ORG I-ORG E-ORG O E-LOC + IOBES B-PER E-PER O O I-ORG I-ORG I-ORG E-ORG O S-LOC There are three chunk types (named entity types) including PER (person), ORG (organization) and LOC (LOCATION), and we can see that the labels have the form -. @@ -125,31 +123,31 @@ and LOC(LOCATION), and we can see that the labels have the form - class ClipOpMaker : public framework::OpProtoAndCheckerMaker { public: - ClipOpMaker(OpProto* proto, OpAttrChecker* op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("X", "(Tensor) The input of clip op. " "The number of dimensions must be between [1, 9]."); diff --git a/paddle/fluid/operators/compare_op.cc b/paddle/fluid/operators/compare_op.cc index 3a6a357e81..f40b1ba338 100644 --- a/paddle/fluid/operators/compare_op.cc +++ b/paddle/fluid/operators/compare_op.cc @@ -21,33 +21,28 @@ namespace operators { template class CompareOpProtoMaker : public framework::OpProtoAndCheckerMaker { public: - CompareOpProtoMaker(OpProto *proto, OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { OpComment comment; - AddInput("X", - string::Sprintf("(LoDTensor) the left hand operand of %s operator", - comment.type)); - AddInput("Y", string::Sprintf( - "(LoDTensor) the right hand operand of %s operator", - comment.type)); + AddInput("X", string::Sprintf("the left hand operand of %s operator", + comment.type)); + AddInput("Y", string::Sprintf("the right hand operand of %s operator", + comment.type)); AddAttr("force_cpu", - "(bool, default false) Force fill output variable to cpu " + "Force fill output variable to cpu " "memory. Otherwise, fill output variable to the running " - "device") - .SetDefault(false); - AddOutput("Out", string::Sprintf( - "(LoDTensor) n-dim bool tensor. Each element is %s", - comment.equation)); - AddComment(string::Sprintf(R"DOC(%s Operator - + "device [default true].") + .SetDefault(true); + AddOutput("Out", string::Sprintf("n-dim bool tensor. Each element is %s", + comment.equation)); + AddComment(string::Sprintf(R"DOC( It operates element-wise on X and Y, and returns the Out.
Each of them is an N-dim tensor. X and Y could be any type. Each element of the Out tensor is -calculated by %s +calculated by $%s$ )DOC", - comment.type, comment.equation)); - AddAttr("axis", - "(int, default -1). The start dimension index " - "for broadcasting Y onto X.") + comment.equation)); + AddAttr( + "axis", + "The start dimension index for broadcasting Y onto X. [default -1]") .SetDefault(-1) .EqualGreaterThan(-1); } diff --git a/paddle/fluid/operators/concat_op.cc b/paddle/fluid/operators/concat_op.cc index 3bb3bd4eb1..c724055937 100644 --- a/paddle/fluid/operators/concat_op.cc +++ b/paddle/fluid/operators/concat_op.cc @@ -63,8 +63,7 @@ class ConcatOp : public framework::OperatorWithKernel { class ConcatOpMaker : public framework::OpProtoAndCheckerMaker { public: - ConcatOpMaker(OpProto *proto, OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("X", "Input tensors of concat operator.").AsDuplicable(); AddOutput("Out", "Output tensor of concat operator."); AddAttr("axis", @@ -108,7 +107,13 @@ REGISTER_OPERATOR(concat, ops::ConcatOp, ops::ConcatOpMaker, false> /* set false to disable empty grad */); REGISTER_OPERATOR(concat_grad, ops::ConcatOpGrad); REGISTER_OP_CPU_KERNEL( - concat, ops::ConcatKernel); + concat, ops::ConcatKernel, + ops::ConcatKernel, + ops::ConcatKernel, + ops::ConcatKernel); REGISTER_OP_CPU_KERNEL( concat_grad, - ops::ConcatGradKernel); + ops::ConcatGradKernel, + ops::ConcatGradKernel, + ops::ConcatGradKernel, + ops::ConcatGradKernel); diff --git a/paddle/fluid/operators/concat_op.cu.cc b/paddle/fluid/operators/concat_op.cu.cc index 590eca9d06..8e38e5231f 100644 --- a/paddle/fluid/operators/concat_op.cu.cc +++ b/paddle/fluid/operators/concat_op.cu.cc @@ -15,7 +15,13 @@ limitations under the License. */ #include "paddle/fluid/operators/concat_op.h" namespace ops = paddle::operators; REGISTER_OP_CUDA_KERNEL( - concat, ops::ConcatKernel); + concat, ops::ConcatKernel, + ops::ConcatKernel, + ops::ConcatKernel, + ops::ConcatKernel); REGISTER_OP_CUDA_KERNEL( concat_grad, - ops::ConcatGradKernel); + ops::ConcatGradKernel, + ops::ConcatGradKernel, + ops::ConcatGradKernel, + ops::ConcatGradKernel); diff --git a/paddle/fluid/operators/concat_op.h b/paddle/fluid/operators/concat_op.h index 1b1b8bf5ed..a496301526 100644 --- a/paddle/fluid/operators/concat_op.h +++ b/paddle/fluid/operators/concat_op.h @@ -60,34 +60,45 @@ template class ConcatGradKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& ctx) const { - auto* in = ctx.Input(framework::GradVarName("Out")); + auto* out_grad = + ctx.Input(framework::GradVarName("Out")); + auto ins = ctx.MultiInput("X"); + auto out_var_names = ctx.Outputs(framework::GradVarName("X")); auto outs = ctx.MultiOutput(framework::GradVarName("X")); int64_t axis = static_cast(ctx.Attr("axis")); + // get the output tensors whose names are not kEmptyVarName + std::vector outputs; + for (size_t j = 0; j < outs.size(); ++j) { + if (out_var_names[j] != framework::kEmptyVarName) { + outs[j]->mutable_data(ctx.GetPlace()); + outputs.push_back(outs[j]); + } else { + outputs.push_back(nullptr); + } + } + // Sometimes direct copies will be faster; this may need deeper analysis.
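+ // When axis == 0, each input's gradient occupies a contiguous row block + // of out_grad, so one strided copy per non-empty output is sufficient.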
if (axis == 0 && outs.size() < 10) { size_t input_offset = 0; - auto in_stride = framework::stride_numel(in->dims()); + const auto in_stride = framework::stride_numel(out_grad->dims()); - for (auto& out : outs) { - out->mutable_data(ctx.GetPlace()); - auto out_stride = framework::stride_numel(out->dims()); - StridedNumelCopyWithAxis(ctx.device_context(), axis, out->data(), - out_stride, in->data() + input_offset, - in_stride, out_stride[axis]); + for (size_t i = 0; i < outs.size(); ++i) { + auto out_stride = framework::stride_numel(ins[i]->dims()); + auto* out = outputs[i]; + if (out != nullptr) { + StridedNumelCopyWithAxis( + ctx.device_context(), axis, out->data(), out_stride, + out_grad->data() + input_offset, in_stride, out_stride[axis]); + } input_offset += out_stride[axis]; } } else { - std::vector outputs(outs.size()); - for (size_t j = 0; j < outs.size(); ++j) { - outs[j]->mutable_data(ctx.GetPlace()); - outputs[j] = *outs[j]; - } - auto& dev_ctx = ctx.template device_context(); paddle::operators::math::ConcatGradFunctor concat_grad_functor; - concat_grad_functor(dev_ctx, *in, static_cast(axis), &outputs); + concat_grad_functor(dev_ctx, *out_grad, ins, static_cast(axis), + &outputs); } } }; diff --git a/paddle/fluid/operators/conditional_block_op.cc b/paddle/fluid/operators/conditional_block_op.cc index 27f74a789b..580fde7538 100644 --- a/paddle/fluid/operators/conditional_block_op.cc +++ b/paddle/fluid/operators/conditional_block_op.cc @@ -14,6 +14,7 @@ limitations under the License. */ #include #include "paddle/fluid/framework/executor.h" #include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/framework/var_type.h" namespace paddle { namespace operators { @@ -47,7 +48,7 @@ class ConditionalOp : public framework::OperatorBase { if (!(ips.size() == 1UL && ips[0]->IsInitialized())) { PADDLE_THROW("should have one initialized input as condition"); } - if (!(ips[0]->type().hash_code() == typeid(bool).hash_code() && // NOLINT + if (!(framework::IsType(ips[0]->type()) && // NOLINT ips[0]->numel() == 1)) { PADDLE_THROW( "condition input's data type should be bool, " @@ -108,8 +109,7 @@ class ConditionalBlockOp : public ConditionalOp { class ConditionalBlockOpProtoMaker : public framework::OpProtoAndCheckerMaker { public: - ConditionalBlockOpProtoMaker(OpProto *proto, OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("X", "The conditional variable of this operator. If X is empty, the " "whole sub-block will not be executed.") @@ -205,9 +205,10 @@ class ConditionalBlockGradInferShape : public framework::InferShapeBase { context->SetOutputsDim(framework::GradVarName("Params"), context->GetInputsDim("Params")); } - PADDLE_ENFORCE(context->HasOutputs(framework::GradVarName("X"))); - context->SetOutputsDim(framework::GradVarName("X"), - context->GetInputsDim("X")); + if (context->HasOutputs(framework::GradVarName("X"))) { + context->SetOutputsDim(framework::GradVarName("X"), + context->GetInputsDim("X")); + } } }; diff --git a/paddle/fluid/operators/conv_cudnn_op.cu.cc b/paddle/fluid/operators/conv_cudnn_op.cu.cc index 7a7b8b76e4..22cbf680c0 100644 --- a/paddle/fluid/operators/conv_cudnn_op.cu.cc +++ b/paddle/fluid/operators/conv_cudnn_op.cu.cc @@ -20,10 +20,10 @@ limitations under the License. 
*/ #include "paddle/fluid/platform/cudnn_helper.h" #include "paddle/fluid/platform/float16.h" -DEFINE_bool(cudnn_algo_use_autotune, true, +DEFINE_bool(cudnn_deterministic, false, "Whether allow using an autotuning algorithm for convolution " "operator. The autotuning algorithm may be non-deterministic. If " - "false, the algorithm is deterministic."); + "true, the algorithm is deterministic."); namespace paddle { namespace operators { @@ -77,7 +77,7 @@ class CUDNNConvOpKernel : public framework::OpKernel { // cudnn 7 can support groups, no need to do it mannually // FIXME(typhoonzero): find a better way to disable groups // rather than setting it to 1. - PADDLE_ENFORCE(platform::dynload::cudnnSetConvolutionGroupCount( + CUDNN_ENFORCE(platform::dynload::cudnnSetConvolutionGroupCount( cudnn_conv_desc, groups)); groups = 1; #endif @@ -129,7 +129,7 @@ class CUDNNConvOpKernel : public framework::OpKernel { auto& dev_ctx = ctx.template device_context(); auto handle = dev_ctx.cudnn_handle(); - PADDLE_ENFORCE(platform::dynload::cudnnGetConvolutionForwardAlgorithm( + CUDNN_ENFORCE(platform::dynload::cudnnGetConvolutionForwardAlgorithm( handle, cudnn_input_desc, cudnn_filter_desc, cudnn_conv_desc, cudnn_output_desc, CUDNN_CONVOLUTION_FWD_SPECIFY_WORKSPACE_LIMIT, workspace_size_limit, &algo)); @@ -140,18 +140,18 @@ class CUDNNConvOpKernel : public framework::OpKernel { if (dev_ctx.GetComputeCapability() >= 70 && std::type_index(typeid(T)) == std::type_index(typeid(platform::float16))) { - PADDLE_ENFORCE(platform::dynload::cudnnSetConvolutionMathType( + CUDNN_ENFORCE(platform::dynload::cudnnSetConvolutionMathType( cudnn_conv_desc, CUDNN_TENSOR_OP_MATH)); // Currently tensor core is only enabled using this algo algo = CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_PRECOMP_GEMM; } else { - PADDLE_ENFORCE(platform::dynload::cudnnSetConvolutionMathType( + CUDNN_ENFORCE(platform::dynload::cudnnSetConvolutionMathType( cudnn_conv_desc, CUDNN_DEFAULT_MATH)); } #endif // get workspace size able to allocate - PADDLE_ENFORCE(platform::dynload::cudnnGetConvolutionForwardWorkspaceSize( + CUDNN_ENFORCE(platform::dynload::cudnnGetConvolutionForwardWorkspaceSize( handle, cudnn_input_desc, cudnn_filter_desc, cudnn_conv_desc, cudnn_output_desc, algo, &workspace_size_in_bytes)); // It is possible for float16 on Volta GPU to allocate more memory than @@ -165,7 +165,7 @@ class CUDNNConvOpKernel : public framework::OpKernel { // ------------------- cudnn conv forward --------------------- ScalingParamType alpha = 1.0f, beta = 0.0f; for (int i = 0; i < groups; i++) { - PADDLE_ENFORCE(platform::dynload::cudnnConvolutionForward( + CUDNN_ENFORCE(platform::dynload::cudnnConvolutionForward( handle, &alpha, cudnn_input_desc, input_data + i * group_offset_in, cudnn_filter_desc, filter_data + i * group_offset_filter, cudnn_conv_desc, algo, cudnn_workspace, workspace_size_in_bytes, @@ -218,7 +218,7 @@ class CUDNNConvGradOpKernel : public framework::OpKernel { // cudnn 7 can support groups, no need to do it mannually // FIXME(typhoonzero): find a better way to disable groups // rather than setting it to 1. 
- PADDLE_ENFORCE(platform::dynload::cudnnSetConvolutionGroupCount( + CUDNN_ENFORCE(platform::dynload::cudnnSetConvolutionGroupCount( cudnn_conv_desc, groups)); groups = 1; #endif @@ -272,8 +272,8 @@ class CUDNNConvGradOpKernel : public framework::OpKernel { auto& dev_ctx = ctx.template device_context(); auto handle = dev_ctx.cudnn_handle(); if (input_grad) { - if (FLAGS_cudnn_algo_use_autotune) { - PADDLE_ENFORCE( + if (!FLAGS_cudnn_deterministic) { + CUDNN_ENFORCE( platform::dynload::cudnnGetConvolutionBackwardDataAlgorithm( handle, cudnn_filter_desc, // dyDesc: Handle to the previously initialized input @@ -289,7 +289,7 @@ class CUDNNConvGradOpKernel : public framework::OpKernel { data_algo = CUDNN_CONVOLUTION_BWD_DATA_ALGO_1; } - PADDLE_ENFORCE( + CUDNN_ENFORCE( platform::dynload::cudnnGetConvolutionBackwardDataWorkspaceSize( handle, cudnn_filter_desc, cudnn_output_grad_desc, cudnn_conv_desc, cudnn_input_desc, data_algo, &tmp_size)); @@ -297,8 +297,8 @@ class CUDNNConvGradOpKernel : public framework::OpKernel { } if (filter_grad) { - if (FLAGS_cudnn_algo_use_autotune) { - PADDLE_ENFORCE( + if (!FLAGS_cudnn_deterministic) { + CUDNN_ENFORCE( platform::dynload::cudnnGetConvolutionBackwardFilterAlgorithm( handle, cudnn_input_desc, cudnn_output_grad_desc, cudnn_conv_desc, cudnn_filter_desc, @@ -308,7 +308,7 @@ class CUDNNConvGradOpKernel : public framework::OpKernel { filter_algo = CUDNN_CONVOLUTION_BWD_FILTER_ALGO_1; } - PADDLE_ENFORCE( + CUDNN_ENFORCE( platform::dynload::cudnnGetConvolutionBackwardFilterWorkspaceSize( handle, cudnn_input_desc, cudnn_output_grad_desc, cudnn_conv_desc, cudnn_filter_desc, filter_algo, &tmp_size)); @@ -326,7 +326,7 @@ class CUDNNConvGradOpKernel : public framework::OpKernel { // Because beta is zero, it is unnecessary to reset input_grad. for (int i = 0; i < groups; i++) { - PADDLE_ENFORCE(platform::dynload::cudnnConvolutionBackwardData( + CUDNN_ENFORCE(platform::dynload::cudnnConvolutionBackwardData( handle, &alpha, cudnn_filter_desc, filter_data + i * group_offset_filter, cudnn_output_grad_desc, output_grad_data + i * group_offset_out, cudnn_conv_desc, data_algo, @@ -339,7 +339,7 @@ class CUDNNConvGradOpKernel : public framework::OpKernel { T* filter_grad_data = filter_grad->mutable_data(ctx.GetPlace()); // Because beta is zero, it is unnecessary to reset filter_grad. 
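This hunk systematically swaps `PADDLE_ENFORCE` for `CUDNN_ENFORCE` around cuDNN calls so that failures report a decoded `cudnnStatus_t` rather than a generic check failure. A self-contained illustration of what such a status-checking macro can look like; the typedef and macro body are stand-ins, not Paddle's actual definitions, and `cudnnGetErrorString` is redeclared here only so the sketch compiles on its own:

```cpp
#include <cstdio>
#include <cstdlib>

typedef int cudnnStatus_t;  // stand-in for the real cuDNN enum
#define CUDNN_STATUS_SUCCESS 0

const char* cudnnGetErrorString(cudnnStatus_t s) {  // stand-in
  return s == CUDNN_STATUS_SUCCESS ? "CUDNN_STATUS_SUCCESS"
                                   : "CUDNN_STATUS_BAD_PARAM";
}

#define CUDNN_ENFORCE(expr)                                      \
  do {                                                           \
    cudnnStatus_t s_ = (expr);                                   \
    if (s_ != CUDNN_STATUS_SUCCESS) {                            \
      std::fprintf(stderr, "cuDNN error: %s at %s:%d\n",         \
                   cudnnGetErrorString(s_), __FILE__, __LINE__); \
      std::abort();                                              \
    }                                                            \
  } while (0)

int main() { CUDNN_ENFORCE(CUDNN_STATUS_SUCCESS); }
```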
for (int i = 0; i < groups; i++) { - PADDLE_ENFORCE(platform::dynload::cudnnConvolutionBackwardFilter( + CUDNN_ENFORCE(platform::dynload::cudnnConvolutionBackwardFilter( handle, &alpha, cudnn_input_desc, input_data + i * group_offset_in, cudnn_output_grad_desc, output_grad_data + i * group_offset_out, cudnn_conv_desc, filter_algo, cudnn_workspace, diff --git a/paddle/fluid/operators/conv_mkldnn_op.cc b/paddle/fluid/operators/conv_mkldnn_op.cc index 63d371310d..f07ab5a33b 100644 --- a/paddle/fluid/operators/conv_mkldnn_op.cc +++ b/paddle/fluid/operators/conv_mkldnn_op.cc @@ -18,6 +18,204 @@ namespace paddle { namespace operators { +using framework::DataLayout; +using mkldnn::memory; +using mkldnn::primitive; +using mkldnn::reorder; +using mkldnn::stream; +using platform::to_void_cast; +using platform::GetMKLDNNFormat; + +class ConvMKLDNNHandler : public platform::MKLDNNHandler { + public: + ConvMKLDNNHandler( + std::shared_ptr conv_pd, + const platform::MKLDNNDeviceContext& dev_ctx, mkldnn::engine engine, + const std::string& base_key) + : platform::MKLDNNHandler(dev_ctx, engine, base_key) { + conv_pd_ = conv_pd; + } + + ConvMKLDNNHandler( + std::shared_ptr conv_pd, + std::shared_ptr + conv_bwd_data_pd, + std::shared_ptr + conv_bwd_weights_pd, + const platform::MKLDNNDeviceContext& dev_ctx, mkldnn::engine engine, + const std::string& base_key) + : platform::MKLDNNHandler(dev_ctx, engine, base_key), + conv_pd_(conv_pd), + conv_bwd_weights_pd_(conv_bwd_weights_pd), + conv_bwd_data_pd_(conv_bwd_data_pd) { + // If we are in a Grad operator, update the key with a BWD suffix to + // distinguish it from FWD memory primitives + key_ += "-BWD"; + } + + std::shared_ptr AcquireSrcMemoryFromWeightsPrimitive( + const std::shared_ptr user_memory_p, + std::vector& pipeline) { // NOLINT + auto src_pd = conv_bwd_weights_pd_->src_primitive_desc(); + auto user_pd = user_memory_p->get_primitive_desc(); + return this->AcquireMemory(src_pd, user_pd, user_memory_p, + "@weights-src_mem_p", pipeline); + } + + std::shared_ptr AcquireDiffDstMemoryFromWeightsPrimitive( + const std::shared_ptr user_memory_p, + std::vector& pipeline) { // NOLINT + auto diff_dst_pd = conv_bwd_weights_pd_->diff_dst_primitive_desc(); + auto user_pd = user_memory_p->get_primitive_desc(); + return this->AcquireMemory(diff_dst_pd, user_pd, user_memory_p, + "@weights-diff_dst_mem_p", pipeline); + } + + std::shared_ptr AcquireDiffWeightsMemoryFromWeightsPrimitive( + void* ptr) { + return this->AcquireMemoryFromPrimitive( + conv_bwd_weights_pd_->diff_weights_primitive_desc(), ptr, + "@diff_weights_mem_p"); + } + + std::shared_ptr AcquireDiffDstMemoryFromDataPrimitive( + const std::shared_ptr user_memory_p, + std::vector& pipeline) { // NOLINT + auto diff_dst_pd = conv_bwd_data_pd_->diff_dst_primitive_desc(); + auto user_pd = user_memory_p->get_primitive_desc(); + return this->AcquireMemory(diff_dst_pd, user_pd, user_memory_p, + "@data-diff_dst_mem_p", pipeline); + } + + std::shared_ptr AcquireWeightsMemoryFromDataPrimitive( + const std::shared_ptr user_weights_memory_p, + std::vector& pipeline) { // NOLINT + auto weights_pd = conv_bwd_data_pd_->weights_primitive_desc(); + auto user_pd = user_weights_memory_p->get_primitive_desc(); + return this->AcquireMemory(weights_pd, user_pd, user_weights_memory_p, + "@data-weights_mem_p", pipeline); + } + + std::shared_ptr AcquireDiffSrcMemoryFromDataPrimitive( + void* ptr) { + return this->AcquireMemoryFromPrimitive( + conv_bwd_data_pd_->diff_src_primitive_desc(), ptr, "@diff_src_mem_p"); + } + +
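The handler's Acquire* methods all follow one pattern: look the primitive up in the device context's blob map under a string key, create and cache it on a miss, and mark `is_reusing_` on a hit. A minimal sketch of that memoization pattern with hypothetical types:

```cpp
// Minimal sketch (hypothetical types) of the device-context "blob"
// cache the Acquire* methods rely on: primitives are memoized under
// string keys, so later iterations reuse rather than rebuild them.
#include <iostream>
#include <map>
#include <memory>
#include <string>

struct Primitive {
  std::string desc;
};

class BlobCache {
 public:
  std::shared_ptr<Primitive> GetBlob(const std::string& key) const {
    auto it = blobs_.find(key);
    return it == blobs_.end() ? nullptr : it->second;
  }
  void SetBlob(const std::string& key, std::shared_ptr<Primitive> p) {
    blobs_[key] = std::move(p);
  }

 private:
  std::map<std::string, std::shared_ptr<Primitive>> blobs_;
};

std::shared_ptr<Primitive> AcquireConv(BlobCache* ctx,
                                       const std::string& key) {
  auto p = ctx->GetBlob(key + "@conv_p");
  if (p == nullptr) {  // first call: create and cache
    p = std::make_shared<Primitive>();
    p->desc = "conv fwd";
    ctx->SetBlob(key + "@conv_p", p);
  }  // later calls fall through and reuse the cached primitive
  return p;
}

int main() {
  BlobCache ctx;
  auto a = AcquireConv(&ctx, "key");
  auto b = AcquireConv(&ctx, "key");
  std::cout << (a == b) << '\n';  // prints 1: same cached primitive
}
```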
std::shared_ptr AcquireDstMemoryFromPrimitive(void* ptr) { + return this->AcquireMemoryFromPrimitive(conv_pd_->dst_primitive_desc(), ptr, + "@dst_mem_p"); + } + + std::shared_ptr AcquireSrcMemoryFromPrimitive( + const std::shared_ptr user_memory_p, + std::vector& pipeline) { // NOLINT + auto src_pd = conv_pd_->src_primitive_desc(); + auto user_pd = user_memory_p->get_primitive_desc(); + return this->AcquireMemory(src_pd, user_pd, user_memory_p, "@src_mem_p", + pipeline); + } + + std::shared_ptr AcquireWeightsMemoryFromPrimitive( + const std::shared_ptr user_weights_memory_p, + std::vector& pipeline) { // NOLINT + auto user_weights_pd = user_weights_memory_p->get_primitive_desc(); + auto weights_pd = conv_pd_->weights_primitive_desc(); + return this->AcquireMemory(weights_pd, user_weights_pd, + user_weights_memory_p, "@weights_mem_p", + pipeline); + } + + std::shared_ptr AcquireConvolution( + std::shared_ptr src_memory_p, + std::shared_ptr weights_memory_p, + std::shared_ptr dst_memory_p) { + auto prim_key = key_ + "@conv_p"; + auto conv_p = std::static_pointer_cast( + dev_ctx_.GetBlob(prim_key)); + PADDLE_ENFORCE((conv_p != nullptr) || (is_reusing_ == false), + "Fail to find convolution primitive in device context"); + if (conv_p == nullptr) { + conv_p = std::make_shared( + *conv_pd_, *(src_memory_p), *(weights_memory_p.get()), + *(dst_memory_p.get())); + + dev_ctx_.SetBlob(prim_key, conv_p); + } else { + is_reusing_ = true; + } + return conv_p; + } + + std::shared_ptr + AcquireConvolutionBackwardWeights( + std::shared_ptr src_memory_p, + std::shared_ptr diff_dst_memory_p, + std::shared_ptr diff_weights_memory_p) { + auto prim_key = key_ + "@conv_bwd_weights_p"; + auto conv_bwd_weights_p = + std::static_pointer_cast( + dev_ctx_.GetBlob(prim_key)); + PADDLE_ENFORCE( + (conv_bwd_weights_p != nullptr) || (is_reusing_ == false), + "Fail to find convolution bwd weights primitive in device context"); + if (conv_bwd_weights_p == nullptr) { + // create backward conv primitive for weights + conv_bwd_weights_p = + std::make_shared( + *conv_bwd_weights_pd_, *src_memory_p, *diff_dst_memory_p, + *diff_weights_memory_p); + dev_ctx_.SetBlob(prim_key, conv_bwd_weights_p); + } else { + is_reusing_ = true; + } + return conv_bwd_weights_p; + } + + std::shared_ptr + AcquireConvolutionBackwardData( + std::shared_ptr diff_dst_memory_p, + std::shared_ptr weights_memory_p, + std::shared_ptr diff_src_memory_p) { + auto prim_key = key_ + "@conv_bwd_data_p"; + auto conv_bwd_data_p = + std::static_pointer_cast( + dev_ctx_.GetBlob(prim_key)); + PADDLE_ENFORCE( + (conv_bwd_data_p != nullptr) || (is_reusing_ == false), + "Fail to find convolution bwd data primitive in device context"); + if (conv_bwd_data_p == nullptr) { + conv_bwd_data_p = std::make_shared( + *conv_bwd_data_pd_, *diff_dst_memory_p, *weights_memory_p, + *diff_src_memory_p); + dev_ctx_.SetBlob(prim_key, conv_bwd_data_p); + } else { + is_reusing_ = true; + } + return conv_bwd_data_p; + } + + // Generate keys for storing/retrieving primitives for this operator + // TODO(jczaja): Make hashing function more optimal + static std::string GetHash(memory::dims& input_dims, // NOLINT + memory::dims& weights_dims, // NOLINT + std::vector& strides, // NOLINT + std::vector& paddings, // NOLINT + std::vector& dilations, // NOLINT + int groups, const std::string& suffix) { + return dims2str(input_dims) + dims2str(weights_dims) + dims2str(strides) + + dims2str(paddings) + dims2str(dilations) + std::to_string(groups) + + suffix; + } + + private: + std::shared_ptr
conv_pd_; + std::shared_ptr + conv_bwd_weights_pd_; + std::shared_ptr + conv_bwd_data_pd_; +}; + template class ConvMKLDNNOpKernel : public paddle::framework::OpKernel { public: @@ -33,10 +231,12 @@ class ConvMKLDNNOpKernel : public paddle::framework::OpKernel { auto* filter = ctx.Input("Filter"); auto* output = ctx.Output("Output"); - // Get an unique name from "argument" name of "Output" variable - // This name will be used as key when saving info into device context - const std::string key = ctx.op().Output("Output"); - const std::string key_conv_pd = key + "@conv_pd"; + PADDLE_ENFORCE(input->layout() == DataLayout::kMKLDNN && + input->format() != memory::format::format_undef, + "Wrong layout/format set for Input tensor"); + PADDLE_ENFORCE(filter->layout() == DataLayout::kMKLDNN && + filter->format() != memory::format::format_undef, + "Wrong layout/format set for Filter tensor"); std::vector strides = ctx.Attr>("strides"); std::vector paddings = ctx.Attr>("paddings"); @@ -63,49 +263,77 @@ class ConvMKLDNNOpKernel : public paddle::framework::OpKernel { paddle::framework::vectorize2int(filter->dims()); std::vector dst_tz = paddle::framework::vectorize2int(output->dims()); - // TODO(pzelazko-intel): support more formats + // Get unique name for storing MKLDNN primitives + const std::string key = ConvMKLDNNHandler::GetHash( + src_tz, weights_tz, strides, paddings, dilations, groups, + ctx.op().Output("Output")); + const std::string key_conv_pd = key + "@conv_pd"; + + std::vector pipeline; + + auto user_src_md = platform::MKLDNNMemDesc( + {src_tz}, platform::MKLDNNGetDataType(), input->format()); + auto user_weights_md = platform::MKLDNNMemDesc( + {weights_tz}, platform::MKLDNNGetDataType(), filter->format()); + + /* create memory descriptor for convolution without specified format + * ('any') which lets a primitive (convolution in this case) choose + * the memory format preferred for best performance + */ + std::string data_format = ctx.Attr("data_format"); + auto chosen_memory_format = + platform::data_format_to_memory_format(data_format); + auto src_md = platform::MKLDNNMemDesc( - src_tz, mkldnn::memory::data_type::f32, mkldnn::memory::format::nchw); - auto weights_md = - platform::MKLDNNMemDesc(weights_tz, mkldnn::memory::data_type::f32, - mkldnn::memory::format::oihw); + src_tz, platform::MKLDNNGetDataType(), chosen_memory_format); + auto weights_md = platform::MKLDNNMemDesc( + weights_tz, platform::MKLDNNGetDataType(), chosen_memory_format); auto dst_md = platform::MKLDNNMemDesc( - dst_tz, mkldnn::memory::data_type::f32, mkldnn::memory::format::nchw); - - auto src_memory = - mkldnn::memory({src_md, mkldnn_engine}, - reinterpret_cast(const_cast(input_data))); - auto weights_memory = - mkldnn::memory({weights_md, mkldnn_engine}, - reinterpret_cast(const_cast(filter_data))); - auto dst_memory = mkldnn::memory({dst_md, mkldnn_engine}, output_data); + dst_tz, platform::MKLDNNGetDataType(), chosen_memory_format); + // create a conv primitive descriptor and save it for usage in backward std::shared_ptr conv_pd = ConvFwdPrimitiveDesc(src_md, weights_md, dst_md, strides, paddings, mkldnn_engine); - - // save conv_pd into global device context to be referred in backward path + // Save conv_pd/src_memory/weights_memory for backward pass dev_ctx.SetBlob(key_conv_pd, conv_pd); + ConvMKLDNNHandler handler(conv_pd, dev_ctx, mkldnn_engine, key); + + // create mkldnn memory from input tensors (data/weights) + auto user_src_memory_p = + handler.AcquireSrcMemory(user_src_md, to_void_cast(input_data)); + 
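The forward kernel now builds its cache key from shapes and attributes via `ConvMKLDNNHandler::GetHash`, so two convolutions with different geometry can no longer collide on the same cached primitives. A standalone sketch of the key construction (`dims2str` is reimplemented here for illustration only):

```cpp
#include <iostream>
#include <string>
#include <vector>

static std::string dims2str(const std::vector<int>& dims) {
  std::string s;
  for (int d : dims) s += std::to_string(d) + "-";
  return s;
}

std::string GetHash(const std::vector<int>& input_dims,
                    const std::vector<int>& weights_dims,
                    const std::vector<int>& strides,
                    const std::vector<int>& paddings,
                    const std::vector<int>& dilations, int groups,
                    const std::string& suffix) {
  // Two convs agree on this key only if every shape/attribute matches.
  return dims2str(input_dims) + dims2str(weights_dims) + dims2str(strides) +
         dims2str(paddings) + dims2str(dilations) + std::to_string(groups) +
         suffix;
}

int main() {
  std::cout << GetHash({8, 3, 224, 224}, {64, 3, 3, 3}, {1, 1}, {1, 1},
                       {1, 1}, 1, "conv2d/Output")
            << '\n';
}
```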
auto user_weights_memory_p = handler.AcquireWeightsMemory( + user_weights_md, to_void_cast(filter_data)); + + // create reorder primitive if the input format is not the preferred one + auto src_memory_p = + handler.AcquireSrcMemoryFromPrimitive(user_src_memory_p, pipeline); + auto weights_memory_p = handler.AcquireWeightsMemoryFromPrimitive( + user_weights_memory_p, pipeline); + auto dst_memory_p = + handler.AcquireDstMemoryFromPrimitive(to_void_cast(output_data)); + // create convolution op primitive - auto conv_prim = mkldnn::convolution_forward(*conv_pd, src_memory, - weights_memory, dst_memory); + auto conv_p = handler.AcquireConvolution(src_memory_p, weights_memory_p, + dst_memory_p); // push primitive to stream and wait until it's executed - std::vector pipeline{conv_prim}; - mkldnn::stream(mkldnn::stream::kind::eager).submit(pipeline).wait(); + pipeline.push_back(*conv_p); + stream(stream::kind::eager).submit(pipeline).wait(); + + output->set_layout(DataLayout::kMKLDNN); + output->set_format(GetMKLDNNFormat(*dst_memory_p)); } private: std::unique_ptr - ConvFwdPrimitiveDesc(const mkldnn::memory::desc& src, - const mkldnn::memory::desc& weights, - const mkldnn::memory::desc& dst, - const std::vector& strides, + ConvFwdPrimitiveDesc(const memory::desc& src, const memory::desc& weights, + const memory::desc& dst, const std::vector& strides, const std::vector& paddings, const mkldnn::engine& engine) const { - mkldnn::memory::dims stride_dims = {strides[0], strides[1]}; - mkldnn::memory::dims padding_dims = {paddings[0], paddings[1]}; + memory::dims stride_dims = {strides[0], strides[1]}; + memory::dims padding_dims = {paddings[0], paddings[1]}; auto conv_desc = mkldnn::convolution_forward::desc( mkldnn::prop_kind::forward, mkldnn::convolution_direct, src, weights, @@ -139,15 +367,25 @@ class ConvMKLDNNGradOpKernel : public paddle::framework::OpKernel { Tensor* input_grad = ctx.Output(framework::GradVarName("Input")); Tensor* filter_grad = ctx.Output(framework::GradVarName("Filter")); - if (!input_grad && !filter_grad) return; + PADDLE_ENFORCE(input->layout() == DataLayout::kMKLDNN && + input->format() != memory::format::format_undef, + "Wrong layout/format set for Input tensor"); + PADDLE_ENFORCE(filter->layout() == DataLayout::kMKLDNN && + filter->format() != memory::format::format_undef, + "Wrong layout/format set for Filter tensor"); + PADDLE_ENFORCE(output->layout() == DataLayout::kMKLDNN && + output->format() != memory::format::format_undef, + "Wrong layout/format set for Output tensor"); + PADDLE_ENFORCE(output_grad->layout() == DataLayout::kMKLDNN && + output_grad->format() != memory::format::format_undef, + "Wrong layout/format set for output_grad tensor"); - // Get an unique name from "argument" name of "Output" variable - // This name will be used as key when saving info into device context - const std::string key = ctx.op().Input("Output"); - const std::string key_conv_pd = key + "@conv_pd"; + if (!input_grad && !filter_grad) return; std::vector strides = ctx.Attr>("strides"); std::vector paddings = ctx.Attr>("paddings"); + std::vector dilations = ctx.Attr>("dilations"); + int groups = ctx.Attr("groups"); const T* input_data = input->data(); const T* filter_data = filter->data(); @@ -167,24 +405,43 @@ class ConvMKLDNNGradOpKernel : public paddle::framework::OpKernel { paddle::framework::vectorize2int(filter->dims()); std::vector dst_tz = paddle::framework::vectorize2int(output->dims()); - // TODO(pzelazko-intel): support more formats + // Get an unique name from "argument" 
name of "Output" variable + // as well as attributes of primitive to be created + // This name will be used as key when saving info into device context + const std::string key = + ConvMKLDNNHandler::GetHash(src_tz, weights_tz, strides, paddings, + dilations, groups, ctx.op().Input("Output")); + + const std::string key_conv_pd = key + "@conv_pd"; + std::vector pipeline; + + // Create user memory descriptors + auto user_src_md = platform::MKLDNNMemDesc( + {src_tz}, platform::MKLDNNGetDataType(), input->format()); + auto user_weights_md = platform::MKLDNNMemDesc( + {weights_tz}, platform::MKLDNNGetDataType(), filter->format()); + auto user_diff_dst_md = platform::MKLDNNMemDesc( + {dst_tz}, platform::MKLDNNGetDataType(), output_grad->format()); + + /* create memory descriptor for conv backward without specified format + * ('any') which lets a primitive (conv backward in this case) choose + * the memory format preferred for best performance + */ + std::string data_format = ctx.Attr("data_format"); + auto chosen_memory_format = + platform::data_format_to_memory_format(data_format); + auto src_md = platform::MKLDNNMemDesc( - src_tz, mkldnn::memory::data_type::f32, mkldnn::memory::format::nchw); + src_tz, platform::MKLDNNGetDataType(), chosen_memory_format); auto diff_src_md = platform::MKLDNNMemDesc( - src_tz, mkldnn::memory::data_type::f32, mkldnn::memory::format::nchw); - auto weights_md = - platform::MKLDNNMemDesc(weights_tz, mkldnn::memory::data_type::f32, - mkldnn::memory::format::oihw); - auto diff_weights_md = - platform::MKLDNNMemDesc(weights_tz, mkldnn::memory::data_type::f32, - mkldnn::memory::format::oihw); + src_tz, platform::MKLDNNGetDataType(), chosen_memory_format); + auto weights_md = platform::MKLDNNMemDesc( + weights_tz, platform::MKLDNNGetDataType(), chosen_memory_format); + auto diff_weights_md = platform::MKLDNNMemDesc( + weights_tz, platform::MKLDNNGetDataType(), chosen_memory_format); auto diff_dst_md = platform::MKLDNNMemDesc( - dst_tz, mkldnn::memory::data_type::f32, mkldnn::memory::format::nchw); + dst_tz, platform::MKLDNNGetDataType(), chosen_memory_format); - // create memory - auto diff_dst_memory = mkldnn::memory( - {diff_weights_md, mkldnn_engine}, - reinterpret_cast(const_cast(output_grad_data))); // Retrieve conv_pd from device context auto conv_pd = std::static_pointer_cast( @@ -192,83 +449,77 @@ class ConvMKLDNNGradOpKernel : public paddle::framework::OpKernel { PADDLE_ENFORCE(conv_pd != nullptr, "Fail to find conv_pd in device context"); + // create backward convolution weights primitive descriptor + auto conv_bwd_weights_desc = mkldnn::convolution_backward_weights::desc( + mkldnn::convolution_direct, src_md, diff_weights_md, diff_dst_md, + strides, paddings, paddings, mkldnn::padding_kind::zero); + auto conv_bwd_weights_pd = + std::make_shared( + conv_bwd_weights_desc, mkldnn_engine, *conv_pd); + + // create backward convolution data primitive descriptor + auto conv_bwd_data_desc = mkldnn::convolution_backward_data::desc( + mkldnn::convolution_direct, diff_src_md, weights_md, diff_dst_md, + strides, paddings, paddings, mkldnn::padding_kind::zero); + auto conv_bwd_data_pd = + std::make_shared( + conv_bwd_data_desc, mkldnn_engine, *conv_pd); + + ConvMKLDNNHandler handler(conv_pd, conv_bwd_data_pd, conv_bwd_weights_pd, + dev_ctx, mkldnn_engine, key); + + // create mkldnn memory from input tensors (data/weights) + auto user_src_memory_p = + handler.AcquireSrcMemory(user_src_md, to_void_cast(input_data)); + auto user_weights_memory_p = handler.AcquireWeightsMemory( 
+ user_weights_md, to_void_cast(filter_data)); + auto user_diff_dst_memory_p = handler.AcquireDiffDstMemory( + user_diff_dst_md, to_void_cast(output_grad_data)); + // create backward conv primitive for weights if (filter_grad) { - // create primitive descriptor - mkldnn::convolution_backward_weights::primitive_desc conv_bwd_weights_pd = - ConvBwdWeightsPrimitiveDesc(src_md, diff_weights_md, diff_dst_md, - strides, paddings, *conv_pd, - mkldnn_engine); - - // create memory - auto diff_weights_memory = - mkldnn::memory({diff_weights_md, mkldnn_engine}, - reinterpret_cast(filter_grad_data)); - auto src_memory = - mkldnn::memory({src_md, mkldnn_engine}, - reinterpret_cast(const_cast(input_data))); + auto src_memory_p = handler.AcquireSrcMemoryFromWeightsPrimitive( + user_src_memory_p, pipeline); - // create backward conv primitive for weights - auto conv_bwd_weights_prim = mkldnn::convolution_backward_weights( - conv_bwd_weights_pd, src_memory, diff_dst_memory, - diff_weights_memory); + auto diff_dst_memory_4filter_p = + handler.AcquireDiffDstMemoryFromWeightsPrimitive( + user_diff_dst_memory_p, pipeline); - // push primitive and execute it - std::vector pipeline{conv_bwd_weights_prim}; - mkldnn::stream(mkldnn::stream::kind::eager).submit(pipeline).wait(); - } + auto diff_weights_memory_p = + handler.AcquireDiffWeightsMemoryFromWeightsPrimitive( + reinterpret_cast(filter_grad_data)); - if (input_grad) { - // create primitive descriptor - mkldnn::convolution_backward_data::primitive_desc conv_bwd_data_pd = - ConvBwdDataPrimitiveDesc(diff_src_md, weights_md, diff_dst_md, - strides, paddings, *conv_pd, mkldnn_engine); - - // create memory - auto diff_src_memory = mkldnn::memory( - {diff_src_md, mkldnn_engine}, - reinterpret_cast(const_cast(input_grad_data))); - auto weights_memory = - mkldnn::memory({weights_md, mkldnn_engine}, - reinterpret_cast(const_cast(filter_data))); - - // create backward conv primitive for data - auto conv_bwd_data_prim = mkldnn::convolution_backward_data( - conv_bwd_data_pd, diff_dst_memory, weights_memory, diff_src_memory); + auto conv_bwd_weights_p = handler.AcquireConvolutionBackwardWeights( + src_memory_p, diff_dst_memory_4filter_p, diff_weights_memory_p); // push primitive to stream and wait until it's executed - std::vector pipeline{conv_bwd_data_prim}; - mkldnn::stream(mkldnn::stream::kind::eager).submit(pipeline).wait(); + pipeline.push_back(*conv_bwd_weights_p); + + filter_grad->set_layout(DataLayout::kMKLDNN); + filter_grad->set_format(GetMKLDNNFormat(*diff_weights_memory_p)); } - } // Compute() - private: - mkldnn::convolution_backward_weights::primitive_desc - ConvBwdWeightsPrimitiveDesc( - const mkldnn::memory::desc& src, const mkldnn::memory::desc& diff_weights, - const mkldnn::memory::desc& diff_dst, const std::vector& strides, - const std::vector& paddings, - const mkldnn::convolution_forward::primitive_desc& conv_pd, - const mkldnn::engine& engine) const { - auto conv_bwd_weights_desc = mkldnn::convolution_backward_weights::desc( - mkldnn::convolution_direct, src, diff_weights, diff_dst, strides, - paddings, paddings, mkldnn::padding_kind::zero); - return mkldnn::convolution_backward_weights::primitive_desc( - conv_bwd_weights_desc, engine, conv_pd); - } + if (input_grad) { + auto weights_memory_p = handler.AcquireWeightsMemoryFromDataPrimitive( + user_weights_memory_p, pipeline); - mkldnn::convolution_backward_data::primitive_desc ConvBwdDataPrimitiveDesc( - const mkldnn::memory::desc& diff_src, const mkldnn::memory::desc& weights, - const 
mkldnn::memory::desc& diff_dst, const std::vector& strides, - const std::vector& paddings, - const mkldnn::convolution_forward::primitive_desc& conv_pd, - const mkldnn::engine& engine) const { - auto conv_bwd_data_desc = mkldnn::convolution_backward_data::desc( - mkldnn::convolution_direct, diff_src, weights, diff_dst, strides, - paddings, paddings, mkldnn::padding_kind::zero); - return mkldnn::convolution_backward_data::primitive_desc(conv_bwd_data_desc, - engine, conv_pd); - } + auto diff_dst_memory_4data_p = + handler.AcquireDiffDstMemoryFromDataPrimitive(user_diff_dst_memory_p, + pipeline); + + auto diff_src_memory_p = handler.AcquireDiffSrcMemoryFromDataPrimitive( + reinterpret_cast(input_grad_data)); + + auto conv_bwd_data_p = handler.AcquireConvolutionBackwardData( + diff_dst_memory_4data_p, weights_memory_p, diff_src_memory_p); + + pipeline.push_back(*conv_bwd_data_p); + + input_grad->set_layout(DataLayout::kMKLDNN); + input_grad->set_format(GetMKLDNNFormat(*diff_src_memory_p)); + } + stream(stream::kind::eager).submit(pipeline).wait(); + } // Compute() }; } // namespace operators diff --git a/paddle/fluid/operators/conv_op.cc b/paddle/fluid/operators/conv_op.cc index 92748993c3..37153d5843 100644 --- a/paddle/fluid/operators/conv_op.cc +++ b/paddle/fluid/operators/conv_op.cc @@ -75,6 +75,10 @@ void ConvOp::InferShape(framework::InferShapeContext* ctx) const { framework::OpKernelType ConvOp::GetExpectedKernelType( const framework::ExecutionContext& ctx) const { framework::LibraryType library{framework::LibraryType::kPlain}; + // TODO(pzelazko-intel): enable MKLDNN layout when it's ready + std::string data_format = ctx.Attr("data_format"); + framework::DataLayout layout = framework::StringToDataLayout(data_format); + #ifdef PADDLE_WITH_CUDA if (platform::CanCUDNNBeUsed(ctx)) { library = framework::LibraryType::kCUDNN; @@ -84,6 +88,7 @@ framework::OpKernelType ConvOp::GetExpectedKernelType( if (library == framework::LibraryType::kPlain && platform::CanMKLDNNBeUsed(ctx)) { library = framework::LibraryType::kMKLDNN; + layout = framework::DataLayout::kMKLDNN; } #endif @@ -99,15 +104,11 @@ framework::OpKernelType ConvOp::GetExpectedKernelType( "float16 can only be used when CUDNN is used"); } - std::string data_format = ctx.Attr("data_format"); - // TODO(pzelazko-intel): enable MKLDNN layout when it's ready - framework::DataLayout layout = framework::StringToDataLayout(data_format); return framework::OpKernelType(input_data_type, ctx.GetPlace(), layout, library); } -Conv2DOpMaker::Conv2DOpMaker(OpProto* proto, OpAttrChecker* op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { +void Conv2DOpMaker::Make() { AddInput( "Input", "(Tensor) The input tensor of convolution operator. " @@ -123,7 +124,8 @@ Conv2DOpMaker::Conv2DOpMaker(OpProto* proto, OpAttrChecker* op_checker) "input image channels divided by the groups."); AddOutput("Output", "(Tensor) The output tensor of convolution operator. " - "The format of output tensor is also NCHW."); + "The format of output tensor is also NCHW.") + .Reuse("Input"); AddAttr>("strides", "(vector default:{1, 1}), the " "strides(h_stride, w_stride) of " @@ -200,8 +202,7 @@ $$ )DOC"); } -Conv3DOpMaker::Conv3DOpMaker(OpProto* proto, OpAttrChecker* op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { +void Conv3DOpMaker::Make() { AddInput( "Input", "(Tensor) The input tensor of convolution operator. 
" @@ -219,7 +220,8 @@ Conv3DOpMaker::Conv3DOpMaker(OpProto* proto, OpAttrChecker* op_checker) "input image channels divided by the groups."); AddOutput("Output", "(Tensor) The output tensor of convolution operator." - "The format of output tensor is also NCDHW."); + "The format of output tensor is also NCDHW.") + .Reuse("Input"); AddAttr>("strides", "(vector, default:{1, 1, 1}), the " "strides(d_stride, h_stride, w_stride) of " @@ -311,6 +313,10 @@ void ConvOpGrad::InferShape(framework::InferShapeContext* ctx) const { framework::OpKernelType ConvOpGrad::GetExpectedKernelType( const framework::ExecutionContext& ctx) const { framework::LibraryType library_{framework::LibraryType::kPlain}; + // TODO(pzelazko-intel): enable MKLDNN layout when it's ready + std::string data_format = ctx.Attr("data_format"); + framework::DataLayout layout_ = framework::StringToDataLayout(data_format); + #ifdef PADDLE_WITH_CUDA if (platform::CanCUDNNBeUsed(ctx)) { library_ = framework::LibraryType::kCUDNN; @@ -320,12 +326,10 @@ framework::OpKernelType ConvOpGrad::GetExpectedKernelType( if (library_ == framework::LibraryType::kPlain && platform::CanMKLDNNBeUsed(ctx)) { library_ = framework::LibraryType::kMKLDNN; + layout_ = framework::DataLayout::kMKLDNN; } #endif - std::string data_format = ctx.Attr("data_format"); - // TODO(pzelazko-intel): enable MKLDNN layout when it's ready - framework::DataLayout layout_ = framework::StringToDataLayout(data_format); return framework::OpKernelType( framework::ToDataType(ctx.Input("Input")->type()), ctx.GetPlace(), layout_, library_); diff --git a/paddle/fluid/operators/conv_op.h b/paddle/fluid/operators/conv_op.h index f462f00c08..b3140116df 100644 --- a/paddle/fluid/operators/conv_op.h +++ b/paddle/fluid/operators/conv_op.h @@ -60,12 +60,12 @@ inline bool IsExpand(const std::vector& filter_dim, // operator implementations can reuse the code. 
class Conv2DOpMaker : public framework::OpProtoAndCheckerMaker { public: - Conv2DOpMaker(OpProto* proto, OpAttrChecker* op_checker); + void Make() override; }; class Conv3DOpMaker : public framework::OpProtoAndCheckerMaker { public: - Conv3DOpMaker(OpProto* proto, OpAttrChecker* op_checker); + void Make() override; }; class ConvOp : public framework::OperatorWithKernel { diff --git a/paddle/fluid/operators/conv_shift_op.cc b/paddle/fluid/operators/conv_shift_op.cc index 82fdd30820..f2549e814d 100644 --- a/paddle/fluid/operators/conv_shift_op.cc +++ b/paddle/fluid/operators/conv_shift_op.cc @@ -75,8 +75,7 @@ class ConvShiftGradOp : public framework::OperatorWithKernel { class ConvShiftOpMaker : public framework::OpProtoAndCheckerMaker { public: - ConvShiftOpMaker(OpProto *proto, OpAttrChecker *op_checker) - : framework::OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("X", "(Tensor, default Tensor), a 2-D tensor with shape B x M, " "where B is the batch size and M is the data dimension."); diff --git a/paddle/fluid/operators/conv_transpose_cudnn_op.cu.cc b/paddle/fluid/operators/conv_transpose_cudnn_op.cu.cc index 901682edbb..82fff68e75 100644 --- a/paddle/fluid/operators/conv_transpose_cudnn_op.cu.cc +++ b/paddle/fluid/operators/conv_transpose_cudnn_op.cu.cc @@ -44,6 +44,7 @@ class CUDNNConvTransposeOpKernel : public framework::OpKernel { std::vector paddings = ctx.Attr>("paddings"); // cudnn v5 does not support dilations std::vector dilations = ctx.Attr>("dilations"); + int groups = ctx.Attr("groups"); int user_workspace_size = ctx.Attr("workspace_size_MB"); const T* input_data = input->data(); @@ -64,13 +65,13 @@ class CUDNNConvTransposeOpKernel : public framework::OpKernel { // (N, M, H, W) or (N, M, D, H, W) cudnnTensorDescriptor_t cudnn_input_desc = input_desc.descriptor( - layout, framework::vectorize2int(input->dims())); + layout, framework::vectorize2int(input->dims()), groups); // (N, C, O_h, O_w) or (N, C, O_d, O_h, O_w) cudnnTensorDescriptor_t cudnn_output_desc = output_desc.descriptor( - layout, framework::vectorize2int(output->dims())); + layout, framework::vectorize2int(output->dims()), groups); // (M, C, K_h, K_w) or (M, C, K_d, K_h, K_w) cudnnFilterDescriptor_t cudnn_filter_desc = filter_desc.descriptor( - layout, framework::vectorize2int(filter->dims())); + layout, framework::vectorize2int(filter->dims()), groups); cudnnConvolutionDescriptor_t cudnn_conv_desc = conv_desc.descriptor(paddings, strides, dilations); @@ -86,7 +87,7 @@ class CUDNNConvTransposeOpKernel : public framework::OpKernel { auto& dev_ctx = ctx.template device_context(); auto handle = dev_ctx.cudnn_handle(); // Get the algorithm - PADDLE_ENFORCE(platform::dynload::cudnnGetConvolutionBackwardDataAlgorithm( + CUDNN_ENFORCE(platform::dynload::cudnnGetConvolutionBackwardDataAlgorithm( handle, cudnn_filter_desc, cudnn_input_desc, cudnn_conv_desc, // dxDesc: Handle to the previously initialized output tensor // descriptor. 
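With `groups` threaded through the cuDNN transpose kernels, each descriptor is built for one group's channel slice and the kernel loops over groups, offsetting the raw data pointers. A standalone sketch of the per-group offset arithmetic used in this file (`numel / batch / groups` elements per sample belong to one group):

```cpp
#include <cassert>
#include <iostream>
#include <vector>

int main() {
  const int N = 2, C = 4, H = 3, W = 3, groups = 2;
  std::vector<float> input(N * C * H * W, 1.0f);

  // As in the kernel: input_offset = input->numel() / dims()[0] / groups.
  const int input_offset = static_cast<int>(input.size()) / N / groups;
  assert(input_offset == C / groups * H * W);

  for (int g = 0; g < groups; ++g) {
    // cuDNN is then called once per group on this shifted pointer.
    const float* slice = input.data() + input_offset * g;
    std::cout << "group " << g << " starts at element "
              << (slice - input.data()) << '\n';
  }
}
```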
@@ -94,7 +95,7 @@ class CUDNNConvTransposeOpKernel : public framework::OpKernel { workspace_size_limit, &algo)); // get workspace size able to allocate - PADDLE_ENFORCE( + CUDNN_ENFORCE( platform::dynload::cudnnGetConvolutionBackwardDataWorkspaceSize( handle, cudnn_filter_desc, cudnn_input_desc, cudnn_conv_desc, cudnn_output_desc, algo, &workspace_size_in_bytes)); @@ -104,11 +105,17 @@ class CUDNNConvTransposeOpKernel : public framework::OpKernel { cudnn_workspace = paddle::memory::Alloc(gpu, workspace_size_in_bytes); // ------------------- cudnn conv transpose forward --------------------- + int input_offset = input->numel() / input->dims()[0] / groups; + int output_offset = output->numel() / output->dims()[0] / groups; + int filter_offset = filter->numel() / groups; T alpha = 1.0f, beta = 0.0f; - PADDLE_ENFORCE(platform::dynload::cudnnConvolutionBackwardData( - handle, &alpha, cudnn_filter_desc, filter_data, cudnn_input_desc, - input_data, cudnn_conv_desc, algo, cudnn_workspace, - workspace_size_in_bytes, &beta, cudnn_output_desc, output_data)); + for (int g = 0; g < groups; g++) { + CUDNN_ENFORCE(platform::dynload::cudnnConvolutionBackwardData( + handle, &alpha, cudnn_filter_desc, filter_data + filter_offset * g, + cudnn_input_desc, input_data + input_offset * g, cudnn_conv_desc, + algo, cudnn_workspace, workspace_size_in_bytes, &beta, + cudnn_output_desc, output_data + output_offset * g)); + } // Release the cudnn workspace paddle::memory::Free(gpu, cudnn_workspace); @@ -134,6 +141,7 @@ class CUDNNConvTransposeGradOpKernel : public framework::OpKernel { std::vector paddings = ctx.Attr>("paddings"); // cudnn v5 does not support dilations std::vector dilations = ctx.Attr>("dilations"); + int groups = ctx.Attr("groups"); int user_workspace_size = ctx.Attr("workspace_size_MB"); // ------------------- cudnn descriptors --------------------- @@ -145,13 +153,13 @@ class CUDNNConvTransposeGradOpKernel : public framework::OpKernel { // Input: (N, M, H, W) or (N, M, D, H, W) cudnnTensorDescriptor_t cudnn_input_desc = input_desc.descriptor( - layout, framework::vectorize2int(input->dims())); + layout, framework::vectorize2int(input->dims()), groups); // Output: (N, C, O_h, O_w) or (N, C, O_d, O_h, O_w) cudnnTensorDescriptor_t cudnn_output_desc = output_desc.descriptor( - layout, framework::vectorize2int(output_grad->dims())); + layout, framework::vectorize2int(output_grad->dims()), groups); // Filter (M, C, K_h, K_w) or (M, C, K_d K_h, K_w) cudnnFilterDescriptor_t cudnn_filter_desc = filter_desc.descriptor( - layout, framework::vectorize2int(filter->dims())); + layout, framework::vectorize2int(filter->dims()), groups); cudnnConvolutionDescriptor_t cudnn_conv_desc = conv_desc.descriptor(paddings, strides, dilations); @@ -170,11 +178,11 @@ class CUDNNConvTransposeGradOpKernel : public framework::OpKernel { auto handle = dev_ctx.cudnn_handle(); if (input_grad) { // choose backward algorithm for data - PADDLE_ENFORCE(platform::dynload::cudnnGetConvolutionForwardAlgorithm( + CUDNN_ENFORCE(platform::dynload::cudnnGetConvolutionForwardAlgorithm( handle, cudnn_output_desc, cudnn_filter_desc, cudnn_conv_desc, cudnn_input_desc, CUDNN_CONVOLUTION_FWD_SPECIFY_WORKSPACE_LIMIT, workspace_size_limit, &data_algo)); - PADDLE_ENFORCE(platform::dynload::cudnnGetConvolutionForwardWorkspaceSize( + CUDNN_ENFORCE(platform::dynload::cudnnGetConvolutionForwardWorkspaceSize( handle, cudnn_output_desc, cudnn_filter_desc, cudnn_conv_desc, cudnn_input_desc, data_algo, &fwd_ws_size)); workspace_size_in_bytes = 
std::max(workspace_size_in_bytes, fwd_ws_size); @@ -182,7 +190,7 @@ class CUDNNConvTransposeGradOpKernel : public framework::OpKernel { if (filter_grad) { // choose backward algorithm for filter - PADDLE_ENFORCE( + CUDNN_ENFORCE( platform::dynload::cudnnGetConvolutionBackwardFilterAlgorithm( handle, cudnn_output_desc, cudnn_input_desc, cudnn_conv_desc, cudnn_filter_desc, @@ -190,7 +198,7 @@ class CUDNNConvTransposeGradOpKernel : public framework::OpKernel { workspace_size_limit, &filter_algo)); // get workspace for backwards filter algorithm - PADDLE_ENFORCE( + CUDNN_ENFORCE( platform::dynload::cudnnGetConvolutionBackwardFilterWorkspaceSize( handle, cudnn_output_desc, cudnn_input_desc, cudnn_conv_desc, cudnn_filter_desc, filter_algo, &bwd_filter_ws_size)); @@ -205,15 +213,22 @@ class CUDNNConvTransposeGradOpKernel : public framework::OpKernel { cudnn_workspace = paddle::memory::Alloc(gpu, workspace_size_in_bytes); // ------------------- cudnn conv backward data --------------------- // FIXME(typhoonzero): template type T may not be the same as cudnn call. + int input_offset = input->numel() / input->dims()[0] / groups; + int output_grad_offset = + output_grad->numel() / output_grad->dims()[0] / groups; + int filter_offset = filter->numel() / groups; T alpha = 1.0f, beta = 0.0f; if (input_grad) { T* input_grad_data = input_grad->mutable_data(ctx.GetPlace()); // Because beta is zero, it is unnecessary to reset input_grad. - PADDLE_ENFORCE(platform::dynload::cudnnConvolutionForward( - handle, &alpha, cudnn_output_desc, output_grad_data, - cudnn_filter_desc, filter_data, cudnn_conv_desc, data_algo, - cudnn_workspace, workspace_size_in_bytes, &beta, cudnn_input_desc, - input_grad_data)); + for (int g = 0; g < groups; g++) { + CUDNN_ENFORCE(platform::dynload::cudnnConvolutionForward( + handle, &alpha, cudnn_output_desc, + output_grad_data + output_grad_offset * g, cudnn_filter_desc, + filter_data + filter_offset * g, cudnn_conv_desc, data_algo, + cudnn_workspace, workspace_size_in_bytes, &beta, cudnn_input_desc, + input_grad_data + input_offset * g)); + } } // ------------------- cudnn conv backward filter --------------------- @@ -221,11 +236,16 @@ class CUDNNConvTransposeGradOpKernel : public framework::OpKernel { T* filter_grad_data = filter_grad->mutable_data(ctx.GetPlace()); // Because beta is zero, it is unnecessary to reset filter_grad. 
// Gradient with respect to the filter - PADDLE_ENFORCE(platform::dynload::cudnnConvolutionBackwardFilter( - handle, &alpha, cudnn_output_desc, output_grad_data, cudnn_input_desc, - input_data, cudnn_conv_desc, filter_algo, cudnn_workspace, - workspace_size_in_bytes, &beta, cudnn_filter_desc, filter_grad_data)); + for (int g = 0; g < groups; g++) { + CUDNN_ENFORCE(platform::dynload::cudnnConvolutionBackwardFilter( + handle, &alpha, cudnn_output_desc, + output_grad_data + output_grad_offset * g, cudnn_input_desc, + input_data + input_offset * g, cudnn_conv_desc, filter_algo, + cudnn_workspace, workspace_size_in_bytes, &beta, cudnn_filter_desc, + filter_grad_data + filter_offset * g)); + } } + // Release the cudnn workspace paddle::memory::Free(gpu, cudnn_workspace); } diff --git a/paddle/fluid/operators/conv_transpose_op.cc b/paddle/fluid/operators/conv_transpose_op.cc index d699dcafa4..eeb98ee44f 100644 --- a/paddle/fluid/operators/conv_transpose_op.cc +++ b/paddle/fluid/operators/conv_transpose_op.cc @@ -32,6 +32,7 @@ void ConvTransposeOp::InferShape(framework::InferShapeContext* ctx) const { std::vector strides = ctx->Attrs().Get>("strides"); std::vector paddings = ctx->Attrs().Get>("paddings"); std::vector dilations = ctx->Attrs().Get>("dilations"); + int groups = ctx->Attrs().Get("groups"); PADDLE_ENFORCE(in_dims.size() == 4 || in_dims.size() == 5, "ConvTransposeOp intput should be 4-D or 5-D tensor."); @@ -48,10 +49,10 @@ void ConvTransposeOp::InferShape(framework::InferShapeContext* ctx) const { "ConvTransposeOp paddings dimension and dilations " "dimension should be the same."); PADDLE_ENFORCE_EQ(in_dims[1], filter_dims[0], - "In ConvTransposeOp, The input channel should be the same " - "as the number of filters."); + "In ConvTransposeOp, The number of input channels should " + "be equal to the number of filter's channels."); - std::vector output_shape({in_dims[0], filter_dims[1]}); + std::vector output_shape({in_dims[0], filter_dims[1] * groups}); for (size_t i = 0; i < strides.size(); ++i) { auto filter_extent = dilations[i] * (filter_dims[i + 2] - 1) + 1; output_shape.push_back((in_dims[i + 2] - 1) * strides[i] - 2 * paddings[i] + @@ -84,9 +85,7 @@ framework::OpKernelType ConvTransposeOp::GetExpectedKernelType( layout_, library_); } -Conv2DTransposeOpMaker::Conv2DTransposeOpMaker(OpProto* proto, - OpAttrChecker* op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { +void Conv2DTransposeOpMaker::Make() { AddInput( "Input", "(Tensor) The input tensor of convolution transpose operator. " @@ -104,7 +103,10 @@ Conv2DTransposeOpMaker::Conv2DTransposeOpMaker(OpProto* proto, AddOutput("Output", "(Tensor) The output tensor of convolution transpose operator. " "The format of output tensor is also NCHW."); - + AddAttr("groups", + "(int default:1), the groups number of the convolution " + "transpose operator. ") + .SetDefault(1); AddAttr>("dilations", "(vector default:{1, 1}), the " "dilations(h_dilation, w_dilation) of convolution " @@ -154,7 +156,7 @@ Parameters(strides, paddings) are two elements. These two elements represent hei and width, respectively. The input(X) size and output(Out) size may be different. 
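The shape rule in `ConvTransposeOp::InferShape` above (where, with the new `groups` attribute, the output channel count becomes `filter_dims[1] * groups`) gives each spatial output extent as `(in - 1) * stride - 2 * pad + dilation * (k - 1) + 1`. A quick numeric check:

```cpp
// Transposed-convolution output-size rule used in InferShape:
// H_out = (H_in - 1) * stride - 2 * pad + dilation * (K - 1) + 1.
#include <iostream>

int ConvTransposeOutSize(int in, int k, int stride, int pad, int dilation) {
  const int filter_extent = dilation * (k - 1) + 1;
  return (in - 1) * stride - 2 * pad + filter_extent;
}

int main() {
  // A 4x4 map with a 3x3 kernel, stride 2, padding 1 upsamples to 7x7.
  std::cout << ConvTransposeOutSize(4, 3, /*stride=*/2, /*pad=*/1,
                                    /*dilation=*/1)
            << '\n';  // prints 7
}
```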
-Example: +For example: Input: Input shape: $(N, C_{in}, H_{in}, W_{in})$ Filter shape: $(C_{in}, C_{out}, H_f, W_f)$ @@ -168,9 +170,7 @@ Example: )DOC"); } -Conv3DTransposeOpMaker::Conv3DTransposeOpMaker(OpProto* proto, - OpAttrChecker* op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { +void Conv3DTransposeOpMaker::Make() { AddInput("Input", "(Tensor) The input tensor of convolution transpose operator." "The format of input tensor is NCDHW, where N is batch size, C is " @@ -208,6 +208,10 @@ Conv3DTransposeOpMaker::Conv3DTransposeOpMaker(OpProto* proto, "(vector default:{0, 0, 0}), paddings(d_pad, " "h_pad, w_pad) of convolution transpose operator.") .SetDefault({0, 0, 0}); + AddAttr("groups", + "(int default:1), the number of groups of the convolution3d " + "transpose operator. ") + .SetDefault(1); AddAttr( "use_cudnn", "(bool, default false) Only used in cudnn kernel, need install cudnn") @@ -298,6 +302,7 @@ framework::OpKernelType ConvTransposeOpGrad::GetExpectedKernelType( namespace ops = paddle::operators; +// conv2d_transpose REGISTER_OPERATOR(conv2d_transpose, ops::ConvTransposeOp, ops::Conv2DTransposeOpMaker, paddle::framework::DefaultGradOpDescMaker); @@ -313,6 +318,7 @@ REGISTER_OP_CPU_KERNEL( ops::GemmConvTransposeGradKernel); +// conv3d_transpose REGISTER_OPERATOR(conv3d_transpose, ops::ConvTransposeOp, ops::Conv3DTransposeOpMaker, paddle::framework::DefaultGradOpDescMaker); @@ -327,3 +333,19 @@ REGISTER_OP_CPU_KERNEL( ops::GemmConvTransposeGradKernel, ops::GemmConvTransposeGradKernel); + +// depthwise conv2d_transpose +REGISTER_OPERATOR(depthwise_conv2d_transpose, ops::ConvTransposeOp, + ops::Conv2DTransposeOpMaker, + paddle::framework::DefaultGradOpDescMaker); +REGISTER_OPERATOR(depthwise_conv2d_transpose_grad, ops::ConvTransposeOpGrad); + +REGISTER_OP_CPU_KERNEL( + depthwise_conv2d_transpose, + ops::GemmConvTransposeKernel, + ops::GemmConvTransposeKernel); +REGISTER_OP_CPU_KERNEL( + depthwise_conv2d_transpose_grad, + ops::GemmConvTransposeGradKernel, + ops::GemmConvTransposeGradKernel); diff --git a/paddle/fluid/operators/conv_transpose_op.cu.cc b/paddle/fluid/operators/conv_transpose_op.cu.cc index 640fa7d14a..a6d5665df8 100644 --- a/paddle/fluid/operators/conv_transpose_op.cu.cc +++ b/paddle/fluid/operators/conv_transpose_op.cu.cc @@ -15,25 +15,28 @@ limitations under the License.
*/ #include "paddle/fluid/operators/conv_transpose_op.h" namespace ops = paddle::operators; +using CUDA = paddle::platform::CUDADeviceContext; -REGISTER_OP_CUDA_KERNEL( - conv2d_transpose, - ops::GemmConvTransposeKernel, - ops::GemmConvTransposeKernel); -REGISTER_OP_CUDA_KERNEL( - conv2d_transpose_grad, - ops::GemmConvTransposeGradKernel, - ops::GemmConvTransposeGradKernel); - -REGISTER_OP_CUDA_KERNEL( - conv3d_transpose, - ops::GemmConvTransposeKernel, - ops::GemmConvTransposeKernel); -REGISTER_OP_CUDA_KERNEL( - conv3d_transpose_grad, - ops::GemmConvTransposeGradKernel, - ops::GemmConvTransposeGradKernel); +// conv2d +REGISTER_OP_CUDA_KERNEL(conv2d_transpose, + ops::GemmConvTransposeKernel, + ops::GemmConvTransposeKernel); +REGISTER_OP_CUDA_KERNEL(conv2d_transpose_grad, + ops::GemmConvTransposeGradKernel, + ops::GemmConvTransposeGradKernel); + +// conv3d +REGISTER_OP_CUDA_KERNEL(conv3d_transpose, + ops::GemmConvTransposeKernel, + ops::GemmConvTransposeKernel); +REGISTER_OP_CUDA_KERNEL(conv3d_transpose_grad, + ops::GemmConvTransposeGradKernel, + ops::GemmConvTransposeGradKernel); + +// depthwise conv2d +REGISTER_OP_CUDA_KERNEL(depthwise_conv2d_transpose, + ops::DepthwiseConvTransposeKernel, + ops::DepthwiseConvTransposeKernel); +REGISTER_OP_CUDA_KERNEL(depthwise_conv2d_transpose_grad, + ops::DepthwiseConvTransposeGradKernel, + ops::DepthwiseConvTransposeGradKernel); diff --git a/paddle/fluid/operators/conv_transpose_op.h b/paddle/fluid/operators/conv_transpose_op.h index 898121412b..0d9c6a62fe 100644 --- a/paddle/fluid/operators/conv_transpose_op.h +++ b/paddle/fluid/operators/conv_transpose_op.h @@ -17,6 +17,7 @@ limitations under the License. */ #include "paddle/fluid/framework/eigen.h" #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/operators/math/blas.h" +#include "paddle/fluid/operators/math/depthwise_conv.h" #include "paddle/fluid/operators/math/im2col.h" #include "paddle/fluid/operators/math/vol2col.h" @@ -30,12 +31,12 @@ using DDim = framework::DDim; // operator implementations can reuse the code. class Conv2DTransposeOpMaker : public framework::OpProtoAndCheckerMaker { public: - Conv2DTransposeOpMaker(OpProto* proto, OpAttrChecker* op_checker); + void Make() override; }; class Conv3DTransposeOpMaker : public framework::OpProtoAndCheckerMaker { public: - Conv3DTransposeOpMaker(OpProto* proto, OpAttrChecker* op_checker); + void Make() override; }; class ConvTransposeOp : public framework::OperatorWithKernel { @@ -70,7 +71,7 @@ class GemmConvTransposeKernel : public framework::OpKernel { std::vector strides = context.Attr>("strides"); std::vector paddings = context.Attr>("paddings"); std::vector dilations = context.Attr>("dilations"); - // groups will alway be disabled in conv2dtranspose. 
+ int groups = context.Attr("groups"); const int batch_size = static_cast(input->dims()[0]); @@ -81,10 +82,10 @@ class GemmConvTransposeKernel : public framework::OpKernel { // use col_shape in the im2col and col2im (or vol2col and col2vol) // calculation - // col_shape_vec: {c, k_h, k_w, h, w} or {c, k_d, k_h, k_w, d, h, w} + // col_shape_vec: {c/g, k_h, k_w, h, w} or {c/g, k_d, k_h, k_w, d, h, w} size_t data_dim = filter_shape_vec.size() - 2; std::vector col_shape_vec(1 + 2 * data_dim); - col_shape_vec[0] = output->dims()[1]; + col_shape_vec[0] = output->dims()[1] / groups; for (size_t j = 0; j < data_dim; ++j) { col_shape_vec[j + 1] = filter_shape_vec[j + 2]; col_shape_vec[j + 1 + data_dim] = input_shape_vec[j + 2]; @@ -92,7 +93,7 @@ class GemmConvTransposeKernel : public framework::OpKernel { DDim col_shape(framework::make_ddim(col_shape_vec)); // use col_matrix_shape in the gemm calculation - // size: (c * k_h * k_w, h * w) or (c * k_d * k_h * k_w, d * h * w) + // size: (c/g * k_h * k_w, h * w) or (c/g * k_d * k_h * k_w, d * h * w) DDim col_matrix_shape = framework::flatten_to_2d(col_shape, data_dim + 1); Tensor col; @@ -111,7 +112,7 @@ class GemmConvTransposeKernel : public framework::OpKernel { // input matrix size: (m, h * w) or (m, d * h * w) DDim input_matrix_shape = {input->dims()[1], col_matrix_shape[1]}; - // filter size: (m, c * k_h * k_w) or (m, c * k_d * k_h * k_w) + // filter size: (m, c/g * k_h * k_w) or (m, c/g * k_d * k_h * k_w) DDim filter_matrix_shape = {input->dims()[1], col_matrix_shape[0]}; filter.Resize(filter_matrix_shape); @@ -121,6 +122,8 @@ class GemmConvTransposeKernel : public framework::OpKernel { auto blas = math::GetBlas(dev_ctx); set_zero(dev_ctx, output, static_cast(0)); + int in_step = static_cast(input->dims()[1]) / groups; + int out_step = static_cast(output->dims()[1]) / groups; math::Col2ImFunctor col2im; math::Col2VolFunctor col2vol; @@ -133,22 +136,29 @@ class GemmConvTransposeKernel : public framework::OpKernel { // output size: (c, o_h, o_w) or (c, o_d, o_h, o_w) Tensor output_batch = output->Slice(i, i + 1).Resize(output_shape); - // col_matrix = filter * input_batch - // of shape (c * k_h * k_w, h * w) or (c * k_d * k_h * k_w, d * h * w) - blas.MatMul(filter, true, input_batch, false, static_cast(1.0), - &col_matrix, static_cast(0.0)); - - if (data_dim == 2U) { - // col2im: col_matrix -> dy - // from (c * k_h * k_w, h * w) to (c, o_h, o_w) - col2im(dev_ctx, col, dilations, strides, - std::vector{paddings[0], paddings[1], paddings[0], - paddings[1]}, - &output_batch); - } else if (data_dim == 3U) { - // col2vol: col_matrix -> dy - // from (c * k_d * k_h * k_w, d * h * w) to (c, o_d, o_h, o_w) - col2vol(dev_ctx, col, dilations, strides, paddings, &output_batch); + for (int g = 0; g < groups; g++) { + Tensor in_slice = input_batch.Slice(g * in_step, (g + 1) * in_step); + Tensor filter_slice = filter.Slice(g * in_step, (g + 1) * in_step); + Tensor out_slice = output_batch.Slice(g * out_step, (g + 1) * out_step); + + // col_matrix = filter_slice * input_slice + // of shape (c/g * k_h * k_w, h * w) + // or (c/g * k_d * k_h * k_w, d * h * w) + blas.MatMul(filter_slice, true, in_slice, false, static_cast(1.0), + &col_matrix, static_cast(0.0)); + + if (data_dim == 2U) { + // col2im: col_matrix -> dy + // from (c/g * k_h * k_w, h * w) to (c/g, o_h, o_w) + col2im(dev_ctx, col, dilations, strides, + std::vector{paddings[0], paddings[1], paddings[0], + paddings[1]}, + &out_slice); + } else if (data_dim == 3U) { + // col2vol: col_matrix -> dy + // from 
(c/g * k_d * k_h * k_w, d * h * w) to (c/g, o_d, o_h, o_w) + col2vol(dev_ctx, col, dilations, strides, paddings, &out_slice); + } } } } @@ -174,6 +184,7 @@ class GemmConvTransposeGradKernel : public framework::OpKernel { std::vector strides = context.Attr>("strides"); std::vector paddings = context.Attr>("paddings"); std::vector dilations = context.Attr>("dilations"); + int groups = context.Attr("groups"); const int batch_size = static_cast(input->dims()[0]); @@ -205,9 +216,11 @@ class GemmConvTransposeGradKernel : public framework::OpKernel { // input matrix size: (m, h * w) or (m, d * h * w) DDim input_matrix_shape = {input->dims()[1], col_matrix_shape[1]}; - // filter size: (m, c * k_h * k_w) or (m, c * k_d * k_h * k_w) - DDim filter_matrix_shape = {input->dims()[1], col_matrix_shape[0]}; + // filter size: (m, c/g * k_h * k_w) or (m, c/g * k_d * k_h * k_w) + DDim filter_matrix_shape = {input->dims()[1], col_matrix_shape[0] / groups}; filter.Resize(filter_matrix_shape); + int in_step = static_cast(input->dims()[1]) / groups; + int col_step = static_cast(col_matrix_shape[0]) / groups; // convolution transpose grad on input: // im2col + gemm (similar to conv-forward) @@ -233,7 +246,7 @@ class GemmConvTransposeGradKernel : public framework::OpKernel { if (input_grad) { input_grad->mutable_data(context.GetPlace()); } - if (filter_grad) { // filter size (m, c, k_h, k_w) + if (filter_grad) { // filter size (m, c/g, k_h, k_w) filter_grad->mutable_data(context.GetPlace()); set_zero(dev_ctx, filter_grad, static_cast(0)); filter_grad_ = *filter_grad; @@ -268,8 +281,17 @@ class GemmConvTransposeGradKernel : public framework::OpKernel { // or // (m, c * k_d * k_h * k_w) * (c * k_d * k_h * k_w, d * h * w) -> (m, // d, h, w) - blas.MatMul(filter, false, col_matrix, false, static_cast(1.0), - &input_grad_batch, static_cast(0.0)); + for (int g = 0; g < groups; g++) { + Tensor input_grad_slice = + input_grad_batch.Slice(g * in_step, (g + 1) * in_step); + Tensor filter_slice = filter.Slice(g * in_step, (g + 1) * in_step); + Tensor col_matrix_slice = + col_matrix.Slice(g * col_step, (g + 1) * col_step); + + blas.MatMul(filter_slice, false, col_matrix_slice, false, + static_cast(1.0), &input_grad_slice, + static_cast(0.0)); + } } if (filter_grad) { // input batch @@ -279,12 +301,90 @@ class GemmConvTransposeGradKernel : public framework::OpKernel { // or // (m, d * h * w) * (d * h * w, c * k_d * k_h * k_w) -> (m, c * k_d * // k_h * k_w) - blas.MatMul(in_batch, false, col_matrix, true, static_cast(1.0), - &filter_grad_, static_cast(1.0)); + for (int g = 0; g < groups; g++) { + Tensor in_batch_slice = + in_batch.Slice(g * in_step, (g + 1) * in_step); + Tensor filter_grad_slice = + filter_grad_.Slice(g * in_step, (g + 1) * in_step); + Tensor col_matrix_slice = + col_matrix.Slice(g * col_step, (g + 1) * col_step); + blas.MatMul(in_batch_slice, false, col_matrix_slice, true, + static_cast(1.0), &filter_grad_slice, + static_cast(1.0)); + } } } } } }; + +template +class DepthwiseConvTransposeKernel : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext& context) const override { + const Tensor* input = context.Input("Input"); + Tensor filter = *context.Input("Filter"); + Tensor* output = context.Output("Output"); + output->mutable_data(context.GetPlace()); + + int groups = context.Attr("groups"); + PADDLE_ENFORCE_EQ(groups, filter.dims()[0]); + + std::vector strides = context.Attr>("strides"); + std::vector paddings = context.Attr>("paddings"); + std::vector dilations = 
context.Attr>("dilations"); + for (auto v : dilations) { + PADDLE_ENFORCE_EQ(v, 1); + } + + output->mutable_data(context.GetPlace()); + auto& dev_ctx = context.template device_context(); + math::SetConstant set_zero; + set_zero(dev_ctx, output, static_cast(0)); + + math::DepthwiseConvInputGradFunctor + depthwiseConvInputGrad; + depthwiseConvInputGrad(dev_ctx, *output, filter, *input, strides, paddings, + output); + } +}; + +template +class DepthwiseConvTransposeGradKernel : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext& context) const override { + const Tensor* input = context.Input("Input"); + const Tensor* output_grad = + context.Input(framework::GradVarName("Output")); + Tensor* input_grad = + context.Output(framework::GradVarName("Input")); + Tensor* filter_grad = + context.Output(framework::GradVarName("Filter")); + Tensor filter = *context.Input("Filter"); + + if (!input_grad && !filter_grad) return; + + auto& dev_ctx = context.template device_context(); + std::vector strides = context.Attr>("strides"); + std::vector paddings = context.Attr>("paddings"); + + if (input_grad) { + math::DepthwiseConvFunctor depthwiseConv; + depthwiseConv(dev_ctx, *output_grad, filter, strides, paddings, + input_grad); + } + + if (filter_grad) { + math::SetConstant set_zero; + filter_grad->mutable_data(context.GetPlace()); + set_zero(dev_ctx, filter_grad, static_cast(0)); + + math::DepthwiseConvFilterGradFunctor + depthwiseConvFilterGrad; + depthwiseConvFilterGrad(dev_ctx, *output_grad, *input, strides, paddings, + filter_grad); + } + } +}; } // namespace operators } // namespace paddle diff --git a/paddle/fluid/operators/cos_sim_op.cc b/paddle/fluid/operators/cos_sim_op.cc index 04ca878e68..8f3644039f 100644 --- a/paddle/fluid/operators/cos_sim_op.cc +++ b/paddle/fluid/operators/cos_sim_op.cc @@ -62,8 +62,7 @@ class CosSimOp : public framework::OperatorWithKernel { class CosSimOpMaker : public framework::OpProtoAndCheckerMaker { public: - CosSimOpMaker(OpProto* proto, OpAttrChecker* op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("X", "The 1st input of cos_sim op."); AddInput("Y", "The 2nd input of cos_sim op."); AddOutput("Out", "The output of cos_sim op."); @@ -77,9 +76,9 @@ class CosSimOpMaker : public framework::OpProtoAndCheckerMaker { .AsIntermediate(); AddComment(R"DOC( -Cosine Similarity Operator. +**Cosine Similarity Operator** -$Out = X^T * Y / (\sqrt{X^T * X} * \sqrt{Y^T * Y})$ +$Out = \frac{X^T * Y}{(\sqrt{X^T * X} * \sqrt{Y^T * Y})}$ The input X and Y must have the same shape, except that the 1st dimension of input Y could be just 1 (different from input X), which will be diff --git a/paddle/fluid/operators/crf_decoding_op.cc b/paddle/fluid/operators/crf_decoding_op.cc index a83013c428..c27befe114 100644 --- a/paddle/fluid/operators/crf_decoding_op.cc +++ b/paddle/fluid/operators/crf_decoding_op.cc @@ -18,8 +18,7 @@ namespace paddle { namespace operators { class CRFDecodingOpMaker : public framework::OpProtoAndCheckerMaker { public: - CRFDecodingOpMaker(OpProto* proto, OpAttrChecker* op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("Emission", "(LoDTensor, default: LoDTensor). A LoDTensor with shape " "[N x D] where N is the size of the mini-batch and D is the total " @@ -54,21 +53,18 @@ sequence of observed tags. The output of this operator changes according to whether Input(Label) is given: 1. Input(Label) is given: - -This happens in training. 
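Returning to the cos_sim doc fix above: the operator computes $Out = \frac{X^T Y}{\sqrt{X^T X}\,\sqrt{Y^T Y}}$. A quick numeric check of the formula:

```cpp
#include <cmath>
#include <iostream>
#include <vector>

double CosSim(const std::vector<double>& x, const std::vector<double>& y) {
  double xy = 0, xx = 0, yy = 0;
  for (size_t i = 0; i < x.size(); ++i) {
    xy += x[i] * y[i];
    xx += x[i] * x[i];
    yy += y[i] * y[i];
  }
  return xy / (std::sqrt(xx) * std::sqrt(yy));
}

int main() {
  std::cout << CosSim({1, 0}, {1, 0}) << ' '    // 1: same direction
            << CosSim({1, 0}, {0, 1}) << '\n';  // 0: orthogonal
}
```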
This operator is used to co-work with the chunk_eval -operator. - -When Input(Label) is given, the crf_decoding operator returns a row vector -with shape [N x 1] whose values are fixed to be 0, indicating an incorrect -prediction, or 1 indicating a tag is correctly predicted. Such an output is the -input to chunk_eval operator. + This happens in training. This operator is used to co-work with the chunk_eval + operator. + When Input(Label) is given, the crf_decoding operator returns a row vector + with shape [N x 1] whose values are fixed to be 0, indicating an incorrect + prediction, or 1 indicating a tag is correctly predicted. Such an output is the + input to the chunk_eval operator. 2. Input(Label) is not given: - -This is the standard decoding process. + This is the standard decoding process. The crf_decoding operator returns a row vector with shape [N x 1] whose values -range from 0 to maximum tag number - 1. Each element indicates an index of a +range from 0 to maximum tag number - 1. Each element indicates an index of a predicted tag. )DOC"); } diff --git a/paddle/fluid/operators/crop_op.cc b/paddle/fluid/operators/crop_op.cc index a8f1fbd529..a2a871efa8 100644 --- a/paddle/fluid/operators/crop_op.cc +++ b/paddle/fluid/operators/crop_op.cc @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -48,12 +48,18 @@ class CropOp : public framework::OperatorWithKernel { ctx->SetOutputDim("Out", y_dim); } } + + framework::OpKernelType GetExpectedKernelType( + const framework::ExecutionContext& ctx) const override { + return framework::OpKernelType( + framework::ToDataType(ctx.Input("X")->type()), + ctx.device_context()); + } }; class CropOpMaker : public framework::OpProtoAndCheckerMaker { public: - CropOpMaker(OpProto* proto, OpAttrChecker* op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("X", "The input of pad op. " "The input should be a k-D tensor(k > 0 and k < 7)."); @@ -61,13 +67,19 @@ class CropOpMaker : public framework::OpProtoAndCheckerMaker { "The input used as reference for cropping, " "which is of the same dimensions as X.") .AsDispensable(); + AddInput("Offsets", + "The input used to describe offsets at runtime, which is a " + "1-D vector whose size equals the rank of input 'X'. The " + "elements' data type must be int.") + .AsDispensable(); AddOutput("Out", "The output of crop op, " "which is of the same dimensions as X."); AddAttr>("offsets", "A list describing offsets to be cropped. " "The size of offsets list should be the same as " - "the dimension size of input X."); + "the dimension size of input X.") + .SetDefault(std::vector()); AddAttr>("shape", "A list describing the shape of output. " "The size of shape list should be the same as " @@ -78,6 +90,17 @@ Crop Operator. Crop input into output, as specified by offsets and shape. +There are two ways to set the offsets: +1. At runtime: Using the input 'Offsets', which is a Variable and can be the + output of other operators. This way is suitable for + dynamic offsets. +2. In network configuration: Using the attribute 'offsets', which will be + set in the Python configuration script. This way is + suitable for fixed offsets. +You CANNOT use these two ways at the same time.
An exception will be raised +if the input 'Offsets' is configured while the attribute 'offsets' is +not empty. + There are two ways to set shape: 1. reference input: crop input X into the same shape as reference input. The dimension of reference input should @@ -147,6 +170,15 @@ class CropOpGrad : public framework::OperatorWithKernel { ctx->SetOutputDim(x_grad_name, x_dims); } } + + framework::OpKernelType GetExpectedKernelType( + const framework::ExecutionContext& ctx) const override { + return framework::OpKernelType( + framework::ToDataType( + ctx.Input(framework::GradVarName("Out")) + ->type()), + ctx.device_context()); + } }; } // namespace operators @@ -156,6 +188,7 @@ namespace ops = paddle::operators; REGISTER_OPERATOR(crop, ops::CropOp, ops::CropOpMaker, paddle::framework::DefaultGradOpDescMaker); REGISTER_OPERATOR(crop_grad, ops::CropOpGrad); -REGISTER_OP_CPU_KERNEL(crop, ops::CropKernel); +REGISTER_OP_CPU_KERNEL( + crop, ops::CropKernel); REGISTER_OP_CPU_KERNEL( crop_grad, ops::CropGradKernel); diff --git a/paddle/fluid/operators/crop_op.cu b/paddle/fluid/operators/crop_op.cu index 1a39186046..b75678217e 100644 --- a/paddle/fluid/operators/crop_op.cu +++ b/paddle/fluid/operators/crop_op.cu @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -16,6 +16,7 @@ limitations under the License. */ #include "paddle/fluid/operators/crop_op.h" namespace ops = paddle::operators; -REGISTER_OP_CUDA_KERNEL(crop, ops::CropKernel); +REGISTER_OP_CUDA_KERNEL( + crop, ops::CropKernel); REGISTER_OP_CUDA_KERNEL( crop_grad, ops::CropGradKernel); diff --git a/paddle/fluid/operators/crop_op.h b/paddle/fluid/operators/crop_op.h index f05c2e2328..2d7d33bd4f 100644 --- a/paddle/fluid/operators/crop_op.h +++ b/paddle/fluid/operators/crop_op.h @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License.
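To make the offset semantics described in the crop_op documentation above concrete, here is a minimal standalone sketch of a 2-D crop, assuming row-major storage; the helper name Crop2D and the plain std::vector buffers are illustrative assumptions, not part of the operator's API.

#include <cassert>
#include <iostream>
#include <vector>

// Copy the window of size `shape` whose origin is `offsets` out of a
// 2-D input of size x_h x x_w (row-major).
std::vector<int> Crop2D(const std::vector<int>& x, int x_h, int x_w,
                        const std::vector<int>& offsets,
                        const std::vector<int>& shape) {
  assert(offsets.size() == 2 && shape.size() == 2);
  assert(offsets[0] + shape[0] <= x_h && offsets[1] + shape[1] <= x_w);
  std::vector<int> out(shape[0] * shape[1]);
  for (int i = 0; i < shape[0]; ++i) {
    for (int j = 0; j < shape[1]; ++j) {
      out[i * shape[1] + j] = x[(offsets[0] + i) * x_w + (offsets[1] + j)];
    }
  }
  return out;
}

int main() {
  // A 3 x 4 input; crop a 2 x 2 window with offsets (1, 1).
  std::vector<int> x = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11};
  for (int v : Crop2D(x, 3, 4, {1, 1}, {2, 2})) std::cout << v << " ";
  // Prints: 5 6 9 10
  return 0;
}

Whether the offsets come from the runtime 'Offsets' input or the static 'offsets' attribute, the arithmetic is the same; only the source of the offset vector differs, which is exactly what the GetOffsets helper in the following hunk dispatches on.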
@@ -27,36 +27,106 @@ template ; using framework::Tensor; -template +static std::vector GetOffsets(const framework::ExecutionContext& ctx) { + std::vector res; + int rank = ctx.Input("X")->dims().size(); + if (ctx.HasInput("Offsets")) { + PADDLE_ENFORCE(ctx.Attr>("offsets").empty(), + "Input 'Offsets' and attribute 'offsets' should not be used " + "at the same time."); + const auto* offsets_tensor = ctx.Input("Offsets"); + PADDLE_ENFORCE_EQ(offsets_tensor->dims().size(), 1); + PADDLE_ENFORCE_EQ( + rank, offsets_tensor->dims()[0], + "Offsets size should be equal to the dimension size of the input " + "tensor."); + const int* offsets_data; + framework::Tensor cpu_tmp_tensor; + if (platform::is_cpu_place(offsets_tensor->place())) { + offsets_data = offsets_tensor->data(); + } else { + framework::TensorCopySync(*offsets_tensor, platform::CPUPlace(), + &cpu_tmp_tensor); + offsets_data = cpu_tmp_tensor.data(); + } + res = std::vector(offsets_data, offsets_data + rank); + } else { + res = ctx.Attr>("offsets"); + PADDLE_ENFORCE_EQ( + rank, static_cast(res.size()), + "Offsets size should be equal to the dimension size of the input " + "tensor."); + } + return res; +} + +template +void CropFunction(const framework::ExecutionContext& context) { + auto* x = context.Input("X"); + auto* out = context.Output("Out"); + auto out_dims = out->dims(); + if (out_dims[0] == -1) { + out_dims[0] = x->dims()[0]; + } + out->mutable_data(out_dims, context.GetPlace()); + auto x_stride = framework::stride(x->dims()); + auto out_stride = framework::stride(out->dims()); + auto offsets = GetOffsets(context); + int64_t offset = 0; + for (size_t i = 0; i < offsets.size(); ++i) { + offset += (x_stride[i] * offsets[i]); + } + + auto x_tensor = EigenTensor::From(*x); + auto out_tensor = EigenTensor::From(*out); + Eigen::array e_offsets; + Eigen::array e_shape; + for (size_t i = 0; i < D; ++i) { + e_offsets[i] = offsets[i]; + e_shape[i] = out->dims()[i]; + } + auto& place = + *context.template device_context().eigen_device(); + out_tensor.device(place) = x_tensor.slice(e_offsets, e_shape); +} + +template class CropKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& context) const override { - auto* x = context.Input("X"); - auto* out = context.Output("Out"); - const T* x_data = x->data(); - T* out_data = out->mutable_data(context.GetPlace()); - auto x_stride = framework::stride(x->dims()); - auto out_stride = framework::stride(out->dims()); - auto offsets = context.Attr>("offsets"); - PADDLE_ENFORCE_EQ( - x->dims().size(), static_cast(offsets.size()), - "Offsets size should be equal to dimension size of input tensor."); - int64_t offset = 0; - for (size_t i = 0; i < offsets.size(); ++i) { - offset += (x_stride[i] * offsets[i]); + int rank = context.Input("X")->dims().size(); + switch (rank) { + case 1: + CropFunction(context); + break; + case 2: + CropFunction(context); + break; + case 3: + CropFunction(context); + break; + case 4: + CropFunction(context); + break; + case 5: + CropFunction(context); + break; + case 6: + CropFunction(context); + break; + default: + PADDLE_THROW( + "CropOp only supports tensors with no more than 6 dimensions."); } - StridedMemcpy(context.device_context(), x_data + offset, x_stride, - out->dims(), out_stride, out_data); } }; template void CropGradFunction(const framework::ExecutionContext& context) { auto* d_x = context.Output(framework::GradVarName("X")); + auto* x = context.Input("X"); if (d_x != nullptr) { auto* d_out = context.Input(framework::GradVarName("Out")); -
d_x->mutable_data(context.GetPlace()); - auto offsets = context.Attr>("offsets"); + d_x->mutable_data(x->dims(), context.GetPlace()); + auto offsets = GetOffsets(context); Eigen::array, D> paddings; for (size_t i = 0; i < D; ++i) { paddings[i].first = offsets[i]; diff --git a/paddle/fluid/operators/cross_entropy_op.cc b/paddle/fluid/operators/cross_entropy_op.cc index 2b2a9dc831..a3bec3da45 100644 --- a/paddle/fluid/operators/cross_entropy_op.cc +++ b/paddle/fluid/operators/cross_entropy_op.cc @@ -111,8 +111,7 @@ class CrossEntropyGradientOp : public framework::OperatorWithKernel { class CrossEntropyOpMaker : public framework::OpProtoAndCheckerMaker { public: - CrossEntropyOpMaker(OpProto* proto, OpAttrChecker* op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("X", "(Tensor, default Tensor), a 2-D tensor with shape [N x D]," " where N is the batch size and D is the number of classes. " diff --git a/paddle/fluid/operators/ctc_align_op.cc b/paddle/fluid/operators/ctc_align_op.cc index 19e7649660..d2b440d9d2 100644 --- a/paddle/fluid/operators/ctc_align_op.cc +++ b/paddle/fluid/operators/ctc_align_op.cc @@ -44,8 +44,7 @@ class CTCAlignOp : public framework::OperatorWithKernel { class CTCAlignOpMaker : public framework::OpProtoAndCheckerMaker { public: - CTCAlignOpMaker(OpProto* proto, OpAttrChecker* op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("Input", "(LodTensor, default: LoDTensor), Its shape is " "[Lp, 1], where Lp is the sum of all input sequences' length."); diff --git a/paddle/fluid/operators/cumsum_op.cc b/paddle/fluid/operators/cumsum_op.cc index f7c516a0ba..5302b822d6 100644 --- a/paddle/fluid/operators/cumsum_op.cc +++ b/paddle/fluid/operators/cumsum_op.cc @@ -29,21 +29,20 @@ class CumOp : public framework::OperatorWithKernel { class CumsumOpMaker : public framework::OpProtoAndCheckerMaker { public: - CumsumOpMaker(OpProto *proto, OpAttrChecker *op_checker) - : framework::OpProtoAndCheckerMaker(proto, op_checker) { - AddInput("X", "Input of Cumsum operator"); - AddOutput("Out", "Output of Cumsum operator"); + void Make() override { + AddInput("X", "Input of cumsum operator"); + AddOutput("Out", "Output of cumsum operator"); AddAttr("axis", - "(int, default -1). The dimenstion to accumulate along. " - "-1 means the last dimenstion") + "The dimension to accumulate along. -1 means the last " + "dimension [default -1].") .SetDefault(-1) .EqualGreaterThan(-1); AddAttr("exclusive", - "bool, default false). Whether to perform exclusive cumsum") + "Whether to perform exclusive cumsum. [default false].") .SetDefault(false); AddAttr("reverse", - "bool, default false). If true, the cumsum is performed in " - "the reversed direction") + "If true, the cumsum is performed in the reversed direction. " + "[default false].") .SetDefault(false); AddComment(R"DOC( The cumulative sum of the elements along a given axis.
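Since the exclusive and reverse attributes documented above are easy to misread, here is a minimal sketch of their semantics for a 1-D sequence (axis handling omitted); the function name Cumsum1D is an illustrative assumption, not the kernel's actual implementation.

#include <algorithm>
#include <iostream>
#include <vector>

std::vector<int> Cumsum1D(std::vector<int> x, bool exclusive, bool reverse) {
  if (reverse) std::reverse(x.begin(), x.end());  // accumulate from the end
  std::vector<int> out(x.size());
  int acc = 0;
  for (size_t i = 0; i < x.size(); ++i) {
    if (exclusive) {  // out[i] is the sum of all elements before x[i]
      out[i] = acc;
      acc += x[i];
    } else {  // out[i] includes x[i]
      acc += x[i];
      out[i] = acc;
    }
  }
  if (reverse) std::reverse(out.begin(), out.end());
  return out;
}

int main() {
  std::vector<int> x = {1, 2, 3, 4};
  for (int v : Cumsum1D(x, false, false)) std::cout << v << " ";  // 1 3 6 10
  std::cout << "\n";
  for (int v : Cumsum1D(x, true, false)) std::cout << v << " ";   // 0 1 3 6
  std::cout << "\n";
  for (int v : Cumsum1D(x, false, true)) std::cout << v << " ";   // 10 9 7 4
  return 0;
}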
diff --git a/paddle/fluid/operators/decayed_adagrad_op.cc b/paddle/fluid/operators/decayed_adagrad_op.cc index 5a1315fb2a..c0f2b49a04 100644 --- a/paddle/fluid/operators/decayed_adagrad_op.cc +++ b/paddle/fluid/operators/decayed_adagrad_op.cc @@ -62,8 +62,7 @@ class DecayedAdagradOp : public framework::OperatorWithKernel { class DecayedAdagradOpMaker : public framework::OpProtoAndCheckerMaker { public: - DecayedAdagradOpMaker(OpProto *proto, OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("Param", "(Tensor) Input parameter"); AddInput("Grad", "(Tensor) Input gradient"); AddInput("Moment", "(Tensor) Second moment"); diff --git a/paddle/fluid/operators/delete_var_op.cc b/paddle/fluid/operators/delete_var_op.cc index 1fe9404c00..d7a9bfbc43 100644 --- a/paddle/fluid/operators/delete_var_op.cc +++ b/paddle/fluid/operators/delete_var_op.cc @@ -34,8 +34,7 @@ class DeleteVarOp : public framework::OperatorBase { class DeleteVarOpInfoMaker : public framework::OpProtoAndCheckerMaker { public: - DeleteVarOpInfoMaker(OpProto *proto, OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("X", "The input of delete op").AsDuplicable(); AddComment(R"DOC( Delete Operator. diff --git a/paddle/fluid/operators/detail/CMakeLists.txt b/paddle/fluid/operators/detail/CMakeLists.txt deleted file mode 100644 index 719a7465b8..0000000000 --- a/paddle/fluid/operators/detail/CMakeLists.txt +++ /dev/null @@ -1,9 +0,0 @@ -if(WITH_DISTRIBUTE) - grpc_library(sendrecvop_grpc SRCS bytebuffer_stream.cc sendrecvop_utils.cc grpc_client.cc - grpc_server.cc variable_response.cc PROTO send_recv.proto DEPS lod_tensor selected_rows) - set(DISTRIBUTE_COMPILE_FLAGS "-Wno-non-virtual-dtor -Wno-error=non-virtual-dtor -Wno-error=delete-non-virtual-dtor") - set_source_files_properties(serde_test.cc grpc_server_test.cc PROPERTIES COMPILE_FLAGS ${DISTRIBUTE_COMPILE_FLAGS}) - cc_test(serde_test SRCS serde_test.cc variable_response.cc DEPS grpc++_unsecure grpc_unsecure gpr - cares zlib protobuf sendrecvop_grpc) - cc_test(grpc_server_test SRCS grpc_server_test.cc DEPS sendrecvop_grpc grpc++_unsecure grpc_unsecure gpr cares zlib protobuf executor proto_desc lookup_table_op) -endif() diff --git a/paddle/fluid/operators/detail/grpc_server.cc b/paddle/fluid/operators/detail/grpc_server.cc deleted file mode 100644 index e6ee28ea8d..0000000000 --- a/paddle/fluid/operators/detail/grpc_server.cc +++ /dev/null @@ -1,386 +0,0 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
*/ - -#include "paddle/fluid/operators/detail/grpc_server.h" - -#include -#include - -using ::grpc::ServerAsyncResponseWriter; - -namespace paddle { -namespace operators { -namespace detail { - -enum CallStatus { PROCESS = 0, FINISH }; - -// reference: -// https://stackoverflow.com/questions/41732884/grpc-multiple-services-in-cpp-async-server -class RequestBase { - public: - explicit RequestBase(GrpcService::AsyncService* service, - ::grpc::ServerCompletionQueue* cq, bool sync_mode, - const platform::DeviceContext* dev_ctx) - : service_(service), - cq_(cq), - sync_mode_(sync_mode), - status_(PROCESS), - dev_ctx_(dev_ctx) { - PADDLE_ENFORCE(cq_); - } - virtual ~RequestBase() {} - virtual void Process() { assert(false); } - - CallStatus Status() { return status_; } - void SetStatus(CallStatus status) { status_ = status; } - virtual std::string GetReqName() { - assert(false); - return ""; - } - - protected: - ::grpc::ServerContext ctx_; - GrpcService::AsyncService* service_; - ::grpc::ServerCompletionQueue* cq_; - const bool sync_mode_; - CallStatus status_; - const platform::DeviceContext* dev_ctx_; -}; - -class RequestSend final : public RequestBase { - public: - explicit RequestSend(GrpcService::AsyncService* service, - ::grpc::ServerCompletionQueue* cq, bool sync_mode, - framework::Scope* scope, ReceivedQueue* queue, - const platform::DeviceContext* dev_ctx) - : RequestBase(service, cq, sync_mode, dev_ctx), - queue_(queue), - responder_(&ctx_) { - if (sync_mode_) { - request_.reset(new VariableResponse(scope, dev_ctx_, false)); - } else { - request_.reset(new VariableResponse(scope, dev_ctx_, true)); - } - int method_id = static_cast(detail::GrpcMethod::kSendVariable); - service_->RequestAsyncUnary(method_id, &ctx_, request_.get(), &responder_, - cq_, cq_, this); - } - - virtual ~RequestSend() {} - - virtual std::string GetReqName() { return request_->Varname(); } - - virtual void Process() { - std::string var_name = GetReqName(); - VLOG(3) << "RequestSend " << var_name; - queue_->Push(std::make_pair(var_name, request_)); - - sendrecv::VoidMessage reply; - responder_.Finish(reply, ::grpc::Status::OK, this); - status_ = FINISH; - } - - protected: - std::shared_ptr request_; - ReceivedQueue* queue_; - ServerAsyncResponseWriter responder_; -}; - -class RequestGet final : public RequestBase { - public: - explicit RequestGet(GrpcService::AsyncService* service, - ::grpc::ServerCompletionQueue* cq, bool sync_mode, - framework::Scope* scope, - const platform::DeviceContext* dev_ctx, - framework::BlockingQueue* queue) - : RequestBase(service, cq, sync_mode, dev_ctx), - responder_(&ctx_), - scope_(scope), - queue_(queue) { - auto method_id = static_cast(detail::GrpcMethod::kGetVariable); - service_->RequestAsyncUnary(method_id, &ctx_, &request_, &responder_, cq_, - cq_, this); - } - - virtual ~RequestGet() {} - - virtual std::string GetReqName() { return request_.varname(); } - - virtual void Process() { - // proc request. 
- std::string var_name = request_.varname(); - VLOG(3) << "RequestGet " << var_name; - auto* var = scope_->FindVar(var_name); - - ::grpc::ByteBuffer reply; - if (var_name != FETCH_BARRIER_MESSAGE) { - SerializeToByteBuffer(var_name, var, *dev_ctx_, &reply); - } - - responder_.Finish(reply, ::grpc::Status::OK, this); - status_ = FINISH; - - if (var_name == FETCH_BARRIER_MESSAGE) { - sendrecv::VariableMessage msg; - MessageWithName msg_with_name = std::make_pair(var_name, msg); - queue_->Push(msg_with_name); - } - } - - protected: - sendrecv::VariableMessage request_; - ServerAsyncResponseWriter<::grpc::ByteBuffer> responder_; - framework::Scope* scope_; - framework::BlockingQueue* queue_; -}; - -class RequestPrefetch final : public RequestBase { - public: - explicit RequestPrefetch(GrpcService::AsyncService* service, - ::grpc::ServerCompletionQueue* cq, bool sync_mode, - framework::Scope* scope, - const platform::DeviceContext* dev_ctx, - framework::Executor* executor, - framework::ProgramDesc* program, - framework::ExecutorPrepareContext* prefetch_ctx) - : RequestBase(service, cq, sync_mode, dev_ctx), - responder_(&ctx_), - scope_(scope), - executor_(executor), - program_(program), - prefetch_ctx_(prefetch_ctx) { - if (sync_mode_) { - request_.reset(new VariableResponse(scope, dev_ctx_, false)); - } else { - request_.reset(new VariableResponse(scope, dev_ctx_, true)); - } - int method_id = static_cast(detail::GrpcMethod::kPrefetchVariable); - service_->RequestAsyncUnary(method_id, &ctx_, request_.get(), &responder_, - cq_, cq_, this); - } - - virtual ~RequestPrefetch() {} - - virtual std::string GetReqName() { return request_->Varname(); } - - virtual void Process() { - // prefetch process... - ::grpc::ByteBuffer reply; - - std::string var_name = request_->OutVarname(); - VLOG(3) << "RequestPrefetch " << var_name; - auto var_desc = program_->Block(0).FindVar(var_name); - framework::Scope* local_scope = &scope_->NewScope(); - auto* var = local_scope->FindVar(var_name); - InitializeVariable(var, var_desc->GetType()); - executor_->RunPreparedContext(prefetch_ctx_, scope_, false, false); - - SerializeToByteBuffer(var_name, var, *dev_ctx_, &reply); - - responder_.Finish(reply, ::grpc::Status::OK, this); - status_ = FINISH; - } - - protected: - std::shared_ptr request_; - ServerAsyncResponseWriter<::grpc::ByteBuffer> responder_; - framework::Scope* scope_; - framework::Executor* executor_; - framework::ProgramDesc* program_; - framework::ExecutorPrepareContext* prefetch_ctx_; -}; - -void AsyncGRPCServer::WaitClientGet(int count) { - int fetch_barriers = 0; - while (fetch_barriers < count) { - auto msg = var_get_queue_.Pop(); - if (msg.first == FETCH_BARRIER_MESSAGE) { - fetch_barriers++; - } - } -} - -void AsyncGRPCServer::WaitServerReady() { - std::unique_lock lock(this->mutex_ready_); - condition_ready_.wait(lock, [=] { return this->ready_ == 1; }); -} - -void AsyncGRPCServer::RunSyncUpdate() { - ::grpc::ServerBuilder builder; - builder.AddListeningPort(address_, ::grpc::InsecureServerCredentials(), - &selected_port_); - builder.SetMaxSendMessageSize(std::numeric_limits::max()); - builder.SetMaxReceiveMessageSize(std::numeric_limits::max()); - builder.RegisterService(&service_); - - cq_send_ = builder.AddCompletionQueue(); - cq_get_ = builder.AddCompletionQueue(); - cq_prefetch_ = builder.AddCompletionQueue(); - - server_ = builder.BuildAndStart(); - LOG(INFO) << "Server listening on " << address_ - << " selected port: " << selected_port_; - - std::function send_register = - 
std::bind(&AsyncGRPCServer::TryToRegisterNewSendOne, this); - std::function get_register = - std::bind(&AsyncGRPCServer::TryToRegisterNewGetOne, this); - std::function prefetch_register = - std::bind(&AsyncGRPCServer::TryToRegisterNewPrefetchOne, this); - - // TODO(wuyi): Run these "HandleRequest" in thread pool - t_send_.reset( - new std::thread(std::bind(&AsyncGRPCServer::HandleRequest, this, - cq_send_.get(), "cq_send", send_register))); - t_get_.reset( - new std::thread(std::bind(&AsyncGRPCServer::HandleRequest, this, - cq_get_.get(), "cq_get", get_register))); - t_prefetch_.reset(new std::thread( - std::bind(&AsyncGRPCServer::HandleRequest, this, cq_prefetch_.get(), - "cq_prefetch", prefetch_register))); - - { - std::lock_guard lock(this->mutex_ready_); - ready_ = 1; - } - condition_ready_.notify_all(); - // wait server - server_->Wait(); - t_send_->join(); - t_get_->join(); - t_prefetch_->join(); -} - -void AsyncGRPCServer::ShutdownQueue() { - std::unique_lock lock(cq_mutex_); - cq_send_->Shutdown(); - cq_get_->Shutdown(); - cq_prefetch_->Shutdown(); -} - -// This URL explains why shutdown is complicate: -void AsyncGRPCServer::ShutDown() { - is_shut_down_ = true; - ShutdownQueue(); - server_->Shutdown(); -} - -void AsyncGRPCServer::TryToRegisterNewSendOne() { - std::unique_lock lock(cq_mutex_); - if (is_shut_down_) { - VLOG(3) << "shutdown, do not TryToRegisterNewSendOne"; - return; - } - RequestSend* send = new RequestSend(&service_, cq_send_.get(), sync_mode_, - scope_, &var_recv_queue_, dev_ctx_); - VLOG(4) << "Create RequestSend status:" << send->Status(); -} - -void AsyncGRPCServer::TryToRegisterNewGetOne() { - std::unique_lock lock(cq_mutex_); - if (is_shut_down_) { - VLOG(3) << "shutdown, do not TryToRegisterNewGetOne"; - return; - } - RequestGet* get = new RequestGet(&service_, cq_get_.get(), sync_mode_, scope_, - dev_ctx_, &var_get_queue_); - VLOG(4) << "Create RequestGet status:" << get->Status(); -} - -void AsyncGRPCServer::TryToRegisterNewPrefetchOne() { - std::unique_lock lock(cq_mutex_); - if (is_shut_down_) { - VLOG(3) << "shutdown, do not TryToRegisterNewPrefetchOne"; - return; - } - RequestPrefetch* prefetch = - new RequestPrefetch(&service_, cq_prefetch_.get(), sync_mode_, scope_, - dev_ctx_, executor_, program_, prefetch_ctx_); - - VLOG(4) << "Create RequestPrefetch status:" << prefetch->Status(); -} - -// FIXME(typhoonzero): change cq_name to enum. 
-void AsyncGRPCServer::HandleRequest(::grpc::ServerCompletionQueue* cq, - const std::string& cq_name, - std::function TryToRegisterNewOne) { - TryToRegisterNewOne(); - - void* tag = NULL; - bool ok = false; - - while (true) { - VLOG(3) << "HandleRequest for " << cq_name << " wait Next"; - if (!cq->Next(&tag, &ok)) { - LOG(INFO) << cq_name << " CompletionQueue shutdown!"; - break; - } - VLOG(3) << "HandleRequest for " << cq_name << " get Next"; - - PADDLE_ENFORCE(tag); - - if (sync_mode_) { - // FIXME(typhoonzero): de-couple the barriers with recv_op - if (!is_shut_down_ && cq_name == "cq_get") WaitCond(1); - if (!is_shut_down_ && cq_name == "cq_send") WaitCond(0); - VLOG(3) << "HandleRequest for " << cq_name << " after WaitCond"; - } - - RequestBase* base = reinterpret_cast(tag); - // reference: - // https://github.com/tensorflow/tensorflow/issues/5596 - // https://groups.google.com/forum/#!topic/grpc-io/xftlRy-IQwM - // https://groups.google.com/forum/#!topic/grpc-io/ywATt88Ef_I - if (!ok) { - LOG(WARNING) << cq_name << " recv no regular event:argument name[" - << base->GetReqName() << "]"; - TryToRegisterNewOne(); - delete base; - continue; - } - - switch (base->Status()) { - case PROCESS: { - TryToRegisterNewOne(); - base->Process(); - VLOG(4) << cq_name << " PROCESS status:" << base->Status(); - break; - } - case FINISH: { - VLOG(4) << cq_name << " FINISH status:" << base->Status(); - delete base; - break; - } - default: { assert(false); } - } - } -} - -void AsyncGRPCServer::WaitCond(int cond) { - std::unique_lock lock(this->barrier_mutex_); - barrier_condition_.wait(lock, - [=] { return this->barrier_cond_step_ == cond; }); -} - -void AsyncGRPCServer::SetCond(int cond) { - { - std::lock_guard lock(this->barrier_mutex_); - barrier_cond_step_ = cond; - } - barrier_condition_.notify_all(); -} - -} // namespace detail -} // namespace operators -} // namespace paddle diff --git a/paddle/fluid/operators/detail/grpc_server.h b/paddle/fluid/operators/detail/grpc_server.h deleted file mode 100644 index 7f9cae21cc..0000000000 --- a/paddle/fluid/operators/detail/grpc_server.h +++ /dev/null @@ -1,130 +0,0 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
*/ - -#pragma once - -#include -#include // NOLINT -#include - -#include "grpc++/grpc++.h" -#include "paddle/fluid/framework/blocking_queue.h" -#include "paddle/fluid/framework/executor.h" -#include "paddle/fluid/framework/lod_tensor.h" -#include "paddle/fluid/framework/program_desc.h" -#include "paddle/fluid/framework/scope.h" -#include "paddle/fluid/framework/selected_rows.h" -#include "paddle/fluid/framework/var_type.h" -#include "paddle/fluid/operators/detail/grpc_service.h" -#include "paddle/fluid/operators/detail/send_recv.grpc.pb.h" -#include "paddle/fluid/operators/detail/send_recv.pb.h" -#include "paddle/fluid/operators/detail/sendrecvop_utils.h" - -namespace paddle { -namespace operators { -namespace detail { - -typedef std::pair> - ReceivedMessage; -typedef framework::BlockingQueue ReceivedQueue; - -typedef std::pair MessageWithName; -class RequestBase; - -class AsyncGRPCServer final { - public: - explicit AsyncGRPCServer(const std::string &address, bool sync_mode) - : address_(address), sync_mode_(sync_mode), ready_(0) {} - - void WaitServerReady(); - void RunSyncUpdate(); - - // functions to sync server barrier status. - void WaitCond(int cond); - void SetCond(int cond); - void WaitClientGet(int count); - - void SetScope(framework::Scope *scope) { scope_ = scope; } - - void SetDevCtx(const platform::DeviceContext *dev_ctx) { dev_ctx_ = dev_ctx; } - - void SetProgram(framework::ProgramDesc *program) { program_ = program; } - - void SetExecutor(framework::Executor *executor) { executor_ = executor; } - - void SetPrefetchPreparedCtx(framework::ExecutorPrepareContext *prepared) { - prefetch_ctx_ = prepared; - } - - int GetSelectedPort() const { return selected_port_; } - - const ReceivedMessage Get() { return this->var_recv_queue_.Pop(); } - - void Push(const std::string &msg_name) { - this->var_recv_queue_.Push(std::make_pair(msg_name, nullptr)); - } - - void ShutDown(); - - protected: - void HandleRequest(::grpc::ServerCompletionQueue *cq, - const std::string &cq_name, - std::function TryToRegisterNewOne); - void TryToRegisterNewSendOne(); - void TryToRegisterNewGetOne(); - void TryToRegisterNewPrefetchOne(); - void ShutdownQueue(); - - private: - std::mutex cq_mutex_; - volatile bool is_shut_down_ = false; - std::unique_ptr<::grpc::ServerCompletionQueue> cq_send_; - std::unique_ptr<::grpc::ServerCompletionQueue> cq_get_; - std::unique_ptr<::grpc::ServerCompletionQueue> cq_prefetch_; - - GrpcService::AsyncService service_; - std::unique_ptr<::grpc::Server> server_; - - std::string address_; - const bool sync_mode_; - framework::Scope *scope_; - const platform::DeviceContext *dev_ctx_; - - // received variable from RPC, operators fetch variable from this queue. - framework::BlockingQueue var_get_queue_; - // client send variable to this queue. 
- ReceivedQueue var_recv_queue_; - - // condition of the sub program - std::mutex barrier_mutex_; - mutable int barrier_cond_step_; - std::condition_variable barrier_condition_; - - std::unique_ptr t_send_; - std::unique_ptr t_get_; - std::unique_ptr t_prefetch_; - - framework::ExecutorPrepareContext *prefetch_ctx_; - framework::ProgramDesc *program_; - framework::Executor *executor_; - int selected_port_; - - std::mutex mutex_ready_; - std::condition_variable condition_ready_; - int ready_; -}; - -}; // namespace detail -}; // namespace operators -}; // namespace paddle diff --git a/paddle/fluid/operators/detail/grpc_server_test.cc b/paddle/fluid/operators/detail/grpc_server_test.cc deleted file mode 100644 index 25b95d608d..0000000000 --- a/paddle/fluid/operators/detail/grpc_server_test.cc +++ /dev/null @@ -1,140 +0,0 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. */ - -#include -#include -#include // NOLINT - -#include "gtest/gtest.h" -#include "paddle/fluid/operators/detail/grpc_client.h" -#include "paddle/fluid/operators/detail/grpc_server.h" - -#include "paddle/fluid/framework/block_desc.h" -#include "paddle/fluid/framework/op_registry.h" -#include "paddle/fluid/framework/operator.h" - -namespace framework = paddle::framework; -namespace platform = paddle::platform; -namespace detail = paddle::operators::detail; - -USE_OP(lookup_table); - -std::unique_ptr rpc_service_; - -framework::BlockDesc* AppendPrefetchBlcok(framework::ProgramDesc* program) { - auto root_block = program->MutableBlock(0); - auto* block = program->AppendBlock(*root_block); - - framework::VariableNameMap input({{"W", {"w"}}, {"Ids", {"ids"}}}); - framework::VariableNameMap output({{"Output", {"out"}}}); - auto op = block->AppendOp(); - op->SetType("lookup_table"); - op->SetInput("W", {"w"}); - op->SetInput("Ids", {"ids"}); - op->SetOutput("Out", {"out"}); - - auto& out = *root_block->Var("out"); - out.SetType(framework::proto::VarType::SELECTED_ROWS); - out.SetShape({10, 10}); - - return block; -} - -void CreateVarsOnScope(framework::Scope* scope, platform::CPUPlace* place) { - auto w_var = scope->Var("w"); - w_var->GetMutable(); - - auto out_var = scope->Var("out"); - out_var->GetMutable(); - - auto ids_var = scope->Var("ids"); - ids_var->GetMutable(); -} - -void InitTensorsOnClient(framework::Scope* scope, platform::CPUPlace* place, - int64_t rows_numel) { - CreateVarsOnScope(scope, place); - auto ids_var = scope->Var("ids")->GetMutable(); - auto rows = ids_var->mutable_rows(); - for (int64_t i = 0; i < rows_numel; ++i) rows->push_back(i * 2); - ids_var->mutable_value()->Resize({rows_numel, 1}); - ids_var->mutable_value()->mutable_data(*place); -} - -void InitTensorsOnServer(framework::Scope* scope, platform::CPUPlace* place, - int64_t rows_numel) { - CreateVarsOnScope(scope, place); - auto w = scope->Var("w")->GetMutable(); - auto rows = w->mutable_rows(); - for (int64_t i = 0; i < rows_numel; ++i) rows->push_back(i); - auto w_value = w->mutable_value(); - 
w_value->Resize({rows_numel, 10}); - - auto ptr = w_value->mutable_data(*place); - - for (int64_t i = 0; i < w_value->numel(); ++i) { - ptr[i] = static_cast(i / 10); - } -} - -void StartServer(const std::string& endpoint) { - rpc_service_.reset(new detail::AsyncGRPCServer(endpoint, true)); - framework::ProgramDesc program; - framework::Scope scope; - platform::CPUPlace place; - framework::Executor exe(place); - platform::CPUDeviceContext ctx(place); - auto* block = AppendPrefetchBlcok(&program); - auto prepared = exe.Prepare(program, block->ID()); - InitTensorsOnServer(&scope, &place, 10); - - rpc_service_->SetProgram(&program); - rpc_service_->SetPrefetchPreparedCtx(prepared.get()); - rpc_service_->SetDevCtx(&ctx); - rpc_service_->SetScope(&scope); - rpc_service_->SetExecutor(&exe); - - rpc_service_->RunSyncUpdate(); -} - -TEST(PREFETCH, CPU) { - // start up a server instance backend - std::thread server_thread(StartServer, "127.0.0.1:8889"); - sleep(2); - framework::Scope scope; - platform::CPUPlace place; - platform::CPUDeviceContext ctx(place); - // create var on local scope - int64_t rows_numel = 5; - InitTensorsOnClient(&scope, &place, rows_numel); - std::string in_var_name("ids"); - std::string out_var_name("out"); - - detail::RPCClient client; - client.AsyncPrefetchVariable("127.0.0.1:8889", ctx, scope, in_var_name, - out_var_name); - client.Wait(); - - auto var = scope.Var(out_var_name); - auto value = var->GetMutable()->value(); - auto ptr = value.mutable_data(place); - - rpc_service_->ShutDown(); - server_thread.join(); - rpc_service_.reset(nullptr); - - for (int64_t i = 0; i < rows_numel; ++i) { - EXPECT_EQ(ptr[0 + i * value.dims()[1]], static_cast(i * 2)); - } -} diff --git a/paddle/fluid/operators/detail/macros.h b/paddle/fluid/operators/detail/macros.h new file mode 100644 index 0000000000..6f4a15caa5 --- /dev/null +++ b/paddle/fluid/operators/detail/macros.h @@ -0,0 +1,35 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once + +#ifdef PADDLE_WITH_DISTRIBUTE + +#ifdef PADDLE_WITH_GRPC + +#include "paddle/fluid/operators/distributed/grpc_client.h" +#include "paddle/fluid/operators/distributed/grpc_server.h" +#define RPCSERVER_T paddle::operators::distributed::AsyncGRPCServer +#define RPCCLIENT_T paddle::operators::distributed::GRPCClient + +#else // PADDLE_WITH_GRPC + +#include "paddle/fluid/operators/distributed/brpc_client.h" +#include "paddle/fluid/operators/distributed/brpc_server.h" +#define RPCSERVER_T paddle::operators::distributed::AsyncBRPCServer +#define RPCCLIENT_T paddle::operators::distributed::BRPCClient + +#endif // PADDLE_WITH_GRPC + +#endif // PADDLE_WITH_DISTRIBUTE diff --git a/paddle/fluid/operators/detail/sendrecvop_utils.cc b/paddle/fluid/operators/detail/sendrecvop_utils.cc deleted file mode 100644 index d68cf467f7..0000000000 --- a/paddle/fluid/operators/detail/sendrecvop_utils.cc +++ /dev/null @@ -1,206 +0,0 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. 
All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. */ - -#include "paddle/fluid/operators/detail/sendrecvop_utils.h" - -#include -#include // NOLINT - -#include "google/protobuf/io/coded_stream.h" -#include "google/protobuf/io/zero_copy_stream.h" -#include "paddle/fluid/framework/data_type.h" -#include "paddle/fluid/operators/detail/bytebuffer_stream.h" -#include "paddle/fluid/operators/detail/proto_encoder_helper.h" -#include "paddle/fluid/operators/detail/variable_response.h" -#include "paddle/fluid/platform/profiler.h" - -namespace paddle { -namespace operators { -namespace detail { - -void SerializeToByteBuffer(const std::string& name, framework::Variable* var, - const platform::DeviceContext& ctx, - ::grpc::ByteBuffer* msg, - const std::string& out_name) { - using VarMsg = sendrecv::VariableMessage; - // When using GPU, need to free the copied CPU buffer - // when the ByteBuffer destroies - // TODO(typhoonzero): add unref here, if we have dependent - // parallelism execution, need to know when to free the tensor. - DestroyCallback destroy_callback = [](void* backing) {}; - - auto buffer = std::unique_ptr(new char[1024]); - void* buf = buffer.get(); - - void* payload = nullptr; - size_t payload_size; - ProtoEncodeHelper e(static_cast(buf), 1024); - // Note: normally the profiler is enabled in 1 trainer, hence only - // 1 trainer returns true for ShouldSendProfileState(). It tells PS - // servers the trainer's profiling state so that PS can follow the - // trainer. 
- if (platform::ShouldSendProfileState()) { - e.WriteBool(VarMsg::kProfileFieldNumber, platform::IsProfileEnabled()); - } - e.WriteString(VarMsg::kVarnameFieldNumber, name); - if (var->IsType()) { - e.WriteUint64(VarMsg::kTypeFieldNumber, 0); - } else if (var->IsType()) { - e.WriteUint64(VarMsg::kTypeFieldNumber, 1); - } - - if (!out_name.empty()) { - e.WriteString(VarMsg::kOutVarnameFieldNumber, out_name); - } - switch (framework::ToVarType(var->Type())) { - case framework::proto::VarType_Type_LOD_TENSOR: { - auto tensor = var->Get(); - e.WriteUint64(VarMsg::kDataTypeFieldNumber, - framework::ToDataType(tensor.type())); - for (auto& dim : framework::vectorize(tensor.dims())) { - e.WriteUint64(VarMsg::kDimsFieldNumber, dim); - } - auto lod = tensor.lod(); // std::vector> - if (lod.size() > 0) { - e.WriteUint64(VarMsg::kLodLevelFieldNumber, lod.size()); - - for (auto& each : lod) { - e.WriteVarlengthBeginning(VarMsg::kLodFieldNumber, - 2 + // tag + varintlength of submessage - 1 + // kLodDataFieldNumber - each.size()); - // auto copied from GPU - for (auto& d : each) { - e.WriteUint64(VarMsg::LodData::kLodDataFieldNumber, d); - } - } - } - if (platform::is_gpu_place(ctx.GetPlace())) { -#ifdef PADDLE_WITH_CUDA - PADDLE_ENFORCE(platform::is_gpu_place(tensor.place())); - platform::CPUPlace cpu; - auto& gpu_dev_ctx = - static_cast(ctx); - auto copy_size = tensor.numel() * framework::SizeOfType(tensor.type()); - payload = memory::Alloc(cpu, copy_size); - - memory::Copy(cpu, payload, - boost::get(tensor.place()), - reinterpret_cast(tensor.data()), - copy_size, gpu_dev_ctx.stream()); - ctx.Wait(); - destroy_callback = [](void* backing) { - platform::CPUPlace cpu; - memory::Free(cpu, backing); - }; - -#endif - } else { - payload = tensor.data(); - } - payload_size = tensor.numel() * framework::SizeOfType(tensor.type()); - e.WriteVarlengthBeginning(VarMsg::kSerializedFieldNumber, payload_size); - } break; - case framework::proto::VarType_Type_SELECTED_ROWS: { - // TODO(typhoonzero): selectedrows implement should not use unique_ptr - auto* slr = var->GetMutable(); - e.WriteUint64(VarMsg::kDataTypeFieldNumber, - framework::ToDataType(slr->value().type())); - for (auto& dim : framework::vectorize(slr->value().dims())) { - e.WriteUint64(VarMsg::kDimsFieldNumber, dim); - } - e.WriteUint64(VarMsg::kLodLevelFieldNumber, 0); - e.WriteUint64(VarMsg::kSlrHeightFieldNumber, slr->height()); - auto* tensor = slr->mutable_value(); - if (platform::is_gpu_place(ctx.GetPlace())) { -#ifdef PADDLE_WITH_CUDA - platform::CPUPlace cpu; - auto& gpu_dev_ctx = - static_cast(ctx); - auto copy_size = - tensor->numel() * framework::SizeOfType(tensor->type()); - payload = memory::Alloc(cpu, copy_size); - memory::Copy(cpu, payload, - boost::get(tensor->place()), - reinterpret_cast(tensor->data()), - copy_size, gpu_dev_ctx.stream()); - ctx.Wait(); - destroy_callback = [](void* backing) { - platform::CPUPlace cpu; - memory::Free(cpu, backing); - }; -#endif - } else { - payload = slr->mutable_value()->data(); - } - payload_size = tensor->numel() * framework::SizeOfType(tensor->type()); - e.WriteVarlengthBeginning(VarMsg::kSerializedFieldNumber, payload_size); - } break; - default: - PADDLE_THROW("Serialize does not support type: %s", - typeid(var->Type()).name()); - break; - } - // steal reference of tensor data - ::grpc::Slice slices[4]; // metadata, tensor, rows meta, rows - int num_slices = 2; // only SelectedRows have rows buffer - slices[0] = ::grpc::Slice(e.size()); - memcpy(const_cast(slices[0].begin()), e.data(), e.size()); 
- slices[1] = ::grpc::Slice( - grpc_slice_new_with_user_data(payload, payload_size, destroy_callback, - static_cast(payload)), - ::grpc::Slice::STEAL_REF); - - if (framework::ToVarType(var->Type()) == - framework::proto::VarType_Type_SELECTED_ROWS) { - auto* slr = var->GetMutable(); - - ProtoEncodeHelper e2(static_cast(buf), 128); - // NOTE: rows is of type int64_t - size_t rows_memory_size = - slr->rows().size() * framework::SizeOfType(typeid(int64_t)); - e2.WriteVarlengthBeginning(VarMsg::kRowsFieldNumber, rows_memory_size); - slices[2] = ::grpc::Slice(e2.size()); - memcpy(const_cast(slices[2].begin()), e2.data(), e2.size()); - - slices[3] = ::grpc::Slice( - grpc_slice_new_with_user_data( - const_cast( - reinterpret_cast(slr->rows().data())), - rows_memory_size, - [](void* backing) { - // TODO(typhoonzero): add unref here, same as above. - }, - const_cast( - reinterpret_cast(slr->rows().data()))), - ::grpc::Slice::STEAL_REF); - num_slices = 4; - } - - ::grpc::ByteBuffer tmp(&slices[0], num_slices); - msg->Swap(&tmp); -} - -void DeserializeFromByteBuffer(const ::grpc::ByteBuffer& msg, - const platform::DeviceContext& ctx, - const framework::Scope* scope, - framework::Variable** var) { - operators::detail::VariableResponse resp(scope, &ctx); - PADDLE_ENFORCE(resp.Parse(msg) == 0, "parse bytebuffer to tensor error!"); - *var = resp.GetVar(); -} - -} // namespace detail -} // namespace operators -} // namespace paddle diff --git a/paddle/fluid/operators/detection/CMakeLists.txt b/paddle/fluid/operators/detection/CMakeLists.txt new file mode 100644 index 0000000000..a44d84cd7b --- /dev/null +++ b/paddle/fluid/operators/detection/CMakeLists.txt @@ -0,0 +1,34 @@ +set(LOCAL_DETECTION_LIBS) + +function(detection_library TARGET_NAME) + set(oneValueArgs "") + set(multiValueArgs SRCS DEPS) + set(options "") + set(common_deps op_registry) + set(pybind_flag 0) + cmake_parse_arguments(detection_library "${options}" "${oneValueArgs}" + "${multiValueArgs}" ${ARGN}) + op_library(${TARGET_NAME} SRCS ${detection_library_SRCS} DEPS ${common_deps} ${detection_library_DEPS}) + set(LOCAL_DETECTION_LIBS + ${TARGET_NAME} + ${LOCAL_DETECTION_LIBS} + PARENT_SCOPE) +endfunction() + +detection_library(bipartite_match_op SRCS bipartite_match_op.cc) +detection_library(box_coder_op SRCS box_coder_op.cc box_coder_op.cu) +detection_library(iou_similarity_op SRCS iou_similarity_op.cc +iou_similarity_op.cu) +detection_library(mine_hard_examples_op SRCS mine_hard_examples_op.cc) +detection_library(multiclass_nms_op SRCS multiclass_nms_op.cc) +detection_library(prior_box_op SRCS prior_box_op.cc prior_box_op.cu) +detection_library(anchor_generator_op SRCS anchor_generator_op.cc +anchor_generator_op.cu) +detection_library(target_assign_op SRCS target_assign_op.cc +target_assign_op.cu) +detection_library(polygon_box_transform_op SRCS polygon_box_transform_op.cc +polygon_box_transform_op.cu) +detection_library(rpn_target_assign_op SRCS rpn_target_assign_op.cc) + +# Export local libraries to parent +set(DETECTION_LIBRARY ${LOCAL_DETECTION_LIBS} PARENT_SCOPE) diff --git a/paddle/fluid/operators/detection/anchor_generator_op.cc b/paddle/fluid/operators/detection/anchor_generator_op.cc new file mode 100644 index 0000000000..0c0155a0a9 --- /dev/null +++ b/paddle/fluid/operators/detection/anchor_generator_op.cc @@ -0,0 +1,154 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include "paddle/fluid/operators/detection/anchor_generator_op.h" + +namespace paddle { +namespace operators { + +class AnchorGeneratorOp : public framework::OperatorWithKernel { + public: + using framework::OperatorWithKernel::OperatorWithKernel; + + void InferShape(framework::InferShapeContext* ctx) const override { + PADDLE_ENFORCE(ctx->HasInput("Input"), + "Input(Input) of AnchorGeneratorOp should not be null."); + PADDLE_ENFORCE(ctx->HasOutput("Anchors"), + "Output(Anchors) of AnchorGeneratorOp should not be null."); + PADDLE_ENFORCE( + ctx->HasOutput("Variances"), + "Output(Variances) of AnchorGeneratorOp should not be null."); + + auto input_dims = ctx->GetInputDim("Input"); + PADDLE_ENFORCE(input_dims.size() == 4, "The layout of input is NCHW."); + + auto anchor_sizes = ctx->Attrs().Get>("anchor_sizes"); + auto aspect_ratios = ctx->Attrs().Get>("aspect_ratios"); + auto stride = ctx->Attrs().Get>("stride"); + auto variances = ctx->Attrs().Get>("variances"); + + size_t num_anchors = aspect_ratios.size() * anchor_sizes.size(); + + std::vector dim_vec(4); + dim_vec[0] = input_dims[2]; + dim_vec[1] = input_dims[3]; + dim_vec[2] = num_anchors; + dim_vec[3] = 4; + ctx->SetOutputDim("Anchors", framework::make_ddim(dim_vec)); + ctx->SetOutputDim("Variances", framework::make_ddim(dim_vec)); + } + + protected: + framework::OpKernelType GetExpectedKernelType( + const framework::ExecutionContext& ctx) const override { + return framework::OpKernelType( + framework::ToDataType(ctx.Input("Input")->type()), + ctx.device_context()); + } +}; + +class AnchorGeneratorOpMaker : public framework::OpProtoAndCheckerMaker { + public: + void Make() override { + AddInput("Input", + "(Tensor, default Tensor), " + "the input feature is a tensor with a rank of 4. " + "The layout is NCHW."); + AddOutput("Anchors", + "(Tensor, default Tensor), the output is a " + "tensor with a rank of 4. The layout is [H, W, num_anchors, 4]. " + "H is the height of input, W is the width of input, num_anchors " + "is the box count of each position. " + "Each anchor is in (xmin, ymin, xmax, ymax) format"); + AddOutput("Variances", + "(Tensor, default Tensor), the expanded variances for " + "normalizing bbox regression targets. The layout is [H, W, " + "num_anchors, 4]. " + "H is the height of input, W is the width of input, num_anchors " + "is the box count of each position. " + "Each variance is in (xcenter, ycenter, w, h) format"); + + AddAttr>( + "anchor_sizes", + "(vector) List of Region Proposal Network(RPN) anchor sizes " + " given in absolute pixels e.g. (64, 128, 256, 512)." + " For instance, the anchor size of 64 means the area of this anchor " + "equals to 64**2.") + .AddCustomChecker([](const std::vector& anchor_sizes) { + PADDLE_ENFORCE_GT(anchor_sizes.size(), 0, + "Size of anchor_sizes must be at least 1."); + for (size_t i = 0; i < anchor_sizes.size(); ++i) { + PADDLE_ENFORCE_GT(anchor_sizes[i], 0.0, + "anchor_sizes[%d] must be positive.", i); + } + }); + AddAttr>( + "aspect_ratios", + "(vector) List of Region Proposal Network(RPN) anchor aspect " + "ratios, e.g. (0.5, 1, 2)." 
+ "For instacne, the aspect ratio of 0.5 means the height / width of " + "this anchor equals 0.5."); + + AddAttr>("variances", + "(vector) List of variances to be used " + "in box regression deltas") + .AddCustomChecker([](const std::vector& variances) { + PADDLE_ENFORCE_EQ(variances.size(), 4, + "Must and only provide 4 variance."); + for (size_t i = 0; i < variances.size(); ++i) { + PADDLE_ENFORCE_GT(variances[i], 0.0, + "variance[%d] must be greater than 0.", i); + } + }); + + AddAttr>("stride", + "Anchors stride across width and height, " + "with a default of (16, 16)") + .SetDefault(std::vector(2, 16.0)) + .AddCustomChecker([](const std::vector& stride) { + PADDLE_ENFORCE_EQ( + stride.size(), 2, + "Must and only provide 2 stride for width and height."); + for (size_t i = 0; i < stride.size(); ++i) { + PADDLE_ENFORCE_GT(stride[i], 0.0, + "stride[%d] should be larger than 0.", i); + } + }); + + AddAttr("offset", + "(float) " + "Anchor center offset, with a default of 0.5") + .SetDefault(0.5); + AddComment(R"DOC( +AnchorGenerator operator +Generates anchors for Faster RCNN, FPN etc. algorithm. +Each position of the input produce N anchors, N = + size(anchor_sizes) * size(aspect_ratios). + +Please get more information from the following papers: +https://arxiv.org/abs/1506.01497. +)DOC"); + } +}; + +} // namespace operators +} // namespace paddle + +namespace ops = paddle::operators; +REGISTER_OPERATOR(anchor_generator, ops::AnchorGeneratorOp, + ops::AnchorGeneratorOpMaker, + paddle::framework::EmptyGradOpMaker); + +REGISTER_OP_CPU_KERNEL(anchor_generator, ops::AnchorGeneratorOpKernel, + ops::AnchorGeneratorOpKernel); diff --git a/paddle/fluid/operators/detection/anchor_generator_op.cu b/paddle/fluid/operators/detection/anchor_generator_op.cu new file mode 100644 index 0000000000..3cc9bbeee1 --- /dev/null +++ b/paddle/fluid/operators/detection/anchor_generator_op.cu @@ -0,0 +1,132 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ + +#include "paddle/fluid/operators/detection/anchor_generator_op.h" + +namespace paddle { +namespace operators { + +template +__global__ void GenAnchors(T* out, const T* aspect_ratios, const int ar_num, + const T* anchor_sizes, const int as_num, + const T* stride, const int sd_num, const int height, + const int width, const T offset) { + int num_anchors = as_num * ar_num; + int box_num = height * width * num_anchors; + for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < box_num; + i += blockDim.x * gridDim.x) { + int h_idx = i / (num_anchors * width); + int w_idx = (i / num_anchors) % width; + T stride_width = stride[0]; + T stride_height = stride[1]; + T x_ctr = (w_idx * stride_width) + offset * (stride_width - 1); + T y_ctr = (h_idx * stride_height) + offset * (stride_height - 1); + T area, area_ratios; + T base_w, base_h; + T scale_w, scale_h; + T anchor_width, anchor_height; + int anch_idx = i % num_anchors; + int ar_idx = anch_idx / as_num; + int as_idx = anch_idx % as_num; + T aspect_ratio = aspect_ratios[ar_idx]; + T anchor_size = anchor_sizes[as_idx]; + area = stride_width * stride_height; + area_ratios = area / aspect_ratio; + base_w = round(sqrt(area_ratios)); + base_h = round(base_w * aspect_ratio); + scale_w = anchor_size / stride_width; + scale_h = anchor_size / stride_height; + anchor_width = scale_w * base_w; + anchor_height = scale_h * base_h; + + T xmin = (x_ctr - 0.5 * (anchor_width - 1)); + T ymin = (y_ctr - 0.5 * (anchor_height - 1)); + T xmax = (x_ctr + 0.5 * (anchor_width - 1)); + T ymax = (y_ctr + 0.5 * (anchor_height - 1)); + out[i * 4] = xmin; + out[i * 4 + 1] = ymin; + out[i * 4 + 2] = xmax; + out[i * 4 + 3] = ymax; + } +} + +template +__global__ void SetVariance(T* out, const T* var, const int vnum, + const int num) { + for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < num; + i += blockDim.x * gridDim.x) { + out[i] = var[i % vnum]; + } +} + +template +class AnchorGeneratorOpCUDAKernel : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext& ctx) const override { + auto* input = ctx.Input("Input"); + auto* anchors = ctx.Output("Anchors"); + auto* vars = ctx.Output("Variances"); + + auto anchor_sizes = ctx.Attr>("anchor_sizes"); + auto aspect_ratios = ctx.Attr>("aspect_ratios"); + auto stride = ctx.Attr>("stride"); + auto variances = ctx.Attr>("variances"); + + T offset = static_cast(ctx.Attr("offset")); + + auto width = input->dims()[3]; + auto height = input->dims()[2]; + + int num_anchors = aspect_ratios.size() * anchor_sizes.size(); + + int box_num = width * height * num_anchors; + + int block = 512; + int grid = (box_num + block - 1) / block; + + auto stream = + ctx.template device_context().stream(); + + anchors->mutable_data(ctx.GetPlace()); + vars->mutable_data(ctx.GetPlace()); + + framework::Tensor ar; + framework::TensorFromVector(aspect_ratios, ctx.device_context(), &ar); + + framework::Tensor as; + framework::TensorFromVector(anchor_sizes, ctx.device_context(), &as); + + framework::Tensor sd; + framework::TensorFromVector(stride, ctx.device_context(), &sd); + + GenAnchors<<>>( + anchors->data(), ar.data(), aspect_ratios.size(), as.data(), + anchor_sizes.size(), sd.data(), stride.size(), height, width, + offset); + + framework::Tensor v; + framework::TensorFromVector(variances, ctx.device_context(), &v); + grid = (box_num * 4 + block - 1) / block; + SetVariance<<>>(vars->data(), v.data(), + variances.size(), box_num * 4); + } +}; // namespace operators + +} // namespace operators +} // namespace paddle + 
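As a sanity check on the box arithmetic in the GenAnchors kernel above, here is a standalone recap of the per-anchor computation with concrete numbers; the helper name AnchorBox and the chosen parameters are illustrative assumptions, not part of the operator.

#include <array>
#include <cmath>
#include <iostream>

std::array<double, 4> AnchorBox(int w_idx, int h_idx, double stride_w,
                                double stride_h, double ar, double size,
                                double offset) {
  double x_ctr = w_idx * stride_w + offset * (stride_w - 1);
  double y_ctr = h_idx * stride_h + offset * (stride_h - 1);
  double area = stride_w * stride_h;
  double base_w = std::round(std::sqrt(area / ar));  // base box from aspect ratio
  double base_h = std::round(base_w * ar);
  double w = (size / stride_w) * base_w;             // scale base box to anchor_size
  double h = (size / stride_h) * base_h;
  return {x_ctr - 0.5 * (w - 1), y_ctr - 0.5 * (h - 1),
          x_ctr + 0.5 * (w - 1), y_ctr + 0.5 * (h - 1)};
}

int main() {
  // aspect_ratio 1, anchor_size 64, stride (16, 16) at feature cell (0, 0):
  auto box = AnchorBox(0, 0, 16, 16, 1.0, 64.0, 0.5);
  // Prints: -24 -24 39 39, a 64x64 box centred on (7.5, 7.5).
  for (double v : box) std::cout << v << " ";
  return 0;
}

The CPU kernel in the header file below performs the same computation per (h_idx, w_idx, aspect_ratio, anchor_size) combination, just with Eigen tensors instead of scalars.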
+namespace ops = paddle::operators; +REGISTER_OP_CUDA_KERNEL(anchor_generator, + ops::AnchorGeneratorOpCUDAKernel, + ops::AnchorGeneratorOpCUDAKernel); diff --git a/paddle/fluid/operators/detection/anchor_generator_op.h b/paddle/fluid/operators/detection/anchor_generator_op.h new file mode 100644 index 0000000000..e0e499d76a --- /dev/null +++ b/paddle/fluid/operators/detection/anchor_generator_op.h @@ -0,0 +1,109 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#pragma once +#include +#include +#include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/operators/math/math_function.h" +#include "paddle/fluid/platform/transform.h" + +namespace paddle { +namespace operators { + +template +class AnchorGeneratorOpKernel : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext& ctx) const override { + auto* input = ctx.Input("Input"); + auto* anchors = ctx.Output("Anchors"); + auto* vars = ctx.Output("Variances"); + + auto anchor_sizes = ctx.Attr>("anchor_sizes"); + auto aspect_ratios = ctx.Attr>("aspect_ratios"); + auto stride = ctx.Attr>("stride"); + auto variances = ctx.Attr>("variances"); + + T offset = static_cast(ctx.Attr("offset")); + + auto feature_width = input->dims()[3]; + auto feature_height = input->dims()[2]; + + T stride_width, stride_height; + stride_width = stride[0]; + stride_height = stride[1]; + + int num_anchors = aspect_ratios.size() * anchor_sizes.size(); + + anchors->mutable_data(ctx.GetPlace()); + vars->mutable_data(ctx.GetPlace()); + + auto e_anchors = framework::EigenTensor::From(*anchors); + for (int h_idx = 0; h_idx < feature_height; ++h_idx) { + for (int w_idx = 0; w_idx < feature_width; ++w_idx) { + T x_ctr = (w_idx * stride_width) + offset * (stride_width - 1); + T y_ctr = (h_idx * stride_height) + offset * (stride_height - 1); + T area, area_ratios; + T base_w, base_h; + T scale_w, scale_h; + T anchor_width, anchor_height; + int idx = 0; + for (size_t r = 0; r < aspect_ratios.size(); ++r) { + auto ar = aspect_ratios[r]; + for (size_t s = 0; s < anchor_sizes.size(); ++s) { + auto anchor_size = anchor_sizes[s]; + area = stride_width * stride_height; + area_ratios = area / ar; + base_w = round(sqrt(area_ratios)); + base_h = round(base_w * ar); + scale_w = anchor_size / stride_width; + scale_h = anchor_size / stride_height; + anchor_width = scale_w * base_w; + anchor_height = scale_h * base_h; + e_anchors(h_idx, w_idx, idx, 0) = + (x_ctr - 0.5 * (anchor_width - 1)); + e_anchors(h_idx, w_idx, idx, 1) = + (y_ctr - 0.5 * (anchor_height - 1)); + e_anchors(h_idx, w_idx, idx, 2) = + (x_ctr + 0.5 * (anchor_width - 1)); + e_anchors(h_idx, w_idx, idx, 3) = + (y_ctr + 0.5 * (anchor_height - 1)); + idx++; + } + } + } + } + + framework::Tensor var_t; + var_t.mutable_data( + framework::make_ddim({1, static_cast(variances.size())}), + ctx.GetPlace()); + auto var_et = framework::EigenTensor::From(var_t); + for (size_t i = 0; i < variances.size(); ++i) { + var_et(0, i) = variances[i]; + } 
+ + int anchor_num = feature_height * feature_width * num_anchors; + auto var_dim = vars->dims(); + vars->Resize({anchor_num, static_cast(variances.size())}); + + auto e_vars = framework::EigenMatrix::From(*vars); + e_vars = var_et.broadcast(Eigen::DSizes(anchor_num, 1)); + + vars->Resize(var_dim); + } +}; // namespace operators + +} // namespace operators +} // namespace paddle diff --git a/paddle/fluid/operators/bipartite_match_op.cc b/paddle/fluid/operators/detection/bipartite_match_op.cc similarity index 79% rename from paddle/fluid/operators/bipartite_match_op.cc rename to paddle/fluid/operators/detection/bipartite_match_op.cc index 1218d9fdc1..c23b65fe4d 100644 --- a/paddle/fluid/operators/bipartite_match_op.cc +++ b/paddle/fluid/operators/detection/bipartite_match_op.cc @@ -51,6 +51,12 @@ class BipartiteMatchOp : public framework::OperatorWithKernel { } }; +template +bool DistPairDescend(std::tuple pair1, + std::tuple pair2) { + return std::get<2>(pair1) > std::get<2>(pair2); +} + template class BipartiteMatchKernel : public framework::OpKernel { public: @@ -58,46 +64,76 @@ class BipartiteMatchKernel : public framework::OpKernel { // The match_dist must be initialized to 0 at first. void BipartiteMatch(const Tensor& dist, int* match_indices, T* match_dist) const { - constexpr T kEPS = static_cast(1e-6); PADDLE_ENFORCE_EQ(dist.dims().size(), 2, "The rank of dist must be 2."); int64_t row = dist.dims()[0]; int64_t col = dist.dims()[1]; auto* dist_data = dist.data(); - std::vector row_pool; - for (int i = 0; i < row; ++i) { - row_pool.push_back(i); - } - while (row_pool.size() > 0) { - int max_idx = -1; - int max_row_idx = -1; - T max_dist = -1; - for (int64_t j = 0; j < col; ++j) { - if (match_indices[j] != -1) { - continue; + // Test result: When row==130 the speed of these two methods almost the same + if (row >= 130) { + std::vector> match_pair; + + for (int64_t i = 0; i < row; ++i) { + for (int64_t j = 0; j < col; ++j) { + match_pair.push_back(std::make_tuple(i, j, dist_data[i * col + j])); } - for (size_t k = 0; k < row_pool.size(); ++k) { - int m = row_pool[k]; - // distance is 0 between m-th row and j-th column - if (dist_data[m * col + j] < kEPS) { + } + std::sort(match_pair.begin(), match_pair.end(), DistPairDescend); + std::vector row_indices(row, -1); + + int64_t idx = 0; + for (int64_t k = 0; k < row * col; ++k) { + int64_t i = std::get<0>(match_pair[k]); + int64_t j = std::get<1>(match_pair[k]); + T dist = std::get<2>(match_pair[k]); + + if (idx >= row) { + break; + } + if (match_indices[j] == -1 && row_indices[i] == -1 && dist > 0) { + match_indices[j] = i; + row_indices[i] = j; + match_dist[j] = dist; + idx += 1; + } + } + } else { + constexpr T kEPS = static_cast(1e-6); + std::vector row_pool; + for (int i = 0; i < row; ++i) { + row_pool.push_back(i); + } + while (row_pool.size() > 0) { + int max_idx = -1; + int max_row_idx = -1; + T max_dist = -1; + for (int64_t j = 0; j < col; ++j) { + if (match_indices[j] != -1) { continue; } - if (dist_data[m * col + j] > max_dist) { - max_idx = j; - max_row_idx = m; - max_dist = dist_data[m * col + j]; + for (size_t k = 0; k < row_pool.size(); ++k) { + int m = row_pool[k]; + // distance is 0 between m-th row and j-th column + if (dist_data[m * col + j] < kEPS) { + continue; + } + if (dist_data[m * col + j] > max_dist) { + max_idx = j; + max_row_idx = m; + max_dist = dist_data[m * col + j]; + } } } - } - if (max_idx == -1) { - // Cannot find good match. 
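The hunk above (continued below) replaces the purely greedy match with a globally sorted match once row >= 130. A compact sketch of that sorted strategy on a plain row-major matrix, using std types only; the real kernel's LoD batching and cutoff are omitted:

// Sort all (row, col, dist) triples by descending distance, then greedily
// accept pairs whose row and column are both still unmatched.
#include <algorithm>
#include <cstdio>
#include <tuple>
#include <vector>

int main() {
  const int row = 2, col = 3;
  const float dist[row * col] = {0.1f, 0.9f, 0.3f,   // row 0
                                 0.8f, 0.2f, 0.7f};  // row 1
  std::vector<std::tuple<int, int, float>> pairs;
  for (int i = 0; i < row; ++i)
    for (int j = 0; j < col; ++j)
      pairs.emplace_back(i, j, dist[i * col + j]);
  std::sort(pairs.begin(), pairs.end(), [](const auto& a, const auto& b) {
    return std::get<2>(a) > std::get<2>(b);  // descending distance
  });

  std::vector<int> match_indices(col, -1);  // column -> matched row
  std::vector<int> row_indices(row, -1);    // row -> matched column
  int matched = 0;
  for (const auto& p : pairs) {
    if (matched >= row) break;  // every row already matched
    int i = std::get<0>(p), j = std::get<1>(p);
    float d = std::get<2>(p);
    if (match_indices[j] == -1 && row_indices[i] == -1 && d > 0) {
      match_indices[j] = i;
      row_indices[i] = j;
      ++matched;
    }
  }
  for (int j = 0; j < col; ++j)
    std::printf("col %d -> row %d\n", j, match_indices[j]);
  return 0;  // expected: col 0 -> 1, col 1 -> 0, col 2 -> -1
}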
- break; - } else { - PADDLE_ENFORCE_EQ(match_indices[max_idx], -1); - match_indices[max_idx] = max_row_idx; - match_dist[max_idx] = max_dist; - // Erase the row index. - row_pool.erase( - std::find(row_pool.begin(), row_pool.end(), max_row_idx)); + if (max_idx == -1) { + // Cannot find good match. + break; + } else { + PADDLE_ENFORCE_EQ(match_indices[max_idx], -1); + match_indices[max_idx] = max_row_idx; + match_dist[max_idx] = max_dist; + // Erase the row index. + row_pool.erase( + std::find(row_pool.begin(), row_pool.end(), max_row_idx)); + } } } } @@ -182,8 +218,7 @@ class BipartiteMatchKernel : public framework::OpKernel { class BipartiteMatchOpMaker : public framework::OpProtoAndCheckerMaker { public: - BipartiteMatchOpMaker(OpProto* proto, OpAttrChecker* op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput( "DistMat", "(LoDTensor or Tensor) this input is a 2-D LoDTensor with shape " diff --git a/paddle/fluid/operators/box_coder_op.cc b/paddle/fluid/operators/detection/box_coder_op.cc similarity index 77% rename from paddle/fluid/operators/box_coder_op.cc rename to paddle/fluid/operators/detection/box_coder_op.cc index ec416f725e..d0f95f727f 100644 --- a/paddle/fluid/operators/box_coder_op.cc +++ b/paddle/fluid/operators/detection/box_coder_op.cc @@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/fluid/operators/box_coder_op.h" +#include "paddle/fluid/operators/detection/box_coder_op.h" namespace paddle { namespace operators { @@ -22,21 +22,21 @@ class BoxCoderOp : public framework::OperatorWithKernel { void InferShape(framework::InferShapeContext *ctx) const override { PADDLE_ENFORCE(ctx->HasInput("PriorBox"), "Input(PriorBox) of BoxCoderOp should not be null."); - PADDLE_ENFORCE(ctx->HasInput("PriorBoxVar"), - "Input(PriorBoxVar) of BoxCoderOp should not be null."); PADDLE_ENFORCE(ctx->HasInput("TargetBox"), "Input(TargetBox) of BoxCoderOp should not be null."); PADDLE_ENFORCE(ctx->HasOutput("OutputBox"), "Output(OutputBox) of BoxCoderOp should not be null."); auto prior_box_dims = ctx->GetInputDim("PriorBox"); - auto prior_box_var_dims = ctx->GetInputDim("PriorBoxVar"); auto target_box_dims = ctx->GetInputDim("TargetBox"); PADDLE_ENFORCE_EQ(prior_box_dims.size(), 2, "The rank of Input of PriorBoxVar must be 2"); PADDLE_ENFORCE_EQ(prior_box_dims[1], 4, "The shape of PriorBox is [N, 4]"); - PADDLE_ENFORCE_EQ(prior_box_dims, prior_box_var_dims); + if (ctx->HasInput("PriorBoxVar")) { + auto prior_box_var_dims = ctx->GetInputDim("PriorBoxVar"); + PADDLE_ENFORCE_EQ(prior_box_dims, prior_box_var_dims); + } auto code_type = GetBoxCodeType(ctx->Attrs().Get("code_type")); if (code_type == BoxCodeType::kEncodeCenterSize) { @@ -60,8 +60,7 @@ class BoxCoderOp : public framework::OperatorWithKernel { class BoxCoderOpMaker : public framework::OpProtoAndCheckerMaker { public: - BoxCoderOpMaker(OpProto *proto, OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput( "PriorBox", "(Tensor, default Tensor) " @@ -72,9 +71,11 @@ class BoxCoderOpMaker : public framework::OpProtoAndCheckerMaker { "of the coordinate system. 
[xmax, ymax] is the right bottom "
         "coordinate of the anchor box.");
     AddInput("PriorBoxVar",
-             "(Tensor, default Tensor<float>) "
+             "(Tensor, default Tensor<float>, optional) "
              "PriorBoxVar is a 2-D Tensor with shape [M, 4] holds M group "
-             "of variance.");
+             "of variance. PriorBoxVar will set all elements to 1 by "
+             "default.")
+        .AsDispensable();
     AddInput(
         "TargetBox",
         "(LoDTensor or Tensor) This input can be a 2-D LoDTensor with shape "
@@ -92,6 +93,10 @@ class BoxCoderOpMaker : public framework::OpProtoAndCheckerMaker {
         "the code type used with the target box")
         .SetDefault("encode_center_size")
         .InEnum({"encode_center_size", "decode_center_size"});
+    AddAttr<bool>("box_normalized",
+                  "(bool, default true) "
+                  "whether to treat the priorbox as a normalized box")
+        .SetDefault(true);
     AddOutput("OutputBox",
               "(LoDTensor or Tensor) "
               "When code_type is 'encode_center_size', the output tensor of "
@@ -101,23 +106,36 @@
               "and M represents the number of deocded boxes.");
 
     AddComment(R"DOC(
-Bounding Box Coder Operator.
+
+Bounding Box Coder.
+
 Encode/Decode the target bounding box with the priorbox information.
+
 The Encoding schema described below:
-ox = (tx - px) / pw / pxv
-oy = (ty - py) / ph / pyv
-ow = log(abs(tw / pw)) / pwv
-oh = log(abs(th / ph)) / phv
+
+    ox = (tx - px) / pw / pxv
+
+    oy = (ty - py) / ph / pyv
+
+    ow = log(abs(tw / pw)) / pwv
+
+    oh = log(abs(th / ph)) / phv
+
 The Decoding schema described below:
-ox = (pw * pxv * tx * + px) - tw / 2
-oy = (ph * pyv * ty * + py) - th / 2
-ow = exp(pwv * tw) * pw + tw / 2
-oh = exp(phv * th) * ph + th / 2
-where tx, ty, tw, th denote the target box's center coordinates, width and
-height respectively. Similarly, px, py, pw, ph denote the priorbox's(anchor)
-center coordinates, width and height. pxv, pyv, pwv, phv denote the variance
-of the priorbox and ox, oy, ow, oh denote the encoded/decoded coordinates,
-width and height.
+
+    ox = (pw * pxv * tx + px) - ow / 2
+
+    oy = (ph * pyv * ty + py) - oh / 2
+
+    ow = exp(pwv * tw) * pw
+
+    oh = exp(phv * th) * ph
+
+where `tx`, `ty`, `tw`, `th` denote the target box's center coordinates, width
+and height respectively. Similarly, `px`, `py`, `pw`, `ph` denote the
+priorbox's (anchor) center coordinates, width and height. `pxv`, `pyv`, `pwv`,
+`phv` denote the variance of the priorbox and `ox`, `oy`, `ow`, `oh` denote the
+encoded/decoded coordinates, width and height.
 )DOC");
   }
 };
@@ -128,5 +146,6 @@ width and height.
 namespace ops = paddle::operators;
 REGISTER_OPERATOR(box_coder, ops::BoxCoderOp, ops::BoxCoderOpMaker,
                   paddle::framework::EmptyGradOpMaker);
-REGISTER_OP_CPU_KERNEL(box_coder, ops::BoxCoderKernel<float>,
-                       ops::BoxCoderKernel<double>);
+REGISTER_OP_CPU_KERNEL(
+    box_coder, ops::BoxCoderKernel<paddle::platform::CPUDeviceContext, float>,
+    ops::BoxCoderKernel<paddle::platform::CPUDeviceContext, double>);
diff --git a/paddle/fluid/operators/box_coder_op.cu b/paddle/fluid/operators/detection/box_coder_op.cu
similarity index 61%
rename from paddle/fluid/operators/box_coder_op.cu
rename to paddle/fluid/operators/detection/box_coder_op.cu
index 708c7a5fa9..a7af111f63 100644
--- a/paddle/fluid/operators/box_coder_op.cu
+++ b/paddle/fluid/operators/detection/box_coder_op.cu
@@ -9,7 +9,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License.
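Before the CUDA port below: the encode/decode schema documented above round-trips, which is easy to check numerically. A hedged sketch assuming variances of 1 (the default once PriorBoxVar is dispensable) and box_normalized true; for unnormalized pixel boxes the kernels add 1 to widths and heights via the (normalized == false) term:

// Round-trip check of the center-size encode/decode schema documented above.
// Variances are 1, matching the default when PriorBoxVar is omitted.
#include <cmath>
#include <cstdio>

struct Box { float xmin, ymin, xmax, ymax; };

int main() {
  const Box prior{10.f, 10.f, 50.f, 30.f};
  const Box target{12.f, 8.f, 48.f, 36.f};

  auto w = [](const Box& b) { return b.xmax - b.xmin; };
  auto h = [](const Box& b) { return b.ymax - b.ymin; };
  auto cx = [](const Box& b) { return (b.xmin + b.xmax) / 2; };
  auto cy = [](const Box& b) { return (b.ymin + b.ymax) / 2; };

  // Encode: offsets of the target relative to the prior.
  float ox = (cx(target) - cx(prior)) / w(prior);
  float oy = (cy(target) - cy(prior)) / h(prior);
  float ow = std::log(std::fabs(w(target) / w(prior)));
  float oh = std::log(std::fabs(h(target) / h(prior)));

  // Decode: recover the target box from the encoded offsets.
  float dcx = ox * w(prior) + cx(prior);
  float dcy = oy * h(prior) + cy(prior);
  float dw = std::exp(ow) * w(prior);
  float dh = std::exp(oh) * h(prior);
  std::printf("decoded: [%.1f, %.1f, %.1f, %.1f]\n", dcx - dw / 2,
              dcy - dh / 2, dcx + dw / 2, dcy + dh / 2);
  return 0;  // prints the original target box: [12.0, 8.0, 48.0, 36.0]
}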
*/ -#include "paddle/fluid/operators/box_coder_op.h" +#include "paddle/fluid/operators/detection/box_coder_op.h" #include "paddle/fluid/platform/cuda_primitives.h" namespace paddle { @@ -20,15 +20,16 @@ __global__ void EncodeCenterSizeKernel(const T* prior_box_data, const T* prior_box_var_data, const T* target_box_data, const int row, const int col, const int len, - T* output) { + const bool normalized, T* output) { const int idx = threadIdx.x + blockIdx.x * blockDim.x; if (idx < row * col) { const int row_idx = idx / col; const int col_idx = idx % col; - T prior_box_width = - prior_box_data[col_idx * len + 2] - prior_box_data[col_idx * len]; - T prior_box_height = - prior_box_data[col_idx * len + 3] - prior_box_data[col_idx * len + 1]; + T prior_box_width = prior_box_data[col_idx * len + 2] - + prior_box_data[col_idx * len] + (normalized == false); + T prior_box_height = prior_box_data[col_idx * len + 3] - + prior_box_data[col_idx * len + 1] + + (normalized == false); T prior_box_center_x = (prior_box_data[col_idx * len + 2] + prior_box_data[col_idx * len]) / 2; T prior_box_center_y = (prior_box_data[col_idx * len + 3] + @@ -41,20 +42,24 @@ __global__ void EncodeCenterSizeKernel(const T* prior_box_data, T target_box_center_y = (target_box_data[row_idx * len + 3] + target_box_data[row_idx * len + 1]) / 2; - T target_box_width = - target_box_data[row_idx * len + 2] - target_box_data[row_idx * len]; - T target_box_height = - target_box_data[row_idx * len + 3] - target_box_data[row_idx * len + 1]; + T target_box_width = target_box_data[row_idx * len + 2] - + target_box_data[row_idx * len] + (normalized == false); + T target_box_height = target_box_data[row_idx * len + 3] - + target_box_data[row_idx * len + 1] + + (normalized == false); - output[idx * len] = (target_box_center_x - prior_box_center_x) / - prior_box_width / prior_box_var_data[col_idx * len]; - output[idx * len + 1] = (target_box_center_y - prior_box_center_y) / - prior_box_height / - prior_box_var_data[col_idx * len + 1]; - output[idx * len + 2] = log(fabs(target_box_width / prior_box_width)) / - prior_box_var_data[col_idx * len + 2]; - output[idx * len + 3] = log(fabs(target_box_height / prior_box_height)) / - prior_box_var_data[col_idx * len + 3]; + output[idx * len] = + (target_box_center_x - prior_box_center_x) / prior_box_width; + output[idx * len + 1] = + (target_box_center_y - prior_box_center_y) / prior_box_height; + output[idx * len + 2] = log(fabs(target_box_width / prior_box_width)); + output[idx * len + 3] = log(fabs(target_box_height / prior_box_height)); + if (prior_box_var_data) { + output[idx * len] /= prior_box_var_data[col_idx * len]; + output[idx * len + 1] /= prior_box_var_data[col_idx * len + 1]; + output[idx * len + 2] /= prior_box_var_data[col_idx * len + 2]; + output[idx * len + 3] /= prior_box_var_data[col_idx * len + 3]; + } } } @@ -63,42 +68,56 @@ __global__ void DecodeCenterSizeKernel(const T* prior_box_data, const T* prior_box_var_data, const T* target_box_data, const int row, const int col, const int len, - T* output) { + const bool normalized, T* output) { const int idx = threadIdx.x + blockIdx.x * blockDim.x; if (idx < row * col) { const int col_idx = idx % col; - T prior_box_width = - prior_box_data[col_idx * len + 2] - prior_box_data[col_idx * len]; - T prior_box_height = - prior_box_data[col_idx * len + 3] - prior_box_data[col_idx * len + 1]; + T prior_box_width = prior_box_data[col_idx * len + 2] - + prior_box_data[col_idx * len] + (normalized == false); + T prior_box_height = 
prior_box_data[col_idx * len + 3] - + prior_box_data[col_idx * len + 1] + + (normalized == false); T prior_box_center_x = (prior_box_data[col_idx * len + 2] + prior_box_data[col_idx * len]) / 2; T prior_box_center_y = (prior_box_data[col_idx * len + 3] + prior_box_data[col_idx * len + 1]) / 2; - - T target_box_width = exp(prior_box_var_data[col_idx * len + 2] * + T target_box_width, target_box_height; + T target_box_center_x, target_box_center_y; + if (prior_box_var_data) { + target_box_width = exp(prior_box_var_data[col_idx * len + 2] * target_box_data[idx * len + 2]) * prior_box_width; - T target_box_height = exp(prior_box_var_data[col_idx * len + 3] * + target_box_height = exp(prior_box_var_data[col_idx * len + 3] * target_box_data[idx * len + 3]) * prior_box_height; - T target_box_center_x = prior_box_var_data[col_idx * len] * + target_box_center_x = prior_box_var_data[col_idx * len] * target_box_data[idx * len] * prior_box_width + prior_box_center_x; - T target_box_center_y = prior_box_var_data[col_idx * len + 1] * + target_box_center_y = prior_box_var_data[col_idx * len + 1] * target_box_data[idx * len + 1] * prior_box_height + prior_box_center_y; + } else { + target_box_width = exp(target_box_data[idx * len + 2]) * prior_box_width; + target_box_height = + exp(target_box_data[idx * len + 3]) * prior_box_height; + target_box_center_x = + target_box_data[idx * len] * prior_box_width + prior_box_center_x; + target_box_center_y = target_box_data[idx * len + 1] * prior_box_height + + prior_box_center_y; + } output[idx * len] = target_box_center_x - target_box_width / 2; output[idx * len + 1] = target_box_center_y - target_box_height / 2; - output[idx * len + 2] = target_box_center_x + target_box_width / 2; - output[idx * len + 3] = target_box_center_y + target_box_height / 2; + output[idx * len + 2] = + target_box_center_x + target_box_width / 2 - (normalized == false); + output[idx * len + 3] = + target_box_center_y + target_box_height / 2 - (normalized == false); } } -template +template class BoxCoderCUDAKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& context) const override { @@ -109,6 +128,11 @@ class BoxCoderCUDAKernel : public framework::OpKernel { auto* target_box = context.Input("TargetBox"); auto* output_box = context.Output("OutputBox"); + const T* prior_box_data = prior_box->data(); + const T* target_box_data = target_box->data(); + const T* prior_box_var_data = nullptr; + if (prior_box_var) prior_box_var_data = prior_box_var->data(); + if (target_box->lod().size()) { PADDLE_ENFORCE_EQ(target_box->lod().size(), 1, "Only support 1 level of LoD."); @@ -120,22 +144,19 @@ class BoxCoderCUDAKernel : public framework::OpKernel { int grid = (row * col + block - 1) / block; auto& device_ctx = context.cuda_device_context(); - const T* prior_box_data = prior_box->data(); - const T* prior_box_var_data = prior_box_var->data(); - const T* target_box_data = target_box->data(); - output_box->mutable_data({row, col, len}, context.GetPlace()); T* output = output_box->data(); auto code_type = GetBoxCodeType(context.Attr("code_type")); + bool normalized = context.Attr("box_normalized"); if (code_type == BoxCodeType::kEncodeCenterSize) { EncodeCenterSizeKernel<<>>( prior_box_data, prior_box_var_data, target_box_data, row, col, len, - output); + normalized, output); } else if (code_type == BoxCodeType::kDecodeCenterSize) { DecodeCenterSizeKernel<<>>( prior_box_data, prior_box_var_data, target_box_data, row, col, len, - output); + normalized, output); } 
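Both kernel launches above map one thread to each (target, prior) pair with a ceiling-divided 1-D grid. A small host-side sketch of that arithmetic, assuming a block size of 512 (the actual constant is defined just above the excerpted hunk):

// Grid-size arithmetic for one-thread-per-(row, col) kernels such as
// EncodeCenterSizeKernel / DecodeCenterSizeKernel above.
#include <cstdio>

int main() {
  const int row = 300, col = 1000;  // hypothetical target/prior box counts
  const int block = 512;            // assumed threads per block
  const int grid = (row * col + block - 1) / block;  // ceiling division
  std::printf("elements=%d blocks=%d threads=%d (>= elements)\n", row * col,
              grid, grid * block);
  // Inside the kernel, idx = threadIdx.x + blockIdx.x * blockDim.x, and the
  // guard `if (idx < row * col)` masks the padding threads of the last block.
  return 0;
}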
} }; @@ -144,5 +165,7 @@ class BoxCoderCUDAKernel : public framework::OpKernel { } // namespace paddle namespace ops = paddle::operators; -REGISTER_OP_CUDA_KERNEL(box_coder, ops::BoxCoderCUDAKernel, - ops::BoxCoderCUDAKernel); +REGISTER_OP_CUDA_KERNEL( + box_coder, + ops::BoxCoderCUDAKernel, + ops::BoxCoderCUDAKernel); diff --git a/paddle/fluid/operators/box_coder_op.h b/paddle/fluid/operators/detection/box_coder_op.h similarity index 50% rename from paddle/fluid/operators/box_coder_op.h rename to paddle/fluid/operators/detection/box_coder_op.h index 77fc6c2b62..5ed8520acd 100644 --- a/paddle/fluid/operators/box_coder_op.h +++ b/paddle/fluid/operators/detection/box_coder_op.h @@ -28,26 +28,28 @@ inline BoxCodeType GetBoxCodeType(const std::string& type) { PADDLE_THROW("Not support type %s.", type); } -template +template class BoxCoderKernel : public framework::OpKernel { public: - void EncodeCenterSize(const framework::Tensor& target_box, - const framework::Tensor& prior_box, - const framework::Tensor& prior_box_var, - T* output) const { - int64_t row = target_box.dims()[0]; - int64_t col = prior_box.dims()[0]; - int64_t len = prior_box.dims()[1]; - auto* target_box_data = target_box.data(); - auto* prior_box_data = prior_box.data(); - auto* prior_box_var_data = prior_box_var.data(); + void EncodeCenterSize(const framework::Tensor* target_box, + const framework::Tensor* prior_box, + const framework::Tensor* prior_box_var, + const bool normalized, T* output) const { + int64_t row = target_box->dims()[0]; + int64_t col = prior_box->dims()[0]; + int64_t len = prior_box->dims()[1]; + auto* target_box_data = target_box->data(); + auto* prior_box_data = prior_box->data(); + const T* prior_box_var_data = nullptr; + if (prior_box_var) prior_box_var_data = prior_box_var->data(); for (int64_t i = 0; i < row; ++i) { for (int64_t j = 0; j < col; ++j) { - T prior_box_width = - prior_box_data[j * len + 2] - prior_box_data[j * len]; - T prior_box_height = - prior_box_data[j * len + 3] - prior_box_data[j * len + 1]; + T prior_box_width = prior_box_data[j * len + 2] - + prior_box_data[j * len] + (normalized == false); + T prior_box_height = prior_box_data[j * len + 3] - + prior_box_data[j * len + 1] + + (normalized == false); T prior_box_center_x = (prior_box_data[j * len + 2] + prior_box_data[j * len]) / 2; T prior_box_center_y = @@ -57,67 +59,89 @@ class BoxCoderKernel : public framework::OpKernel { (target_box_data[i * len + 2] + target_box_data[i * len]) / 2; T target_box_center_y = (target_box_data[i * len + 3] + target_box_data[i * len + 1]) / 2; - T target_box_width = - target_box_data[i * len + 2] - target_box_data[i * len]; - T target_box_height = - target_box_data[i * len + 3] - target_box_data[i * len + 1]; + T target_box_width = target_box_data[i * len + 2] - + target_box_data[i * len] + (normalized == false); + T target_box_height = target_box_data[i * len + 3] - + target_box_data[i * len + 1] + + (normalized == false); size_t offset = i * col * len + j * len; - output[offset] = (target_box_center_x - prior_box_center_x) / - prior_box_width / prior_box_var_data[j * len]; - output[offset + 1] = (target_box_center_y - prior_box_center_y) / - prior_box_height / prior_box_var_data[j * len + 1]; + output[offset] = + (target_box_center_x - prior_box_center_x) / prior_box_width; + output[offset + 1] = + (target_box_center_y - prior_box_center_y) / prior_box_height; output[offset + 2] = - std::log(std::fabs(target_box_width / prior_box_width)) / - prior_box_var_data[j * len + 2]; + 
std::log(std::fabs(target_box_width / prior_box_width)); output[offset + 3] = - std::log(std::fabs(target_box_height / prior_box_height)) / - prior_box_var_data[j * len + 3]; + std::log(std::fabs(target_box_height / prior_box_height)); + if (prior_box_var) { + output[offset] /= prior_box_var_data[j * len]; + output[offset + 1] /= prior_box_var_data[j * len + 1]; + output[offset + 2] /= prior_box_var_data[j * len + 2]; + output[offset + 3] /= prior_box_var_data[j * len + 3]; + } } } } - void DecodeCenterSize(const framework::Tensor& target_box, - const framework::Tensor& prior_box, - const framework::Tensor& prior_box_var, - T* output) const { - int64_t row = target_box.dims()[0]; - int64_t col = prior_box.dims()[0]; - int64_t len = prior_box.dims()[1]; - - auto* target_box_data = target_box.data(); - auto* prior_box_data = prior_box.data(); - auto* prior_box_var_data = prior_box_var.data(); + void DecodeCenterSize(const framework::Tensor* target_box, + const framework::Tensor* prior_box, + const framework::Tensor* prior_box_var, + const bool normalized, T* output) const { + int64_t row = target_box->dims()[0]; + int64_t col = prior_box->dims()[0]; + int64_t len = prior_box->dims()[1]; + + auto* target_box_data = target_box->data(); + auto* prior_box_data = prior_box->data(); + const T* prior_box_var_data = nullptr; + if (prior_box_var) prior_box_var_data = prior_box_var->data(); for (int64_t i = 0; i < row; ++i) { for (int64_t j = 0; j < col; ++j) { size_t offset = i * col * len + j * len; - T prior_box_width = - prior_box_data[j * len + 2] - prior_box_data[j * len]; - T prior_box_height = - prior_box_data[j * len + 3] - prior_box_data[j * len + 1]; + T prior_box_width = prior_box_data[j * len + 2] - + prior_box_data[j * len] + (normalized == false); + T prior_box_height = prior_box_data[j * len + 3] - + prior_box_data[j * len + 1] + + (normalized == false); T prior_box_center_x = (prior_box_data[j * len + 2] + prior_box_data[j * len]) / 2; T prior_box_center_y = (prior_box_data[j * len + 3] + prior_box_data[j * len + 1]) / 2; - T target_box_center_x = prior_box_var_data[j * len] * + T target_box_center_x = 0, target_box_center_y = 0; + T target_box_width = 0, target_box_height = 0; + if (prior_box_var) { + target_box_center_x = prior_box_var_data[j * len] * target_box_data[offset] * prior_box_width + prior_box_center_x; - T target_box_center_y = prior_box_var_data[j * len + 1] * + target_box_center_y = prior_box_var_data[j * len + 1] * target_box_data[offset + 1] * prior_box_height + prior_box_center_y; - T target_box_width = std::exp(prior_box_var_data[j * len + 2] * + target_box_width = std::exp(prior_box_var_data[j * len + 2] * target_box_data[offset + 2]) * prior_box_width; - T target_box_height = std::exp(prior_box_var_data[j * len + 3] * + target_box_height = std::exp(prior_box_var_data[j * len + 3] * target_box_data[offset + 3]) * prior_box_height; + } else { + target_box_center_x = + target_box_data[offset] * prior_box_width + prior_box_center_x; + target_box_center_y = target_box_data[offset + 1] * prior_box_height + + prior_box_center_y; + target_box_width = + std::exp(target_box_data[offset + 2]) * prior_box_width; + target_box_height = + std::exp(target_box_data[offset + 3]) * prior_box_height; + } output[offset] = target_box_center_x - target_box_width / 2; output[offset + 1] = target_box_center_y - target_box_height / 2; - output[offset + 2] = target_box_center_x + target_box_width / 2; - output[offset + 3] = target_box_center_y + target_box_height / 2; + output[offset + 2] 
= + target_box_center_x + target_box_width / 2 - (normalized == false); + output[offset + 3] = + target_box_center_y + target_box_height / 2 - (normalized == false); } } } @@ -139,11 +163,14 @@ class BoxCoderKernel : public framework::OpKernel { output_box->mutable_data({row, col, len}, context.GetPlace()); auto code_type = GetBoxCodeType(context.Attr("code_type")); + bool normalized = context.Attr("box_normalized"); T* output = output_box->data(); if (code_type == BoxCodeType::kEncodeCenterSize) { - EncodeCenterSize(*target_box, *prior_box, *prior_box_var, output); + EncodeCenterSize(target_box, prior_box, prior_box_var, normalized, + output); } else if (code_type == BoxCodeType::kDecodeCenterSize) { - DecodeCenterSize(*target_box, *prior_box, *prior_box_var, output); + DecodeCenterSize(target_box, prior_box, prior_box_var, normalized, + output); } } }; diff --git a/paddle/fluid/operators/iou_similarity_op.cc b/paddle/fluid/operators/detection/iou_similarity_op.cc similarity index 88% rename from paddle/fluid/operators/iou_similarity_op.cc rename to paddle/fluid/operators/detection/iou_similarity_op.cc index 4b78ec510d..9c89b7ca9a 100644 --- a/paddle/fluid/operators/iou_similarity_op.cc +++ b/paddle/fluid/operators/detection/iou_similarity_op.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/fluid/operators/iou_similarity_op.h" +#include "paddle/fluid/operators/detection/iou_similarity_op.h" namespace paddle { namespace operators { @@ -42,8 +42,7 @@ class IOUSimilarityOp : public framework::OperatorWithKernel { class IOUSimilarityOpMaker : public framework::OpProtoAndCheckerMaker { public: - IOUSimilarityOpMaker(OpProto *proto, OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("X", "(LoDTensor, default LoDTensor) " "Box list X is a 2-D LoDTensor with shape [N, 4] holds N boxes, " @@ -69,15 +68,16 @@ class IOUSimilarityOpMaker : public framework::OpProtoAndCheckerMaker { "representing pairwise iou scores."); AddComment(R"DOC( -IOU Similarity Operator. +**IOU Similarity Operator** + Computes intersection-over-union (IOU) between two box lists. - Box list 'X' should be a LoDTensor and 'Y' is a common Tensor, - boxes in 'Y' are shared by all instance of the batched inputs of X. - Given two boxes A and B, the calculation of IOU is as follows: +Box list 'X' should be a LoDTensor and 'Y' is a common Tensor, +boxes in 'Y' are shared by all instance of the batched inputs of X. +Given two boxes A and B, the calculation of IOU is as follows: $$ IOU(A, B) = -\frac{area(A\cap B)}{area(A)+area(B)-area(A\cap B)} +\\frac{area(A\\cap B)}{area(A)+area(B)-area(A\\cap B)} $$ )DOC"); diff --git a/paddle/fluid/operators/iou_similarity_op.cu b/paddle/fluid/operators/detection/iou_similarity_op.cu similarity index 92% rename from paddle/fluid/operators/iou_similarity_op.cu rename to paddle/fluid/operators/detection/iou_similarity_op.cu index f40a388d62..8342b4138c 100644 --- a/paddle/fluid/operators/iou_similarity_op.cu +++ b/paddle/fluid/operators/detection/iou_similarity_op.cu @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
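The IOU formula in the DOC string above translates directly into code. A minimal sketch for two corner-form boxes, plain C++ rather than the operator's batched LoD path:

// IOU(A, B) = area(A cap B) / (area(A) + area(B) - area(A cap B))
#include <algorithm>
#include <cstdio>

struct Box { float xmin, ymin, xmax, ymax; };

float Area(const Box& b) {
  return std::max(0.f, b.xmax - b.xmin) * std::max(0.f, b.ymax - b.ymin);
}

float IoU(const Box& a, const Box& b) {
  // The intersection is the overlap of the two coordinate ranges.
  Box inter{std::max(a.xmin, b.xmin), std::max(a.ymin, b.ymin),
            std::min(a.xmax, b.xmax), std::min(a.ymax, b.ymax)};
  float ia = Area(inter);
  float ua = Area(a) + Area(b) - ia;
  return ua > 0.f ? ia / ua : 0.f;
}

int main() {
  std::printf("IoU = %.3f\n", IoU({0, 0, 4, 4}, {2, 2, 6, 6}));
  return 0;  // intersection 2x2 = 4, union 16 + 16 - 4 = 28 -> 0.143
}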
*/ -#include "paddle/fluid/operators/iou_similarity_op.h" +#include "paddle/fluid/operators/detection/iou_similarity_op.h" namespace ops = paddle::operators; REGISTER_OP_CUDA_KERNEL( diff --git a/paddle/fluid/operators/iou_similarity_op.h b/paddle/fluid/operators/detection/iou_similarity_op.h similarity index 100% rename from paddle/fluid/operators/iou_similarity_op.h rename to paddle/fluid/operators/detection/iou_similarity_op.h diff --git a/paddle/fluid/operators/mine_hard_examples_op.cc b/paddle/fluid/operators/detection/mine_hard_examples_op.cc similarity index 98% rename from paddle/fluid/operators/mine_hard_examples_op.cc rename to paddle/fluid/operators/detection/mine_hard_examples_op.cc index 277901cff4..54a4b87ec8 100644 --- a/paddle/fluid/operators/mine_hard_examples_op.cc +++ b/paddle/fluid/operators/detection/mine_hard_examples_op.cc @@ -227,6 +227,9 @@ class MineHardExamplesOp : public framework::OperatorWithKernel { PADDLE_ENFORCE_GT( neg_pos_ratio, 0.0f, "neg_pos_ratio must greater than zero in max_negative mode"); + PADDLE_ENFORCE_LT( + neg_dist_threshold, 1.0f, + "neg_dist_threshold must less than one in max_negative mode"); PADDLE_ENFORCE_GT( neg_dist_threshold, 0.0f, "neg_dist_threshold must greater than zero in max_negative mode"); @@ -253,8 +256,7 @@ class MineHardExamplesOp : public framework::OperatorWithKernel { class MineHardExamplesOpMaker : public framework::OpProtoAndCheckerMaker { public: - MineHardExamplesOpMaker(OpProto* proto, OpAttrChecker* op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput( "ClsLoss", "(Tensor, default Tensor), The classification loss with shape " diff --git a/paddle/fluid/operators/multiclass_nms_op.cc b/paddle/fluid/operators/detection/multiclass_nms_op.cc similarity index 99% rename from paddle/fluid/operators/multiclass_nms_op.cc rename to paddle/fluid/operators/detection/multiclass_nms_op.cc index a12b975326..60b93efdce 100644 --- a/paddle/fluid/operators/multiclass_nms_op.cc +++ b/paddle/fluid/operators/detection/multiclass_nms_op.cc @@ -309,8 +309,7 @@ class MultiClassNMSKernel : public framework::OpKernel { class MultiClassNMSOpMaker : public framework::OpProtoAndCheckerMaker { public: - MultiClassNMSOpMaker(OpProto* proto, OpAttrChecker* op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("BBoxes", "(Tensor) A 3-D Tensor with shape [N, M, 4] represents the " "predicted locations of M bounding bboxes, N is the batch size. " diff --git a/paddle/fluid/operators/detection/polygon_box_transform_op.cc b/paddle/fluid/operators/detection/polygon_box_transform_op.cc new file mode 100644 index 0000000000..568d50d457 --- /dev/null +++ b/paddle/fluid/operators/detection/polygon_box_transform_op.cc @@ -0,0 +1,107 @@ +/* Copyright (c) 2017 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/
+
+#include "paddle/fluid/framework/op_registry.h"
+
+namespace paddle {
+namespace operators {
+
+using Tensor = framework::Tensor;
+
+template <typename T>
+class PolygonBoxTransformCPUKernel : public framework::OpKernel<T> {
+ public:
+  void Compute(const framework::ExecutionContext& ctx) const override {
+    PADDLE_ENFORCE(platform::is_cpu_place(ctx.GetPlace()),
+                   "It must use CPUPlace.");
+    auto* in = ctx.Input<Tensor>("Input");
+    auto in_dims = in->dims();
+    const T* in_data = in->data<T>();
+    auto* out = ctx.Output<Tensor>("Output");
+    T* out_data = out->mutable_data<T>(ctx.GetPlace());
+
+    int batch_size = in_dims[0];
+    int geo_channel = in_dims[1];
+    int height = in_dims[2];
+    int width = in_dims[3];
+    int id = 0;
+    for (int id_n = 0; id_n < batch_size * geo_channel; ++id_n) {
+      for (int id_h = 0; id_h < height; ++id_h) {
+        for (int id_w = 0; id_w < width; ++id_w) {
+          id = id_n * height * width + width * id_h + id_w;
+          if (id_n % 2 == 0) {
+            out_data[id] = id_w - in_data[id];
+          } else {
+            out_data[id] = id_h - in_data[id];
+          }
+        }
+      }
+    }
+  }
+};
+
+class PolygonBoxTransformOp : public framework::OperatorWithKernel {
+ public:
+  using framework::OperatorWithKernel::OperatorWithKernel;
+
+  void InferShape(framework::InferShapeContext* ctx) const override {
+    PADDLE_ENFORCE(
+        ctx->HasInput("Input"),
+        "Input (Input) of polygon_box transform op should not be null.");
+    PADDLE_ENFORCE(
+        ctx->HasOutput("Output"),
+        "Output (Output) of polygon_box transform op should not be null.");
+
+    auto in_dim = ctx->GetInputDim("Input");
+
+    PADDLE_ENFORCE_EQ(in_dim.size(), 4, "input's rank must be 4.");
+    PADDLE_ENFORCE_EQ(in_dim[1] % 2, 0,
+                      "input's second dimension must be even.");
+
+    ctx->SetOutputDim("Output", in_dim);
+  }
+};
+
+class PolygonBoxTransformOpMaker : public framework::OpProtoAndCheckerMaker {
+ public:
+  void Make() override {
+    AddInput(
+        "Input",
+        "The input with shape [batch_size, geometry_channels, height, width]");
+    AddOutput("Output", "The output with the same shape as input");
+
+    AddComment(R"DOC(
+PolygonBoxTransform Operator.
+
+PolygonBoxTransform Operator is used to transform the coordinate shift to the real coordinate.
+
+The input is the final geometry output in detection network.
+We use 2*n numbers to denote the coordinate shift from n corner vertices of
+the polygon_box to the pixel location. As each distance offset contains two
+numbers (xi, yi), the geometry output contains 2*n channels.
+)DOC");
+  }
+};
+
+}  // namespace operators
+}  // namespace paddle
+
+namespace ops = paddle::operators;
+REGISTER_OPERATOR(polygon_box_transform, ops::PolygonBoxTransformOp,
+                  ops::PolygonBoxTransformOpMaker,
+                  paddle::framework::EmptyGradOpMaker);
+REGISTER_OP_CPU_KERNEL(
+    polygon_box_transform,
+    ops::PolygonBoxTransformCPUKernel<float>,
+    ops::PolygonBoxTransformCPUKernel<double>);
diff --git a/paddle/fluid/operators/detection/polygon_box_transform_op.cu b/paddle/fluid/operators/detection/polygon_box_transform_op.cu
new file mode 100644
index 0000000000..6187ac6622
--- /dev/null
+++ b/paddle/fluid/operators/detection/polygon_box_transform_op.cu
@@ -0,0 +1,76 @@
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
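The transform implemented by the CPU kernel above is just pixel index minus offset, with even channels treated as x and odd channels as y. A toy-sized sketch with a hand-picked 1x2x2x2 input:

// PolygonBoxTransform on a toy input: even channels hold x-offsets,
// odd channels hold y-offsets; the output is the absolute coordinate.
#include <cstdio>

int main() {
  const int batch = 1, channels = 2, height = 2, width = 2;
  // in[n][c][h][w], flattened; offsets from each pixel to one vertex.
  const float in[batch * channels * height * width] = {
      0.5f, 0.5f, 0.5f, 0.5f,   // channel 0: x-offsets
      1.0f, 1.0f, 1.0f, 1.0f};  // channel 1: y-offsets
  float out[batch * channels * height * width];

  for (int id_n = 0; id_n < batch * channels; ++id_n)
    for (int id_h = 0; id_h < height; ++id_h)
      for (int id_w = 0; id_w < width; ++id_w) {
        int id = id_n * height * width + id_h * width + id_w;
        // Even channel: x coordinate; odd channel: y coordinate.
        out[id] = (id_n % 2 == 0) ? id_w - in[id] : id_h - in[id];
      }

  for (int id = 0; id < batch * channels * height * width; ++id)
    std::printf("%s%.1f", id % width ? " " : "\n", out[id]);
  std::printf("\n");
  return 0;
}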
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/platform/cuda_primitives.h" +#include "paddle/fluid/platform/gpu_info.h" + +namespace paddle { +namespace operators { + +using Tensor = framework::Tensor; +using platform::PADDLE_CUDA_NUM_THREADS; +#define CUDA_BLOCK_SIZE 16 + +template +__global__ void PolygonBoxTransformKernel(const int n, const int h, const int w, + const T* input, T* output) { + int id_n = threadIdx.x + blockDim.x * blockIdx.x; + int id_h = threadIdx.y + blockDim.y * blockIdx.y; + int id_w = threadIdx.z + blockDim.z * blockIdx.z; + if (id_n < n && id_h < h && id_w < w) { + int id = id_n * h * w + w * id_h + id_w; + if (id_n % 2 == 0) { + output[id] = id_w - input[id]; + } else { + output[id] = id_h - input[id]; + } + } +} + +template +class PolygonBoxTransformOpCUDAKernel : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext& ctx) const override { + PADDLE_ENFORCE(platform::is_gpu_place(ctx.GetPlace()), + "It must use CUDAPlace."); + auto* in = ctx.Input("Input"); + auto in_dims = in->dims(); + const T* in_data = in->data(); + auto* out = ctx.Output("Output"); + T* out_data = out->mutable_data(ctx.GetPlace()); + + int batch_size = in_dims[0]; + int geo_channels = in_dims[1]; + int height = in_dims[2]; + int width = in_dims[3]; + dim3 threadsPerBlock( + PADDLE_CUDA_NUM_THREADS / (CUDA_BLOCK_SIZE * CUDA_BLOCK_SIZE), + CUDA_BLOCK_SIZE, CUDA_BLOCK_SIZE); + dim3 numBlocks((batch_size * geo_channels) / threadsPerBlock.x, + (height + threadsPerBlock.y - 1) / threadsPerBlock.y, + (width + threadsPerBlock.z - 1) / threadsPerBlock.z); + auto stream = ctx.cuda_device_context().stream(); + PolygonBoxTransformKernel<<>>( + batch_size * geo_channels, height, width, in_data, out_data); + } +}; + +} // namespace operators +} // namespace paddle + +REGISTER_OP_CUDA_KERNEL( + polygon_box_transform, + paddle::operators::PolygonBoxTransformOpCUDAKernel, + paddle::operators::PolygonBoxTransformOpCUDAKernel); diff --git a/paddle/fluid/operators/prior_box_op.cc b/paddle/fluid/operators/detection/prior_box_op.cc similarity index 93% rename from paddle/fluid/operators/prior_box_op.cc rename to paddle/fluid/operators/detection/prior_box_op.cc index 058b13eeb8..b5cb6a724c 100644 --- a/paddle/fluid/operators/prior_box_op.cc +++ b/paddle/fluid/operators/detection/prior_box_op.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
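The CUDA variant above spreads (n, h, w) over a 3-D grid. The launch-shape arithmetic can be checked on the host; a sketch assuming PADDLE_CUDA_NUM_THREADS is 512 (an assumption; the constant lives in the platform headers). One caveat worth noting: the x dimension of numBlocks is not rounded up, so batch_size * geo_channels is expected to divide evenly:

// Host-side check of the 3-D launch shape used by the polygon-box CUDA kernel.
#include <cstdio>

int main() {
  const int kNumThreads = 512;  // assumed PADDLE_CUDA_NUM_THREADS
  const int kBlock = 16;        // CUDA_BLOCK_SIZE from the file above
  const int batch = 8, geo_channels = 8, height = 64, width = 64;

  // threadsPerBlock = (512 / (16*16), 16, 16) = (2, 16, 16) -> 512 threads.
  int tx = kNumThreads / (kBlock * kBlock), ty = kBlock, tz = kBlock;
  int bx = (batch * geo_channels) / tx;  // exact division assumed, no round-up
  int by = (height + ty - 1) / ty;
  int bz = (width + tz - 1) / tz;
  std::printf("threads=(%d,%d,%d) blocks=(%d,%d,%d)\n", tx, ty, tz, bx, by, bz);
  return 0;
}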
*/ -#include "paddle/fluid/operators/prior_box_op.h" +#include "paddle/fluid/operators/detection/prior_box_op.h" namespace paddle { namespace operators { @@ -79,8 +79,7 @@ class PriorBoxOp : public framework::OperatorWithKernel { class PriorBoxOpMaker : public framework::OpProtoAndCheckerMaker { public: - PriorBoxOpMaker(OpProto* proto, OpAttrChecker* op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("Input", "(Tensor, default Tensor), " "the input feature data of PriorBoxOp, The layout is NCHW."); @@ -150,6 +149,13 @@ class PriorBoxOpMaker : public framework::OpProtoAndCheckerMaker { "(float) " "Prior boxes center offset.") .SetDefault(0.5); + AddAttr( + "min_max_aspect_ratios_order", + "(bool) If set True, the output prior box is in order of" + "[min, max, aspect_ratios], which is consistent with Caffe." + "Please note, this order affects the weights order of convolution layer" + "followed by and does not affect the final detection results.") + .SetDefault(false); AddComment(R"DOC( Prior box operator Generate prior boxes for SSD(Single Shot MultiBox Detector) algorithm. diff --git a/paddle/fluid/operators/prior_box_op.cu b/paddle/fluid/operators/detection/prior_box_op.cu similarity index 85% rename from paddle/fluid/operators/prior_box_op.cu rename to paddle/fluid/operators/detection/prior_box_op.cu index 0ea8909296..1ea8cfc1d2 100644 --- a/paddle/fluid/operators/prior_box_op.cu +++ b/paddle/fluid/operators/detection/prior_box_op.cu @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/fluid/operators/prior_box_op.h" +#include "paddle/fluid/operators/detection/prior_box_op.h" namespace paddle { namespace operators { @@ -28,8 +28,8 @@ __global__ void GenPriorBox(T* out, const T* aspect_ratios, const int height, const int im_width, const int as_num, const T offset, const T step_width, const T step_height, const T* min_sizes, - const T* max_sizes, const int min_num, - bool is_clip) { + const T* max_sizes, const int min_num, bool is_clip, + bool min_max_aspect_ratios_order) { int num_priors = max_sizes ? 
as_num * min_num + min_num : as_num * min_num; int box_num = height * width * num_priors; for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < box_num; @@ -44,14 +44,28 @@ __global__ void GenPriorBox(T* out, const T* aspect_ratios, const int height, T min_size = min_sizes[m]; if (max_sizes) { int s = p % (as_num + 1); - if (s < as_num) { - T ar = aspect_ratios[s]; - bw = min_size * sqrt(ar) / 2.; - bh = min_size / sqrt(ar) / 2.; + if (!min_max_aspect_ratios_order) { + if (s < as_num) { + T ar = aspect_ratios[s]; + bw = min_size * sqrt(ar) / 2.; + bh = min_size / sqrt(ar) / 2.; + } else { + T max_size = max_sizes[m]; + bw = sqrt(min_size * max_size) / 2.; + bh = bw; + } } else { - T max_size = max_sizes[m]; - bw = sqrt(min_size * max_size) / 2.; - bh = bw; + if (s == 0) { + bw = bh = min_size / 2.; + } else if (s == 1) { + T max_size = max_sizes[m]; + bw = sqrt(min_size * max_size) / 2.; + bh = bw; + } else { + T ar = aspect_ratios[s - 1]; + bw = min_size * sqrt(ar) / 2.; + bh = min_size / sqrt(ar) / 2.; + } } } else { int s = p % as_num; @@ -94,6 +108,8 @@ class PriorBoxOpCUDAKernel : public framework::OpKernel { auto variances = ctx.Attr>("variances"); auto flip = ctx.Attr("flip"); auto clip = ctx.Attr("clip"); + auto min_max_aspect_ratios_order = + ctx.Attr("min_max_aspect_ratios_order"); std::vector aspect_ratios; ExpandAspectRatios(input_aspect_ratio, flip, &aspect_ratios); @@ -149,7 +165,7 @@ class PriorBoxOpCUDAKernel : public framework::OpKernel { GenPriorBox<<>>( boxes->data(), r.data(), height, width, im_height, im_width, aspect_ratios.size(), offset, step_width, step_height, min.data(), - max_data, min_num, clip); + max_data, min_num, clip, min_max_aspect_ratios_order); framework::Tensor v; framework::TensorFromVector(variances, ctx.device_context(), &v); diff --git a/paddle/fluid/operators/prior_box_op.h b/paddle/fluid/operators/detection/prior_box_op.h similarity index 66% rename from paddle/fluid/operators/prior_box_op.h rename to paddle/fluid/operators/detection/prior_box_op.h index 1c62fd8d2c..4e226abbb5 100644 --- a/paddle/fluid/operators/prior_box_op.h +++ b/paddle/fluid/operators/detection/prior_box_op.h @@ -68,6 +68,8 @@ class PriorBoxOpKernel : public framework::OpKernel { auto variances = ctx.Attr>("variances"); auto flip = ctx.Attr("flip"); auto clip = ctx.Attr("clip"); + auto min_max_aspect_ratios_order = + ctx.Attr("min_max_aspect_ratios_order"); std::vector aspect_ratios; ExpandAspectRatios(input_aspect_ratio, flip, &aspect_ratios); @@ -108,26 +110,59 @@ class PriorBoxOpKernel : public framework::OpKernel { int idx = 0; for (size_t s = 0; s < min_sizes.size(); ++s) { auto min_size = min_sizes[s]; - // priors with different aspect ratios - for (size_t r = 0; r < aspect_ratios.size(); ++r) { - float ar = aspect_ratios[r]; - box_width = min_size * sqrt(ar) / 2.; - box_height = min_size / sqrt(ar) / 2.; - e_boxes(h, w, idx, 0) = (center_x - box_width) / img_width; - e_boxes(h, w, idx, 1) = (center_y - box_height) / img_height; - e_boxes(h, w, idx, 2) = (center_x + box_width) / img_width; - e_boxes(h, w, idx, 3) = (center_y + box_height) / img_height; - idx++; - } - if (max_sizes.size() > 0) { - auto max_size = max_sizes[s]; - // square prior with size sqrt(minSize * maxSize) - box_width = box_height = sqrt(min_size * max_size) / 2.; + if (min_max_aspect_ratios_order) { + box_width = box_height = min_size / 2.; e_boxes(h, w, idx, 0) = (center_x - box_width) / img_width; e_boxes(h, w, idx, 1) = (center_y - box_height) / img_height; e_boxes(h, w, idx, 2) = (center_x + 
box_width) / img_width; e_boxes(h, w, idx, 3) = (center_y + box_height) / img_height; idx++; + if (max_sizes.size() > 0) { + auto max_size = max_sizes[s]; + // square prior with size sqrt(minSize * maxSize) + box_width = box_height = sqrt(min_size * max_size) / 2.; + e_boxes(h, w, idx, 0) = (center_x - box_width) / img_width; + e_boxes(h, w, idx, 1) = (center_y - box_height) / img_height; + e_boxes(h, w, idx, 2) = (center_x + box_width) / img_width; + e_boxes(h, w, idx, 3) = (center_y + box_height) / img_height; + idx++; + } + // priors with different aspect ratios + for (size_t r = 0; r < aspect_ratios.size(); ++r) { + float ar = aspect_ratios[r]; + if (fabs(ar - 1.) < 1e-6) { + continue; + } + box_width = min_size * sqrt(ar) / 2.; + box_height = min_size / sqrt(ar) / 2.; + e_boxes(h, w, idx, 0) = (center_x - box_width) / img_width; + e_boxes(h, w, idx, 1) = (center_y - box_height) / img_height; + e_boxes(h, w, idx, 2) = (center_x + box_width) / img_width; + e_boxes(h, w, idx, 3) = (center_y + box_height) / img_height; + idx++; + } + } else { + // priors with different aspect ratios + for (size_t r = 0; r < aspect_ratios.size(); ++r) { + float ar = aspect_ratios[r]; + box_width = min_size * sqrt(ar) / 2.; + box_height = min_size / sqrt(ar) / 2.; + e_boxes(h, w, idx, 0) = (center_x - box_width) / img_width; + e_boxes(h, w, idx, 1) = (center_y - box_height) / img_height; + e_boxes(h, w, idx, 2) = (center_x + box_width) / img_width; + e_boxes(h, w, idx, 3) = (center_y + box_height) / img_height; + idx++; + } + if (max_sizes.size() > 0) { + auto max_size = max_sizes[s]; + // square prior with size sqrt(minSize * maxSize) + box_width = box_height = sqrt(min_size * max_size) / 2.; + e_boxes(h, w, idx, 0) = (center_x - box_width) / img_width; + e_boxes(h, w, idx, 1) = (center_y - box_height) / img_height; + e_boxes(h, w, idx, 2) = (center_x + box_width) / img_width; + e_boxes(h, w, idx, 3) = (center_y + box_height) / img_height; + idx++; + } } } } diff --git a/paddle/fluid/operators/detection/rpn_target_assign_op.cc b/paddle/fluid/operators/detection/rpn_target_assign_op.cc new file mode 100644 index 0000000000..9a1643d5b3 --- /dev/null +++ b/paddle/fluid/operators/detection/rpn_target_assign_op.cc @@ -0,0 +1,283 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
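The two branches above emit the same priors, only in different orders. A sketch that prints both orders for a single cell, assuming the expanded aspect-ratio list contains exactly one entry equal to 1 (the usual SSD configuration, which is why the Caffe-order branch skips ar == 1):

// Order of emitted priors per cell: default vs. min_max_aspect_ratios_order.
#include <cmath>
#include <cstdio>
#include <vector>

static void Emit(const char* tag, float half_w, float half_h) {
  std::printf("%s: half_w=%.2f half_h=%.2f\n", tag, half_w, half_h);
}

int main() {
  const float min_size = 100.f, max_size = 150.f;
  const std::vector<float> ars = {1.f, 2.f, 0.5f};  // after flip expansion

  std::printf("-- default order --\n");
  for (float ar : ars)  // all ratios first; ar == 1 yields the min square
    Emit("ratio", min_size * std::sqrt(ar) / 2, min_size / std::sqrt(ar) / 2);
  Emit("sqrt(min*max) square", std::sqrt(min_size * max_size) / 2,
       std::sqrt(min_size * max_size) / 2);

  std::printf("-- min_max_aspect_ratios_order (Caffe) --\n");
  Emit("min square", min_size / 2, min_size / 2);
  Emit("sqrt(min*max) square", std::sqrt(min_size * max_size) / 2,
       std::sqrt(min_size * max_size) / 2);
  for (float ar : ars) {
    if (std::fabs(ar - 1.f) < 1e-6) continue;  // covered by the min square
    Emit("ratio", min_size * std::sqrt(ar) / 2, min_size / std::sqrt(ar) / 2);
  }
  return 0;  // same priors, different order -- only conv weight layout changes
}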
*/ + +#include +#include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/operators/math/math_function.h" + +namespace paddle { +namespace operators { + +using Tensor = framework::Tensor; +using LoDTensor = framework::LoDTensor; +template +using EigenMatrix = framework::EigenMatrix; + +class RpnTargetAssignOp : public framework::OperatorWithKernel { + public: + using framework::OperatorWithKernel::OperatorWithKernel; + + void InferShape(framework::InferShapeContext* ctx) const override { + PADDLE_ENFORCE(ctx->HasInput("DistMat"), + "Input(DistMat) of RpnTargetAssignOp should not be null"); + + PADDLE_ENFORCE( + ctx->HasOutput("LocationIndex"), + "Output(LocationIndex) of RpnTargetAssignOp should not be null"); + PADDLE_ENFORCE( + ctx->HasOutput("ScoreIndex"), + "Output(ScoreIndex) of RpnTargetAssignOp should not be null"); + PADDLE_ENFORCE( + ctx->HasOutput("TargetLabel"), + "Output(TargetLabel) of RpnTargetAssignOp should not be null"); + + auto in_dims = ctx->GetInputDim("DistMat"); + PADDLE_ENFORCE_EQ(in_dims.size(), 2, + "The rank of Input(DistMat) must be 2."); + } +}; + +template +class RpnTargetAssignKernel : public framework::OpKernel { + public: + void ScoreAssign(const T* dist_data, const Tensor& anchor_to_gt_max, + const int row, const int col, const float pos_threshold, + const float neg_threshold, int64_t* target_label_data, + std::vector* fg_inds, std::vector* bg_inds) const { + int fg_offset = fg_inds->size(); + int bg_offset = bg_inds->size(); + for (int64_t i = 0; i < row; ++i) { + const T* v = dist_data + i * col; + T max_dist = *std::max_element(v, v + col); + for (int64_t j = 0; j < col; ++j) { + T val = dist_data[i * col + j]; + if (val == max_dist) target_label_data[j] = 1; + } + } + + // Pick the fg/bg and count the number + for (int64_t j = 0; j < col; ++j) { + if (anchor_to_gt_max.data()[j] > pos_threshold) { + target_label_data[j] = 1; + } else if (anchor_to_gt_max.data()[j] < neg_threshold) { + target_label_data[j] = 0; + } + if (target_label_data[j] == 1) { + fg_inds->push_back(fg_offset + j); + } else if (target_label_data[j] == 0) { + bg_inds->push_back(bg_offset + j); + } + } + } + + void ReservoirSampling(const int num, const int offset, + std::minstd_rand engine, + std::vector* inds) const { + std::uniform_real_distribution uniform(0, 1); + const int64_t size = static_cast(inds->size()); + if (size > num) { + for (int64_t i = num; i < size; ++i) { + int rng_ind = std::floor(uniform(engine) * i); + if (rng_ind < num) + std::iter_swap(inds->begin() + rng_ind + offset, + inds->begin() + i + offset); + } + } + } + + void RpnTargetAssign(const framework::ExecutionContext& ctx, + const Tensor& dist, const float pos_threshold, + const float neg_threshold, const int rpn_batch_size, + const int fg_num, std::minstd_rand engine, + std::vector* fg_inds, std::vector* bg_inds, + int64_t* target_label_data) const { + auto* dist_data = dist.data(); + int64_t row = dist.dims()[0]; + int64_t col = dist.dims()[1]; + int fg_offset = fg_inds->size(); + int bg_offset = bg_inds->size(); + + // Calculate the max IoU between anchors and gt boxes + Tensor anchor_to_gt_max; + anchor_to_gt_max.mutable_data( + framework::make_ddim({static_cast(col), 1}), + platform::CPUPlace()); + auto& place = *ctx.template device_context() + .eigen_device(); + auto x = EigenMatrix::From(dist); + auto x_col_max = EigenMatrix::From(anchor_to_gt_max); + x_col_max.device(place) = + x.maximum(Eigen::DSizes(0)) + .reshape(Eigen::DSizes(static_cast(col), 1)); + // Follow the Faster RCNN's 
implementation
+    ScoreAssign(dist_data, anchor_to_gt_max, row, col, pos_threshold,
+                neg_threshold, target_label_data, fg_inds, bg_inds);
+    // Reservoir Sampling
+    ReservoirSampling(fg_num, fg_offset, engine, fg_inds);
+    int bg_num = rpn_batch_size - fg_inds->size();
+    ReservoirSampling(bg_num, bg_offset, engine, bg_inds);
+  }
+
+  void Compute(const framework::ExecutionContext& context) const override {
+    auto* dist = context.Input<LoDTensor>("DistMat");
+    auto* loc_index = context.Output<Tensor>("LocationIndex");
+    auto* score_index = context.Output<Tensor>("ScoreIndex");
+    auto* tgt_lbl = context.Output<Tensor>("TargetLabel");
+
+    auto col = dist->dims()[1];
+    int64_t n = dist->lod().size() == 0UL
+                    ? 1
+                    : static_cast<int64_t>(dist->lod().back().size() - 1);
+    if (dist->lod().size()) {
+      PADDLE_ENFORCE_EQ(dist->lod().size(), 1UL,
+                        "Only support 1 level of LoD.");
+    }
+    int rpn_batch_size = context.Attr<int>("rpn_batch_size_per_im");
+    float pos_threshold = context.Attr<float>("rpn_positive_overlap");
+    float neg_threshold = context.Attr<float>("rpn_negative_overlap");
+    float fg_fraction = context.Attr<float>("fg_fraction");
+
+    int fg_num = static_cast<int>(rpn_batch_size * fg_fraction);
+
+    int64_t* target_label_data =
+        tgt_lbl->mutable_data<int64_t>({n * col, 1}, context.GetPlace());
+
+    auto& dev_ctx = context.device_context<platform::CPUDeviceContext>();
+    math::SetConstant<platform::CPUDeviceContext, int64_t> iset;
+    iset(dev_ctx, tgt_lbl, static_cast<int64_t>(-1));
+
+    std::vector<int> fg_inds;
+    std::vector<int> bg_inds;
+    std::random_device rnd;
+    std::minstd_rand engine;
+    int seed =
+        context.Attr<bool>("fix_seed") ? context.Attr<int>("seed") : rnd();
+    engine.seed(seed);
+
+    if (n == 1) {
+      RpnTargetAssign(context, *dist, pos_threshold, neg_threshold,
+                      rpn_batch_size, fg_num, engine, &fg_inds, &bg_inds,
+                      target_label_data);
+    } else {
+      auto lod = dist->lod().back();
+      for (size_t i = 0; i < lod.size() - 1; ++i) {
+        Tensor one_ins = dist->Slice(lod[i], lod[i + 1]);
+        RpnTargetAssign(context, one_ins, pos_threshold, neg_threshold,
+                        rpn_batch_size, fg_num, engine, &fg_inds, &bg_inds,
+                        target_label_data + i * col);
+      }
+    }
+    int* loc_index_data = loc_index->mutable_data<int>(
+        {static_cast<int>(fg_inds.size())}, context.GetPlace());
+    int* score_index_data = score_index->mutable_data<int>(
+        {static_cast<int>(fg_inds.size() + bg_inds.size())},
+        context.GetPlace());
+    memcpy(loc_index_data, reinterpret_cast<int*>(&fg_inds[0]),
+           fg_inds.size() * sizeof(int));
+    memcpy(score_index_data, reinterpret_cast<int*>(&fg_inds[0]),
+           fg_inds.size() * sizeof(int));
+    memcpy(score_index_data + fg_inds.size(),
+           reinterpret_cast<int*>(&bg_inds[0]), bg_inds.size() * sizeof(int));
+  }
+};
+
+class RpnTargetAssignOpMaker : public framework::OpProtoAndCheckerMaker {
+ public:
+  void Make() override {
+    AddInput(
+        "DistMat",
+        "(LoDTensor or Tensor) this input is a 2-D LoDTensor with shape "
+        "[K, M]. It is pair-wise distance matrix between the entities "
+        "represented by each row and each column. For example, assumed one "
+        "entity is A with shape [K], another entity is B with shape [M]. The "
+        "DistMat[i][j] is the distance between A[i] and B[j]. The bigger "
+        "the distance is, the better matching the pairs are. Please note, "
+        "this tensor can contain LoD information to represent a batch of "
+        "inputs. One instance of this batch can contain different numbers of "
+        "entities.");
+    AddAttr<float>(
+        "rpn_positive_overlap",
+        "Minimum overlap required between an anchor and ground-truth "
+        "box for the (anchor, gt box) pair to be a positive example.")
+        .SetDefault(0.7);
+    AddAttr<float>(
+        "rpn_negative_overlap",
+        "Maximum overlap allowed between an anchor and ground-truth "
+        "box for the (anchor, gt box) pair to be a negative example.")
+        .SetDefault(0.3);
+    AddAttr<float>(
+        "fg_fraction",
+        "Target fraction of RoI minibatch that "
+        "is labeled foreground (i.e. class > 0), 0-th class is background.")
+        .SetDefault(0.25);
+    AddAttr<int>("rpn_batch_size_per_im",
+                 "Total number of RPN examples per image.")
+        .SetDefault(256);
+    AddAttr<bool>("fix_seed",
+                  "A flag indicating whether to use a fixed seed to generate "
+                  "random mask. NOTE: DO NOT set this flag to true in "
+                  "training. Setting this flag to true is only useful in "
+                  "unittest.")
+        .SetDefault(false);
+    AddAttr<int>("seed", "RpnTargetAssign random seed.").SetDefault(0);
+    AddOutput(
+        "LocationIndex",
+        "(Tensor), The indexes of foreground anchors in all RPN anchors, the "
+        "shape of the LocationIndex is [F], F depends on the value of input "
+        "tensor and attributes.");
+    AddOutput(
+        "ScoreIndex",
+        "(Tensor), The indexes of foreground and background anchors in all "
+        "RPN anchors (the rest of the anchors are ignored). The shape of the "
+        "ScoreIndex is [F + B], F and B depend on the value of input "
+        "tensor and attributes.");
+    AddOutput("TargetLabel",
+              "(Tensor), The target labels of each anchor with shape "
+              "[K * M, 1], "
+              "K and M are the same as they are in DistMat.");
+    AddComment(R"DOC(
+Given the IoU between the ground-truth boxes and the anchors, this operator
+assigns classification and regression targets to each prediction. The
+ScoreIndex and LocationIndex will be generated according to the DistMat.
+The remaining anchors do not contribute to the RPN training loss.
+
+ScoreIndex is composed of foreground anchor indexes (positive labels) and
+background anchor indexes (negative labels). LocationIndex is exactly the
+same as the foreground anchor indexes, since we cannot assign a regression
+target to the background anchors.
+
+The classification target (TargetLabel) is a binary class label (of being
+an object or not). Following the paper of Faster-RCNN, the positive labels
+are two kinds of anchors: (i) the anchor/anchors with the highest IoU
+overlap with a ground-truth box, or (ii) an anchor that has an IoU overlap
+higher than rpn_positive_overlap (0.7) with any ground-truth box. Note that
+a single ground-truth box may assign positive labels to multiple anchors.
+An anchor is assigned a negative label when its IoU ratio is lower than
+rpn_negative_overlap (0.3) for all ground-truth boxes. Anchors that are
+neither positive nor negative do not contribute to the training objective.
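The ReservoirSampling helper defined earlier in this file keeps a uniform random subset of the fg/bg index lists in place. A standalone sketch of the same idea (std::minstd_rand as in the kernel; the offset bookkeeping for batched inputs is dropped):

// In-place reservoir subsampling: after the pass, the first `num` entries
// of `inds` are a random sample of the whole vector.
#include <algorithm>
#include <cmath>
#include <cstdio>
#include <numeric>
#include <random>
#include <vector>

void ReservoirSampling(int num, std::minstd_rand* engine,
                       std::vector<int>* inds) {
  std::uniform_real_distribution<float> uniform(0, 1);
  int64_t size = static_cast<int64_t>(inds->size());
  for (int64_t i = num; i < size; ++i) {
    // Swap element i into the kept prefix with probability num / i,
    // mirroring the kernel's floor(uniform * i) draw.
    int rng_ind = static_cast<int>(std::floor(uniform(*engine) * i));
    if (rng_ind < num)
      std::iter_swap(inds->begin() + rng_ind, inds->begin() + i);
  }
}

int main() {
  std::minstd_rand engine(42);  // fixed seed, like the op's fix_seed path
  std::vector<int> inds(20);
  std::iota(inds.begin(), inds.end(), 0);  // anchor indices 0..19
  ReservoirSampling(8, &engine, &inds);    // keep 8 foreground anchors
  for (int i = 0; i < 8; ++i) std::printf("%d ", inds[i]);
  std::printf("\n");
  return 0;
}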
+ +)DOC"); + } +}; + +} // namespace operators +} // namespace paddle + +namespace ops = paddle::operators; +REGISTER_OPERATOR(rpn_target_assign, ops::RpnTargetAssignOp, + ops::RpnTargetAssignOpMaker, + paddle::framework::EmptyGradOpMaker); +REGISTER_OP_CPU_KERNEL(rpn_target_assign, ops::RpnTargetAssignKernel, + ops::RpnTargetAssignKernel); diff --git a/paddle/fluid/operators/target_assign_op.cc b/paddle/fluid/operators/detection/target_assign_op.cc similarity index 97% rename from paddle/fluid/operators/target_assign_op.cc rename to paddle/fluid/operators/detection/target_assign_op.cc index 33ff967e5e..3670019392 100644 --- a/paddle/fluid/operators/target_assign_op.cc +++ b/paddle/fluid/operators/detection/target_assign_op.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/fluid/operators/target_assign_op.h" +#include "paddle/fluid/operators/detection/target_assign_op.h" namespace paddle { namespace operators { @@ -65,8 +65,7 @@ class TargetAssignOp : public framework::OperatorWithKernel { class TargetAssignOpMaker : public framework::OpProtoAndCheckerMaker { public: - TargetAssignOpMaker(OpProto* proto, OpAttrChecker* op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("X", "(LoDTensor), This input is a 3D LoDTensor with shape [M, P, K]. " "Some elements in X will be assigned to Out based on the " diff --git a/paddle/fluid/operators/target_assign_op.cu b/paddle/fluid/operators/detection/target_assign_op.cu similarity index 97% rename from paddle/fluid/operators/target_assign_op.cu rename to paddle/fluid/operators/detection/target_assign_op.cu index 24664f99b2..ddf6889942 100644 --- a/paddle/fluid/operators/target_assign_op.cu +++ b/paddle/fluid/operators/detection/target_assign_op.cu @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ -#include "paddle/fluid/operators/target_assign_op.h" +#include "paddle/fluid/operators/detection/target_assign_op.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/target_assign_op.h b/paddle/fluid/operators/detection/target_assign_op.h similarity index 96% rename from paddle/fluid/operators/target_assign_op.h rename to paddle/fluid/operators/detection/target_assign_op.h index 3d52973741..7f989dfca6 100644 --- a/paddle/fluid/operators/target_assign_op.h +++ b/paddle/fluid/operators/detection/target_assign_op.h @@ -106,7 +106,11 @@ class TargetAssignKernel : public framework::OpKernel { int64_t k = x->dims()[2]; auto x_lod = x->lod().back(); +#if defined(PADDLE_WITH_CUDA) size_t* x_lod_data = x_lod.MutableData(ctx.GetPlace()); +#else + size_t* x_lod_data = x_lod.data(); +#endif TargetAssignFunctor functor(x_data, match_idx_data, x_lod_data, mismatch_value, n, m, p, k, out_data, @@ -121,7 +125,11 @@ class TargetAssignKernel : public framework::OpKernel { PADDLE_ENFORCE_EQ(neg_indices->lod().size(), 1UL); const int* neg_idx_data = neg_indices->data(); auto neg_lod = neg_indices->lod().back(); +#if defined(PADDLE_WITH_CUDA) size_t* neg_lod_data = neg_lod.MutableData(ctx.GetPlace()); +#else + size_t* neg_lod_data = neg_lod.data(); +#endif NegTargetAssignFunctor neg_trg_functor; neg_trg_functor(device_ctx, neg_idx_data, neg_lod_data, n, m, k, mismatch_value, out_data, out_wt_data); diff --git a/paddle/fluid/operators/detection_map_op.cc b/paddle/fluid/operators/detection_map_op.cc index 38f43b6d03..d7f49a9590 100644 --- a/paddle/fluid/operators/detection_map_op.cc +++ b/paddle/fluid/operators/detection_map_op.cc @@ -51,7 +51,8 @@ class DetectionMAPOp : public framework::OperatorWithKernel { PADDLE_ENFORCE_EQ(label_dims.size(), 2, "The rank of Input(Label) must be 2, " "the shape is [N, 6]."); - PADDLE_ENFORCE_EQ(label_dims[1], 6, "The shape is of Input(Label) [N, 6]."); + PADDLE_ENFORCE(label_dims[1] == 6 || label_dims[1] == 5, + "The shape of Input(Label) is [N, 6] or [N, 5]."); if (ctx->HasInput("PosCount")) { PADDLE_ENFORCE(ctx->HasInput("TruePos"), @@ -78,8 +79,7 @@ class DetectionMAPOp : public framework::OperatorWithKernel { class DetectionMAPOpMaker : public framework::OpProtoAndCheckerMaker { public: - DetectionMAPOpMaker(OpProto* proto, OpAttrChecker* op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("DetectRes", "(LoDTensor) A 2-D LoDTensor with shape [M, 6] represents the " "detections. Each row has 6 values: " @@ -89,9 +89,10 @@ class DetectionMAPOpMaker : public framework::OpProtoAndCheckerMaker { "offset is N + 1, if LoD[i + 1] - LoD[i] == 0, means there is " "no detected data."); AddInput("Label", - "(LoDTensor) A 2-D LoDTensor with shape[N, 6] represents the" + "(LoDTensor) A 2-D LoDTensor representing the " "Labeled ground-truth data. Each row has 6 values: " - "[label, is_difficult, xmin, ymin, xmax, ymax], N is the total " + "[label, xmin, ymin, xmax, ymax, is_difficult] or 5 values: " + "[label, xmin, ymin, xmax, ymax], where N is the total " "number of ground-truth data in this mini-batch. For each " "instance, the offsets in first dimension are called LoD, " "the number of offset is N + 1, if LoD[i + 1] - LoD[i] == 0, " @@ -174,12 +175,12 @@ class DetectionMAPOpMaker : public framework::OpProtoAndCheckerMaker { AddComment(R"DOC( Detection mAP evaluate operator. The general steps are as follows.
First, calculate the true positive and - false positive according to the input of detection and labels, then - calculate the mAP evaluate value. - Supporting '11 point' and 'integral' mAP algorithm. Please get more information - from the following articles: - https://sanchom.wordpress.com/tag/average-precision/ - https://arxiv.org/abs/1512.02325 +false positive according to the input of detection and labels, then +calculate the mAP value. +Both the '11 point' and 'integral' mAP algorithms are supported. For more +information, please refer to the following articles: +https://sanchom.wordpress.com/tag/average-precision/ +https://arxiv.org/abs/1512.02325 )DOC"); } diff --git a/paddle/fluid/operators/detection_map_op.h b/paddle/fluid/operators/detection_map_op.h index 431812e2bf..dd1ab85fd8 100644 --- a/paddle/fluid/operators/detection_map_op.h +++ b/paddle/fluid/operators/detection_map_op.h @@ -72,7 +72,7 @@ class DetectionMAPOpKernel : public framework::OpKernel { auto* out_false_pos = ctx.Output("AccumFalsePos"); float overlap_threshold = ctx.Attr<float>("overlap_threshold"); - float evaluate_difficult = ctx.Attr<bool>("evaluate_difficult"); + bool evaluate_difficult = ctx.Attr<bool>("evaluate_difficult"); auto ap_type = GetAPType(ctx.Attr<std::string>("ap_type")); int class_num = ctx.Attr<int>("class_num"); @@ -175,14 +175,20 @@ class DetectionMAPOpKernel : public framework::OpKernel { for (int n = 0; n < batch_size; ++n) { std::map<int, std::vector<Box>> boxes; for (size_t i = label_index[n]; i < label_index[n + 1]; ++i) { - Box box(labels(i, 2), labels(i, 3), labels(i, 4), labels(i, 5)); int label = labels(i, 0); - auto is_difficult = labels(i, 1); - if (std::abs(is_difficult - 0.0) < 1e-6) - box.is_difficult = false; - else - box.is_difficult = true; - boxes[label].push_back(box); + if (input_label.dims()[1] == 6) { + Box box(labels(i, 2), labels(i, 3), labels(i, 4), labels(i, 5)); + auto is_difficult = labels(i, 1); + if (std::abs(is_difficult - 0.0) < 1e-6) + box.is_difficult = false; + else + box.is_difficult = true; + boxes[label].push_back(box); + } else { + PADDLE_ENFORCE_EQ(input_label.dims()[1], 5); + Box box(labels(i, 1), labels(i, 2), labels(i, 3), labels(i, 4)); + boxes[label].push_back(box); + } } gt_boxes->push_back(boxes); } diff --git a/paddle/fluid/operators/distributed/CMakeLists.txt b/paddle/fluid/operators/distributed/CMakeLists.txt new file mode 100644 index 0000000000..da5d20505e --- /dev/null +++ b/paddle/fluid/operators/distributed/CMakeLists.txt @@ -0,0 +1,43 @@ +if(NOT WITH_DISTRIBUTE) + return() +endif() + +if(WITH_GRPC) + set(cc_generic_services "false") +else() + set(cc_generic_services "true") +endif() +configure_file(send_recv.proto.in ${CMAKE_CURRENT_SOURCE_DIR}/send_recv.proto @ONLY) + +if(WITH_GRPC) + grpc_library(sendrecvop_grpc SRCS grpc_bytebuffer_stream.cc sendrecvop_utils.cc grpc_client.cc + request_handler_impl.cc rpc_client.cc rpc_server.cc grpc_server.cc variable_response.cc grpc_variable_response.cc grpc_serde.cc + PROTO send_recv.proto + DEPS lod_tensor selected_rows memory) + set(DISTRIBUTE_COMPILE_FLAGS "-Wno-non-virtual-dtor -Wno-error=non-virtual-dtor -Wno-error=delete-non-virtual-dtor") + set_source_files_properties(grpc_serde_test.cc rpc_server_test.cc PROPERTIES COMPILE_FLAGS ${DISTRIBUTE_COMPILE_FLAGS}) + cc_test(grpc_serde_test SRCS grpc_serde_test.cc + DEPS grpc++_unsecure grpc_unsecure gpr cares zlib protobuf sendrecvop_grpc scope profiler math_function SERIAL) + cc_test(rpc_server_test SRCS rpc_server_test.cc + DEPS sendrecvop_grpc grpc++_unsecure grpc_unsecure gpr cares zlib protobuf executor
proto_desc lookup_sparse_table_op SERIAL) + return() +endif() + + +set(DISTRIBUTE_COMPILE_FLAGS "-Wno-non-virtual-dtor -Wno-error=non-virtual-dtor -Wno-error=delete-non-virtual-dtor") + +set_source_files_properties(brpc_server.cc brpc_client.cc rpc_server_test.cc brpc_serde_test.cc + brpc_variable_response.cc brpc_sendrecvop_utils.cc brpc_rdma_pool.cc PROPERTIES COMPILE_FLAGS ${DISTRIBUTE_COMPILE_FLAGS}) + +brpc_library(sendrecvop_brpc SRCS brpc_client.cc brpc_server.cc rpc_server.cc rpc_client.cc request_handler_impl.cc brpc_sendrecvop_utils.cc + brpc_variable_response.cc variable_response.cc sendrecvop_utils.cc brpc_rdma_pool.cc + PROTO send_recv.proto + DEPS lod_tensor selected_rows memory) + +set(brpc_test_depends sendrecvop_brpc brpc ssl crypto protobuf leveldb gflags glog executor proto_desc lookup_table_op snappystream snappy) + +cc_test(brpc_server_test SRCS rpc_server_test.cc + DEPS ${brpc_test_depends} SERIAL) + +cc_test(brpc_serde_test SRCS brpc_serde_test.cc + DEPS ${brpc_test_depends} SERIAL) diff --git a/paddle/fluid/operators/distributed/brpc_client.cc b/paddle/fluid/operators/distributed/brpc_client.cc new file mode 100644 index 0000000000..b394c678fb --- /dev/null +++ b/paddle/fluid/operators/distributed/brpc_client.cc @@ -0,0 +1,180 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "paddle/fluid/operators/distributed/brpc_client.h" +#include "paddle/fluid/framework/threadpool.h" + +namespace paddle { +namespace operators { +namespace distributed { + +DEFINE_int32(brpc_channel_num, 24, + "Number of channels used to send requests to one server"); +DEFINE_int32(timeout_ms, 30000, "RPC timeout in milliseconds"); +DEFINE_int32(max_retry, 3, "Max retries (not including the first RPC)"); + +BRPCClient::~BRPCClient() { Wait(); } + +void HandleSendResponse(brpc::Controller* cntl, + sendrecv::VoidMessage* response) { + // std::unique_ptr makes sure cntl/response will be deleted before returning.
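+ // brpc runs the done closure exactly once when the RPC finishes, so this + // callback is the last user of cntl/response and must free both.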
+ std::unique_ptr<brpc::Controller> cntl_guard(cntl); + std::unique_ptr<sendrecv::VoidMessage> response_guard(response); + + if (cntl->Failed()) { + LOG(WARNING) << "Fail to send variable, " << cntl->ErrorText(); + return; + } + LOG(INFO) << "Received response from " << cntl->remote_side() + << " latency=" << cntl->latency_us() << "us"; +} + +bool BRPCClient::AsyncSendVar(const std::string& ep, + const platform::DeviceContext& ctx, + const framework::Scope& scope, + const std::string& var_name, int64_t time_out) { + const platform::DeviceContext* p_ctx = &ctx; + const std::string ep_val = ep; + const std::string var_name_val = var_name; + const framework::Scope* p_scope = &scope; + const auto ch_ptr = GetChannel(ep_val); + + framework::AsyncIO( + [var_name_val, p_ctx, ep_val, p_scope, time_out, ch_ptr, this] { + auto ch_ctx = ch_ptr->Pop(); + brpc::Controller* cntl = new brpc::Controller(); + sendrecv::VoidMessage* response = new sendrecv::VoidMessage(); + cntl->set_timeout_ms(time_out); + + google::protobuf::Closure* done = + brpc::NewCallback(&HandleSendResponse, cntl, response); + + sendrecv::VariableMessage request; + ch_ctx->stub->SendVariable(cntl, &request, response, done); + }); + req_count_++; + + return true; +} + +void HandleGetResponse(brpc::Controller* cntl, + sendrecv::VariableMessage* response) { + // std::unique_ptr makes sure cntl/response will be deleted before returning. + std::unique_ptr<brpc::Controller> cntl_guard(cntl); + std::unique_ptr<sendrecv::VariableMessage> response_guard(response); + + if (cntl->Failed()) { + LOG(WARNING) << "Fail to get variable, " << cntl->ErrorText(); + return; + } + LOG(INFO) << "Received response from " << cntl->remote_side() + << " latency=" << cntl->latency_us() << "us"; + + // framework::Variable* outvar = nullptr; + // DeserializeFromByteBuffer(ret_msg, *var_h.ctx, var_h.scope, &outvar); +} + +bool BRPCClient::AsyncGetVar(const std::string& ep, + const platform::DeviceContext& ctx, + const framework::Scope& scope, + const std::string& var_name, int64_t time_out) { + const platform::DeviceContext* p_ctx = &ctx; + const std::string ep_val = ep; + const std::string var_name_val = var_name; + const framework::Scope* p_scope = &scope; + const auto ch = GetChannel(ep_val); + + framework::AsyncIO( + [var_name_val, ep_val, p_scope, p_ctx, time_out, ch, this] {}); + + req_count_++; + + return true; +} + +bool BRPCClient::AsyncPrefetchVar(const std::string& ep, + const platform::DeviceContext& ctx, + const framework::Scope& scope, + const std::string& in_var_name, + const std::string& out_var_name, + int64_t time_out) { + const platform::DeviceContext* p_ctx = &ctx; + const std::string ep_val = ep; + const std::string in_var_name_val = in_var_name; + const std::string out_var_name_val = out_var_name; + const framework::Scope* p_scope = &scope; + const auto ch = GetChannel(ep_val); + + framework::AsyncIO([in_var_name_val, out_var_name_val, ep_val, p_scope, p_ctx, + time_out, ch, this] {}); + + req_count_++; + return true; +} + +void BRPCClient::AsyncSendBatchBarrier(const std::string& ep, + int64_t time_out) { + req_count_++; +} + +void BRPCClient::AsyncSendFetchBarrier(const std::string& ep, + int64_t time_out) { + req_count_++; +} + +void BRPCClient::Wait() { + std::unique_lock<std::mutex> lk(sync_mutex_); + sync_cond_.wait(lk, [this] { return req_count_ == 0; }); +} + +ChannelQueuePtr BRPCClient::GetChannel(const std::string& ep) { + { + std::lock_guard<std::mutex> guard(chan_mutex_); + auto it = channels_.find(ep); + if (it != channels_.end()) { + return it->second; + } + } + + ChannelQueuePtr q(new framework::BlockingQueue<ChannelContextPtr>()); + +
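// Build a pool of FLAGS_brpc_channel_num connections to this endpoint; + // each sender Pop()s one ChannelContext per request so that concurrent + // requests are spread across separate connections. +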
brpc::ChannelOptions options; + options.protocol = "baidu_std"; + options.connection_type = "pooled"; + options.connect_timeout_ms = 100; + options.timeout_ms = FLAGS_timeout_ms /*milliseconds*/; + options.max_retry = FLAGS_max_retry; + for (int i = 0; i < FLAGS_brpc_channel_num; ++i) { + std::shared_ptr c(new ChannelContext()); + if (c->channel.Init(ep.c_str(), &options) != 0) { + LOG(ERROR) << "Fail to initialize channel"; + return nullptr; + } + + c->stub.reset(new sendrecv::SendRecvService_Stub( + static_cast(&c->channel))); + q->Push(c); + } + + { + std::lock_guard guard(chan_mutex_); + channels_[ep] = q; + } + + return q; +} + +} // namespace distributed +} // namespace operators +} // namespace paddle diff --git a/paddle/fluid/operators/distributed/brpc_client.h b/paddle/fluid/operators/distributed/brpc_client.h new file mode 100644 index 0000000000..8ff1f0a607 --- /dev/null +++ b/paddle/fluid/operators/distributed/brpc_client.h @@ -0,0 +1,98 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#pragma once + +#include + +#include // NOLINT +#include +#include +#include +#include +#include // NOLINT +#include +#include + +#include "brpc/channel.h" +#include "paddle/fluid/framework/blocking_queue.h" +#include "paddle/fluid/framework/data_type.h" +#include "paddle/fluid/framework/lod_tensor.h" +#include "paddle/fluid/framework/scope.h" +#include "paddle/fluid/framework/selected_rows.h" +#include "paddle/fluid/operators/distributed/rpc_client.h" +#include "paddle/fluid/operators/distributed/send_recv.pb.h" +#include "paddle/fluid/platform/macros.h" // for DISABLE_COPY_AND_ASSIGN + +namespace paddle { +namespace operators { +namespace distributed { + +struct ChannelContext { + brpc::Channel channel; + std::shared_ptr stub; +}; + +typedef std::shared_ptr ChannelContextPtr; +typedef std::shared_ptr> + ChannelQueuePtr; + +class BRPCClient : public RPCClient { + public: + BRPCClient() {} + virtual ~BRPCClient(); + + bool AsyncSendVar(const std::string& ep, const platform::DeviceContext& ctx, + const framework::Scope& scope, const std::string& var_name, + int64_t time_out = FLAGS_rpc_deadline) override; + + bool AsyncGetVar(const std::string& ep, const platform::DeviceContext& ctx, + const framework::Scope& scope, const std::string& var_name, + int64_t time_out = FLAGS_rpc_deadline) override; + + bool AsyncPrefetchVar(const std::string& ep, + const platform::DeviceContext& ctx, + const framework::Scope& scope, + const std::string& in_var_name, + const std::string& out_var_name, + int64_t time_out = FLAGS_rpc_deadline) override; + + void AsyncSendBatchBarrier(const std::string& ep, + int64_t time_out = FLAGS_rpc_deadline) override; + + void AsyncSendFetchBarrier(const std::string& ep, + int64_t time_out = FLAGS_rpc_deadline) override; + + void Wait() override; + + private: + void Proceed(); + ChannelQueuePtr GetChannel(const std::string& ep); + + private: + std::unordered_map channels_; + + // mutex for Wait client sync + std::mutex sync_mutex_; + 
std::condition_variable sync_cond_; + std::atomic<int64_t> req_count_{0}; + + // mutex for GetChannel thread safety + std::mutex chan_mutex_; + DISABLE_COPY_AND_ASSIGN(BRPCClient); +}; + +} // namespace distributed +} // namespace operators +} // namespace paddle diff --git a/paddle/fluid/operators/distributed/brpc_server.cc b/paddle/fluid/operators/distributed/brpc_server.cc new file mode 100644 index 0000000000..862167f020 --- /dev/null +++ b/paddle/fluid/operators/distributed/brpc_server.cc @@ -0,0 +1,144 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "paddle/fluid/operators/distributed/brpc_server.h" +#include "paddle/fluid/operators/distributed/request_handler.h" + +namespace sendrecv { + +typedef std::unordered_map<std::string, paddle::operators::distributed::RequestHandler*> + HandlerMap; + +class BRPCServiceImpl : public SendRecvService { + public: + explicit BRPCServiceImpl(const HandlerMap& rpc_call_map) + : request_send_h_(nullptr), + request_get_h_(nullptr), + request_prefetch_h_(nullptr) { + auto it = rpc_call_map.find(paddle::operators::distributed::kRequestSend); + if (it != rpc_call_map.end()) { + request_send_h_ = it->second; + } + + it = rpc_call_map.find(paddle::operators::distributed::kRequestGet); + if (it != rpc_call_map.end()) { + request_get_h_ = it->second; + } + + it = rpc_call_map.find(paddle::operators::distributed::kRequestPrefetch); + if (it != rpc_call_map.end()) { + request_prefetch_h_ = it->second; + } + } + + virtual ~BRPCServiceImpl() {} + + void SendVariable(google::protobuf::RpcController* cntl_butil, + const VariableMessage* request, VoidMessage* response, + google::protobuf::Closure* done) override { + PADDLE_ENFORCE(request_send_h_ != nullptr, + "RequestSend handler should be registered first!"); + brpc::ClosureGuard done_guard(done); + + paddle::framework::Scope* local_scope = request_send_h_->scope(); + paddle::framework::Variable* outvar = nullptr; + paddle::framework::Variable* invar = nullptr; + + std::string varname = request->varname(); + + if (!request_send_h_->sync_mode()) { + local_scope = &request_send_h_->scope()->NewScope(); + invar = local_scope->Var(varname); + } else { + invar = local_scope->FindVar(varname); + } + + request_send_h_->Handle(varname, local_scope, invar, &outvar); + + if (!request_send_h_->sync_mode()) { + request_send_h_->scope()->DeleteScope(local_scope); + } + } + + void GetVariable(google::protobuf::RpcController* cntl_butil, + const VariableMessage* request, VariableMessage* response, + google::protobuf::Closure* done) override { + PADDLE_ENFORCE(request_get_h_ != nullptr, + "RequestGet handler should be registered first!"); + } + + void PrefetchVariable(google::protobuf::RpcController* cntl_butil, + const VariableMessage* request, + VariableMessage* response, + google::protobuf::Closure* done) override { + PADDLE_ENFORCE(request_prefetch_h_ != nullptr, + "kRequestPrefetch handler should be registered first!"); + } + + private: + paddle::operators::distributed::RequestHandler* request_send_h_; +
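// These handler pointers are non-owning; the handlers are registered + // through the RPCServer's rpc_call_map and live outside this service. +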
paddle::operators::distributed::RequestHandler* request_get_h_; + paddle::operators::distributed::RequestHandler* request_prefetch_h_; +}; +} // namespace sendrecv + +namespace paddle { +namespace operators { +namespace distributed { + +void AsyncBRPCServer::StartServer() { + // Instance of the service. + sendrecv::BRPCServiceImpl service_impl(rpc_call_map_); + + // Add the service to the server. Note the second parameter: the service is + // on the stack, so we don't want the server to delete it; otherwise, + // use brpc::SERVER_OWNS_SERVICE. + if (server_.AddService(&service_impl, brpc::SERVER_DOESNT_OWN_SERVICE) != 0) { + LOG(FATAL) << "Fail to add service"; + return; + } + + brpc::ServerOptions options; + options.idle_timeout_sec = idle_timeout_s_; + options.max_concurrency = max_concurrency_; + if (server_.Start(bind_address_.c_str(), &options) != 0) { + LOG(FATAL) << "Fail to start BRPC server on " << bind_address_; + return; + } + + butil::EndPoint ep = server_.listen_address(); + selected_port_ = ep.port; + + { + std::lock_guard<std::mutex> lock(this->mutex_ready_); + ready_ = 1; + } + condition_ready_.notify_all(); + + server_.Join(); +} + +void AsyncBRPCServer::ShutDownImpl() { server_.Stop(1000); } + +void AsyncBRPCServer::WaitServerReady() { + VLOG(3) << "AsyncBRPCServer is waiting for the server to be ready"; + std::unique_lock<std::mutex> lock(this->mutex_ready_); + condition_ready_.wait(lock, [=] { return this->ready_ == 1; }); + VLOG(3) << "AsyncBRPCServer WaitServerReady"; +} + +} // namespace distributed +} // namespace operators +} // namespace paddle diff --git a/paddle/fluid/operators/distributed/brpc_server.h b/paddle/fluid/operators/distributed/brpc_server.h new file mode 100644 index 0000000000..85a7ad0dfe --- /dev/null +++ b/paddle/fluid/operators/distributed/brpc_server.h @@ -0,0 +1,53 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License.
*/ + +#pragma once + +#include // NOLINT +#include // NOLINT +#include + +#include "brpc/server.h" +#include "paddle/fluid/operators/distributed/rpc_server.h" +#include "paddle/fluid/operators/distributed/send_recv.pb.h" + +namespace paddle { +namespace operators { +namespace distributed { + +class AsyncBRPCServer final : public RPCServer { + public: + explicit AsyncBRPCServer(const std::string& address, int client_num) + : RPCServer(address, client_num), ready_(0) {} + + virtual ~AsyncBRPCServer() {} + void StartServer() override; + void WaitServerReady() override; + + private: + void ShutDownImpl() override; + + brpc::Server server_; + + static constexpr int idle_timeout_s_ = -1; + static constexpr int max_concurrency_ = 0; + + std::mutex mutex_ready_; + std::condition_variable condition_ready_; + int ready_; +}; + +}; // namespace distributed +}; // namespace operators +}; // namespace paddle diff --git a/paddle/fluid/operators/detail/bytebuffer_stream.cc b/paddle/fluid/operators/distributed/grpc_bytebuffer_stream.cc similarity index 94% rename from paddle/fluid/operators/detail/bytebuffer_stream.cc rename to paddle/fluid/operators/distributed/grpc_bytebuffer_stream.cc index a14171563e..d192f54ee0 100644 --- a/paddle/fluid/operators/detail/bytebuffer_stream.cc +++ b/paddle/fluid/operators/distributed/grpc_bytebuffer_stream.cc @@ -17,11 +17,11 @@ limitations under the License. */ // file and did some modifications so that we can send gRPC // requests without too much copying of the tensor data. -#include "paddle/fluid/operators/detail/bytebuffer_stream.h" +#include "paddle/fluid/operators/distributed/grpc_bytebuffer_stream.h" namespace paddle { namespace operators { -namespace detail { +namespace distributed { GrpcByteBufferSource::GrpcByteBufferSource() {} @@ -83,6 +83,6 @@ google::protobuf::int64 GrpcByteBufferSource::ByteCount() const { return byte_count_; } -} // namespace detail +} // namespace distributed } // namespace operators } // namespace paddle diff --git a/paddle/fluid/operators/detail/bytebuffer_stream.h b/paddle/fluid/operators/distributed/grpc_bytebuffer_stream.h similarity index 87% rename from paddle/fluid/operators/detail/bytebuffer_stream.h rename to paddle/fluid/operators/distributed/grpc_bytebuffer_stream.h index 054dd4ff29..e9074574cd 100644 --- a/paddle/fluid/operators/detail/bytebuffer_stream.h +++ b/paddle/fluid/operators/distributed/grpc_bytebuffer_stream.h @@ -24,6 +24,7 @@ limitations under the License. */ #include "google/protobuf/io/coded_stream.h" #include "google/protobuf/io/zero_copy_stream.h" #include "grpc++/grpc++.h" +#include "paddle/fluid/operators/distributed/variable_response.h" namespace grpc { // A ZeroCopyInputStream that reads from grpc_byte_buffer @@ -106,26 +107,7 @@ class GrpcBufferReader final namespace paddle { namespace operators { -namespace detail { -// Source provides a way for a particular RPC implementation to provide -// received data to ParseFrom. -class Source { - public: - virtual ~Source() {} - - // Return the stream that contains the data to be parsed. - // Note that this method might be invoked more than once if - // ParseFrom needs to fall back to a more expensive parsing method. - // Every call must return a stream pointing at the beginning of - // the serialized RecvTensorResponse. - // - // Note that a subsequent call to contents() invalidates previous - // results of contents(). - // - // Ownership of the returned stream is retained by the Source and - // should not be deleted by the caller. 
- virtual ::google::protobuf::io::ZeroCopyInputStream* contents() = 0; -}; +namespace distributed { // A ZeroCopyInputStream that reads from a grpc::ByteBuffer. class GrpcByteBufferSource @@ -183,6 +165,6 @@ class GrpcByteSource : public Source { char space_[sizeof(Reader)]; }; -} // namespace detail +} // namespace distributed } // namespace operators } // namespace paddle diff --git a/paddle/fluid/operators/detail/grpc_client.cc b/paddle/fluid/operators/distributed/grpc_client.cc similarity index 55% rename from paddle/fluid/operators/detail/grpc_client.cc rename to paddle/fluid/operators/distributed/grpc_client.cc index 661dfa69fe..b4f60c9ff9 100644 --- a/paddle/fluid/operators/detail/grpc_client.cc +++ b/paddle/fluid/operators/distributed/grpc_client.cc @@ -12,23 +12,61 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/fluid/operators/detail/grpc_client.h" +#include "paddle/fluid/operators/distributed/grpc_client.h" #include #include +#include "glog/logging.h" // For VLOG #include "paddle/fluid/framework/threadpool.h" +#include "paddle/fluid/operators/distributed/grpc_serde.h" +#include "paddle/fluid/operators/distributed/request_handler.h" +#include "paddle/fluid/platform/profiler.h" namespace paddle { namespace operators { -namespace detail { +namespace distributed { -bool RPCClient::AsyncSendVariable(const std::string& ep, - const platform::DeviceContext& ctx, - const framework::Scope& scope, - const std::string& var_name, - int64_t time_out) { +void GRPCClient::InitImpl() { InitEventLoop(); } + +void GRPCClient::InitEventLoop() { + // start the client process thread + // TODO(wuyi): can make this in a threadpool + client_thread_.reset(new std::thread(std::bind(&GRPCClient::Proceed, this))); +} + +void GRPCClient::SendComplete() { + std::unique_lock lk(completed_mutex_); + if (!completed_) { + for (auto& it : channels_) { + VLOG(3) << "send complete message to " << it.first; + this->AsyncSendComplete(it.first); + } + PADDLE_ENFORCE(this->Wait(), "internal grpc error"); + completed_ = true; + } +} + +GRPCClient::~GRPCClient() { + stopped_ = true; + Wait(); + cq_.Shutdown(); + { + std::lock_guard guard(chan_mutex_); + for (auto& it : channels_) { + it.second.reset(); + } + channels_.clear(); + } + + client_thread_->join(); +} + +bool GRPCClient::AsyncSendVar(const std::string& ep, + const platform::DeviceContext& ctx, + const framework::Scope& scope, + const std::string& var_name, int64_t time_out) { const platform::DeviceContext* p_ctx = &ctx; const std::string ep_val = ep; const std::string var_name_val = var_name; @@ -48,18 +86,20 @@ bool RPCClient::AsyncSendVariable(const std::string& ep, var_h.scope = p_scope; var_h.name = var_name_val; var_h.ctx = p_ctx; + var_h.method = "Send"; + + VLOG(3) << var_h.String() << " begin"; // stub context SendProcessor* s = new SendProcessor(ch); s->Prepare(var_h, time_out); - s->response_call_back_ = NULL; + s->response_call_back_ = nullptr; auto call = s->stub_g_.PrepareUnaryCall( s->context_.get(), "/sendrecv.SendRecvService/SendVariable", req, &cq_); call->StartCall(); call->Finish(&s->reply_, &s->status_, reinterpret_cast(s)); }); - req_count_++; return true; @@ -79,11 +119,10 @@ void RequestToByteBuffer(const T& proto, ::grpc::ByteBuffer* result) { result->Swap(&tmp); } -bool RPCClient::AsyncGetVariable(const std::string& ep, - const platform::DeviceContext& ctx, - const framework::Scope& scope, - 
const std::string& var_name, - int64_t time_out) { +bool GRPCClient::AsyncGetVar(const std::string& ep, + const platform::DeviceContext& ctx, + const framework::Scope& scope, + const std::string& var_name, int64_t time_out) { const platform::DeviceContext* p_ctx = &ctx; const std::string ep_val = ep; const std::string var_name_val = var_name; @@ -104,6 +143,9 @@ bool RPCClient::AsyncGetVariable(const std::string& ep, var_h.scope = p_scope; var_h.name = var_name_val; var_h.ctx = p_ctx; + var_h.method = "Get"; + + VLOG(3) << var_h.String() << " begin"; // stub context GetProcessor* s = new GetProcessor(ch); @@ -121,12 +163,12 @@ bool RPCClient::AsyncGetVariable(const std::string& ep, return true; } -bool RPCClient::AsyncPrefetchVariable(const std::string& ep, - const platform::DeviceContext& ctx, - const framework::Scope& scope, - const std::string& in_var_name, - const std::string& out_var_name, - int64_t time_out) { +bool GRPCClient::AsyncPrefetchVar(const std::string& ep, + const platform::DeviceContext& ctx, + const framework::Scope& scope, + const std::string& in_var_name, + const std::string& out_var_name, + int64_t time_out) { const platform::DeviceContext* p_ctx = &ctx; const std::string ep_val = ep; const std::string in_var_name_val = in_var_name; @@ -147,6 +189,9 @@ bool RPCClient::AsyncPrefetchVariable(const std::string& ep, var_h.scope = p_scope; var_h.name = out_var_name_val; var_h.ctx = p_ctx; + var_h.method = "Prefetch"; + + VLOG(3) << var_h.String() << " begin"; // stub context GetProcessor* s = new GetProcessor(ch); @@ -164,7 +209,8 @@ bool RPCClient::AsyncPrefetchVariable(const std::string& ep, return true; } -void RPCClient::AsyncSendBatchBarrier(const std::string& ep, int64_t time_out) { +void GRPCClient::AsyncSendBatchBarrier(const std::string& ep, + int64_t time_out) { const auto ch = GetChannel(ep); BatchBarrierProcessor* s = new BatchBarrierProcessor(ch); @@ -177,7 +223,8 @@ void RPCClient::AsyncSendBatchBarrier(const std::string& ep, int64_t time_out) { req_count_++; } -void RPCClient::AsyncSendFetchBarrier(const std::string& ep, int64_t time_out) { +void GRPCClient::AsyncSendFetchBarrier(const std::string& ep, + int64_t time_out) { const auto ch = GetChannel(ep); FetchBarrierProcessor* s = new FetchBarrierProcessor(ch); s->Prepare(time_out); @@ -189,79 +236,94 @@ void RPCClient::AsyncSendFetchBarrier(const std::string& ep, int64_t time_out) { req_count_++; } -bool RPCClient::Wait() { - if (req_count_ <= 0) { - return true; - } - const size_t kReqCnt = req_count_; - bool a[kReqCnt]; - std::vector> waits(req_count_); +void GRPCClient::AsyncSendComplete(const std::string& ep, int64_t time_out) { + const auto ch = GetChannel(ep); - for (int i = 0; i < req_count_; i++) { - waits[i] = framework::AsyncIO([i, &a, this] { a[i] = Proceed(); }); - } + BatchBarrierProcessor* s = new BatchBarrierProcessor(ch); + s->Prepare(time_out); - for (int i = 0; i < req_count_; i++) { - waits[i].wait(); - } + sendrecv::VariableMessage req; + req.set_varname(COMPLETE_MESSAGE); + auto rpc = s->stub_->AsyncSendVariable(s->context_.get(), req, &cq_); + rpc->Finish(&s->reply_, &s->status_, reinterpret_cast(s)); + req_count_++; +} - int last_req_count = req_count_; - req_count_ = 0; +void GRPCClient::AsyncCheckpointNotify(const std::string& ep, + const std::string& dir, + int64_t time_out) { + const auto ch = GetChannel(ep); - for (int i = 0; i < last_req_count; i++) { - if (!a[i]) { - return false; - } - } + CheckpointNotifyProcessor* s = new CheckpointNotifyProcessor(ch); + 
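// The notification reuses the VariableMessage envelope: the varname field + // carries the CHECKPOINT_SAVE_MESSAGE tag and out_varname carries the + // checkpoint directory. +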
s->Prepare(time_out); - return true; -} + sendrecv::VariableMessage req; + req.set_varname(CHECKPOINT_SAVE_MESSAGE); + req.set_out_varname(dir); -bool RPCClient::Proceed() { - void* tag = NULL; - bool ok = false; + auto rpc = s->stub_->AsyncCheckpointNotify(s->context_.get(), req, &cq_); + rpc->Finish(&s->reply_, &s->status_, reinterpret_cast(s)); + req_count_++; +} - // request counts. - if (!cq_.Next(&tag, &ok)) { - LOG(ERROR) << "Get meets CompletionQueue error"; - return false; - } +bool GRPCClient::Wait() { + std::unique_lock lk(sync_mutex_); + sync_cond_.wait(lk, [this] { return (req_count_ == 0 || ok_ == false); }); + return ok_; +} - GPR_ASSERT(ok); - PADDLE_ENFORCE(tag); +void GRPCClient::Proceed() { + void* tag = nullptr; + bool ok = false; - // TODO(gongwb): add more retries. - BaseProcessor* c = static_cast(tag); - if (!c->status_.ok()) { - LOG(ERROR) << "proc param error:" << c->var_h_.String() - << " grpc error:" << c->status_.error_message(); + while (!stopped_ && cq_.Next(&tag, &ok)) { + BaseProcessor* c = static_cast(tag); + GPR_ASSERT(ok); + PADDLE_ENFORCE(c); + if (c->status_.ok()) { + VLOG(3) << c->var_h_.String() << " process"; + c->Process(); + } else if (c->status_.error_code() == grpc::StatusCode::DEADLINE_EXCEEDED) { + LOG(ERROR) << c->var_h_.String() + << " meets grpc error:" << c->status_.error_message(); + { + std::lock_guard lk(sync_mutex_); + ok_ = false; + } + sync_cond_.notify_all(); + } else { + LOG(FATAL) << c->var_h_.String() + << " meets grpc error:" << c->status_.error_message(); + } delete c; - return false; + { + std::lock_guard lk(sync_mutex_); + req_count_--; + } + sync_cond_.notify_all(); } - - c->Process(); - delete c; - return true; } -std::shared_ptr RPCClient::GetChannel(const std::string& ep) { +std::shared_ptr GRPCClient::GetChannel(const std::string& ep) { + std::lock_guard guard(chan_mutex_); auto it = channels_.find(ep); if (it != channels_.end()) { return it->second; } + // Channel configurations: grpc::ChannelArguments args; + args.SetInt(GRPC_ARG_MAX_RECONNECT_BACKOFF_MS, 2000); args.SetCompressionAlgorithm(GRPC_COMPRESS_NONE); args.SetMaxSendMessageSize(std::numeric_limits::max()); args.SetMaxReceiveMessageSize(std::numeric_limits::max()); auto ch = grpc::CreateCustomChannel(ep, grpc::InsecureChannelCredentials(), args); - channels_[ep] = ch; return ch; } -} // namespace detail +} // namespace distributed } // namespace operators } // namespace paddle diff --git a/paddle/fluid/operators/detail/grpc_client.h b/paddle/fluid/operators/distributed/grpc_client.h similarity index 54% rename from paddle/fluid/operators/detail/grpc_client.h rename to paddle/fluid/operators/distributed/grpc_client.h index f6229b71bc..0c95ffeb5c 100644 --- a/paddle/fluid/operators/detail/grpc_client.h +++ b/paddle/fluid/operators/distributed/grpc_client.h @@ -16,14 +16,18 @@ limitations under the License. */ #include -#include // NOLINT +#include // NOLINT +#include // NOLINT #include #include #include #include +#include // NOLINT #include +#include // NOLINT #include +#include "grpc++/channel.h" #include "grpc++/generic/generic_stub.h" #include "grpc++/grpc++.h" #include "grpc++/support/byte_buffer.h" @@ -34,45 +38,42 @@ limitations under the License. 
*/ #include "paddle/fluid/framework/lod_tensor.h" #include "paddle/fluid/framework/scope.h" #include "paddle/fluid/framework/selected_rows.h" -#include "paddle/fluid/operators/detail/sendrecvop_utils.h" +#include "paddle/fluid/operators/distributed/request_handler.h" +#include "paddle/fluid/operators/distributed/rpc_client.h" +#include "paddle/fluid/operators/distributed/send_recv.grpc.pb.h" +#include "paddle/fluid/operators/distributed/send_recv.pb.h" +#include "paddle/fluid/operators/distributed/sendrecvop_utils.h" +#include "paddle/fluid/platform/macros.h" // for DISABLE_COPY_AND_ASSIGN namespace paddle { namespace operators { -namespace detail { - -struct VarHandle { - std::string ep; - const platform::DeviceContext* ctx; - const framework::Scope* scope; - std::string name; - - std::string String() const { - std::ostringstream s; - s << "name:[" << name << "] ep:[" << ep << "]"; - return s.str(); - } -}; +namespace distributed { void ProcGetResponse(const VarHandle& var_h, const grpc::ByteBuffer& msg); class BaseProcessor { public: - explicit BaseProcessor(std::shared_ptr ch) { context_ = NULL; } + explicit BaseProcessor(std::shared_ptr ch) { + context_ = nullptr; + } virtual ~BaseProcessor() {} virtual void Prepare(const VarHandle& var_info, int64_t time_out) { context_.reset(new grpc::ClientContext()); var_h_ = var_info; - - std::chrono::system_clock::time_point deadline = - std::chrono::system_clock::now() + std::chrono::milliseconds(time_out); - - context_->set_deadline(deadline); + context_->set_wait_for_ready(true); + if (time_out) { + std::chrono::system_clock::time_point deadline = + std::chrono::system_clock::now() + + std::chrono::milliseconds(time_out); + context_->set_deadline(deadline); + } } virtual void Prepare(int64_t time_out) { context_.reset(new grpc::ClientContext()); + context_->set_wait_for_ready(true); std::chrono::system_clock::time_point deadline = std::chrono::system_clock::now() + std::chrono::milliseconds(time_out); @@ -105,7 +106,7 @@ class SendProcessor : public BaseProcessor { ::grpc::GenericStub stub_g_; ::grpc::ByteBuffer reply_; - RequestSendCallBack response_call_back_ = NULL; + RequestSendCallBack response_call_back_ = nullptr; }; typedef std::function @@ -157,45 +158,89 @@ class FetchBarrierProcessor : public BaseProcessor { std::unique_ptr stub_; }; -class RPCClient { +class CheckpointNotifyProcessor : public BaseProcessor { public: - bool AsyncSendVariable(const std::string& ep, - const platform::DeviceContext& ctx, - const framework::Scope& scope, - const std::string& var_name, - int64_t time_out = 600 * 1000); + explicit CheckpointNotifyProcessor(std::shared_ptr ch) + : BaseProcessor(ch) { + stub_ = sendrecv::SendRecvService::NewStub(ch); + } + + virtual ~CheckpointNotifyProcessor() {} - bool AsyncGetVariable(const std::string& ep, + virtual void Process() {} + sendrecv::VoidMessage reply_; + std::unique_ptr stub_; +}; + +class GRPCClient : public RPCClient { + public: + GRPCClient() : ok_(true), completed_(false), stopped_(false) {} + virtual ~GRPCClient(); + + bool AsyncSendVar(const std::string& ep, const platform::DeviceContext& ctx, + const framework::Scope& scope, const std::string& var_name, + int64_t time_out = FLAGS_rpc_deadline) override; + + bool AsyncGetVar(const std::string& ep, const platform::DeviceContext& ctx, + const framework::Scope& scope, const std::string& var_name, + int64_t time_out = FLAGS_rpc_deadline) override; + + bool AsyncPrefetchVar(const std::string& ep, const platform::DeviceContext& ctx, const framework::Scope& 
scope, - const std::string& var_name, - int64_t time_out = 600 * 1000); - - bool AsyncPrefetchVariable(const std::string& ep, - const platform::DeviceContext& ctx, - const framework::Scope& scope, - const std::string& in_var_name, - const std::string& out_var_name, - int64_t time_out = 600 * 1000); + const std::string& in_var_name, + const std::string& out_var_name, + int64_t time_out = FLAGS_rpc_deadline) override; void AsyncSendBatchBarrier(const std::string& ep, - int64_t time_out = 600 * 1000); + int64_t time_out = FLAGS_rpc_deadline) override; void AsyncSendFetchBarrier(const std::string& ep, - int64_t time_out = 600 * 1000); + int64_t time_out = FLAGS_rpc_deadline) override; + + void AsyncCheckpointNotify(const std::string& ep, const std::string& dir, + int64_t time_out = FLAGS_rpc_deadline) override; + + void AsyncSendComplete(const std::string& ep, + int64_t time_out = FLAGS_rpc_deadline) override; - bool Wait(); + bool Wait() override; + + void SendComplete() override; + + protected: + void InitImpl() override; private: - bool Proceed(); + // InitEventLoop should only be called by Init() + void InitEventLoop(); + + void Proceed(); + std::shared_ptr GetChannel(const std::string& ep); private: grpc::CompletionQueue cq_; - std::map> channels_; - int64_t req_count_ = 0; + std::unordered_map> channels_; + std::unique_ptr client_thread_; + + // mutex for Wait client sync + std::mutex sync_mutex_; + std::condition_variable sync_cond_; + std::atomic req_count_{0}; + bool ok_; + + // mutex for GetChannel thread safety + std::mutex chan_mutex_; + DISABLE_COPY_AND_ASSIGN(GRPCClient); + + // mutex for sending complete message only once + std::mutex completed_mutex_; + bool completed_; + + volatile bool stopped_; }; -} // namespace detail +} // namespace distributed } // namespace operators } // namespace paddle diff --git a/paddle/fluid/operators/distributed/grpc_serde.cc b/paddle/fluid/operators/distributed/grpc_serde.cc new file mode 100644 index 0000000000..3f8796713a --- /dev/null +++ b/paddle/fluid/operators/distributed/grpc_serde.cc @@ -0,0 +1,157 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ + +#ifdef PADDLE_WITH_CUDA +#include +#endif +#include +#include // NOLINT + +#include "google/protobuf/io/coded_stream.h" +#include "google/protobuf/io/zero_copy_stream.h" +#include "paddle/fluid/framework/data_type.h" +#include "paddle/fluid/operators/distributed/grpc_bytebuffer_stream.h" +#include "paddle/fluid/operators/distributed/grpc_serde.h" +#include "paddle/fluid/operators/distributed/grpc_variable_response.h" +#include "paddle/fluid/operators/distributed/proto_encoder_helper.h" +#include "paddle/fluid/operators/distributed/sendrecvop_utils.h" +#include "paddle/fluid/platform/profiler.h" + +namespace paddle { +namespace operators { +namespace distributed { + +void SerializeToByteBuffer(const std::string& name, framework::Variable* var, + const platform::DeviceContext& ctx, + ::grpc::ByteBuffer* msg, + const std::string& out_name) { + // Default DestroyCallback does nothing, When using GPU + // the CPU buffer need to be freed. + DestroyCallback destroy_callback = [](void* backing) {}; + VarMsg request; + void* payload = nullptr; + size_t payload_size; + + request.set_varname(name); + // Note: normally the profiler is enabled in 1 trainer, hence only + // 1 trainer returns true for ShouldSendProfileState(). It tells PS + // servers the trainer's profiling state so that PS can follow the + // trainer. + if (platform::ShouldSendProfileState()) { + if (platform::IsProfileEnabled()) { + request.set_profile(platform::kEnableProfiler); + } else { + request.set_profile(platform::kDisableProfiler); + } + } + if (!out_name.empty()) { + request.set_out_varname(out_name); + } + if (var->IsType()) { + request.set_type(::sendrecv::LOD_TENSOR); + GetTensorPayload(var, ctx, &request, &payload, &payload_size); + } else if (var->IsType()) { + request.set_type(::sendrecv::SELECTED_ROWS); + GetSelectedRowsPayload(var, ctx, &request, &payload, &payload_size); +#ifdef PADDLE_WITH_CUDA + } else if (var->IsType()) { + request.set_type(::sendrecv::NCCL_ID); +#endif + } else { + PADDLE_THROW("Serialize does not support type: %s", + typeid(var->Type()).name()); + } + + if (platform::is_gpu_place(ctx.GetPlace())) { +#ifdef PADDLE_WITH_CUDA + // GPU data is copied to CPU buffer when sending, + // free the buffer when possible. + destroy_callback = [](void* backing) { + platform::CUDAPinnedPlace cuda_pinned; + memory::Free(cuda_pinned, backing); + }; +#endif + } + + std::string header; + request.AppendToString(&header); + auto buffer = std::unique_ptr(new char[1024]); + void* buf = buffer.get(); + ProtoEncodeHelper e(static_cast(buf), 1024); + e.WriteRawBytes(std::string(header.data(), header.size())); +// NCCLID is copied directly to the message, return bytebuffer +// with only one slice if serializing NCCLID. 
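+// For all other variable types, the ByteBuffer below is assembled from +// multiple slices, and the payload slice steals a reference to the existing +// tensor buffer instead of copying it.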
+#ifdef PADDLE_WITH_CUDA + if (var->IsType()) { + e.WriteVarlengthBeginning(VarMsg::kSerializedFieldNumber, + NCCL_UNIQUE_ID_BYTES); + const ncclUniqueId& uid = var->Get(); + e.WriteRawBytes(std::string(uid.internal, NCCL_UNIQUE_ID_BYTES)); + + // for serialize NCCL_ID + ::grpc::Slice slices(e.size()); + memcpy(const_cast(slices.begin()), e.data(), e.size()); + ::grpc::ByteBuffer tmp(&slices, 1); + msg->Swap(&tmp); + return; + } +#endif + + e.WriteVarlengthBeginning(VarMsg::kSerializedFieldNumber, payload_size); + // steal reference of tensor data + ::grpc::Slice slices[4]; // metadata, tensor, rows meta, rows + int num_slices = 2; // only SelectedRows have rows buffer + slices[0] = ::grpc::Slice(e.size()); + memcpy(const_cast(slices[0].begin()), e.data(), e.size()); + slices[1] = ::grpc::Slice( + grpc_slice_new_with_user_data(payload, payload_size, destroy_callback, + static_cast(payload)), + ::grpc::Slice::STEAL_REF); + + if (var->IsType()) { + auto* slr = var->GetMutable(); + ProtoEncodeHelper e2(static_cast(buf), 128); + size_t rows_memory_size = + slr->rows().size() * framework::SizeOfType(typeid(int64_t)); + e2.WriteVarlengthBeginning(VarMsg::kRowsFieldNumber, rows_memory_size); + slices[2] = ::grpc::Slice(e2.size()); + memcpy(const_cast(slices[2].begin()), e2.data(), e2.size()); + + slices[3] = ::grpc::Slice( + grpc_slice_new_with_user_data( + const_cast( + reinterpret_cast(slr->rows().data())), + rows_memory_size, [](void* backing) {}, + const_cast( + reinterpret_cast(slr->rows().data()))), + ::grpc::Slice::STEAL_REF); + num_slices = 4; + } + + ::grpc::ByteBuffer tmp(&slices[0], num_slices); + msg->Swap(&tmp); +} + +void DeserializeFromByteBuffer(const ::grpc::ByteBuffer& msg, + const platform::DeviceContext& ctx, + const framework::Scope* scope, + framework::Variable** var) { + operators::distributed::GRPCVariableResponse resp(scope, &ctx); + PADDLE_ENFORCE(resp.Parse(msg) == 0, "parse bytebuffer to tensor error!"); + *var = resp.GetVar(); +} + +} // namespace distributed +} // namespace operators +} // namespace paddle diff --git a/paddle/fluid/operators/distributed/grpc_serde.h b/paddle/fluid/operators/distributed/grpc_serde.h new file mode 100644 index 0000000000..450c41dcd6 --- /dev/null +++ b/paddle/fluid/operators/distributed/grpc_serde.h @@ -0,0 +1,50 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ + +#pragma once +#include +#include +#include +#include + +#include "paddle/fluid/framework/data_type.h" +#include "paddle/fluid/framework/lod_tensor.h" +#include "paddle/fluid/framework/scope.h" +#include "paddle/fluid/framework/selected_rows.h" +#include "paddle/fluid/framework/tensor_util.h" +#include "paddle/fluid/framework/var_type.h" +#include "paddle/fluid/operators/distributed/sendrecvop_utils.h" + +#include "paddle/fluid/operators/distributed/send_recv.grpc.pb.h" +#include "paddle/fluid/operators/distributed/send_recv.pb.h" + +namespace paddle { +namespace operators { +namespace distributed { + +typedef void (*DestroyCallback)(void*); + +void SerializeToByteBuffer(const std::string& name, framework::Variable* var, + const platform::DeviceContext& ctx, + ::grpc::ByteBuffer* msg, + const std::string& out_varname = std::string()); + +void DeserializeFromByteBuffer(const ::grpc::ByteBuffer& msg, + const platform::DeviceContext& ctx, + const framework::Scope* scope, + framework::Variable** var); + +} // namespace distributed +} // namespace operators +} // namespace paddle diff --git a/paddle/fluid/operators/detail/serde_test.cc b/paddle/fluid/operators/distributed/grpc_serde_test.cc similarity index 89% rename from paddle/fluid/operators/detail/serde_test.cc rename to paddle/fluid/operators/distributed/grpc_serde_test.cc index e9eaaf1cbc..96ea05e74e 100644 --- a/paddle/fluid/operators/detail/serde_test.cc +++ b/paddle/fluid/operators/distributed/grpc_serde_test.cc @@ -21,8 +21,10 @@ limitations under the License. */ #include "paddle/fluid/framework/lod_tensor.h" #include "paddle/fluid/framework/tensor_util.h" #include "paddle/fluid/framework/variable.h" -#include "paddle/fluid/operators/detail/sendrecvop_utils.h" -#include "paddle/fluid/operators/detail/variable_response.h" +#include "paddle/fluid/operators/detail/macros.h" +#include "paddle/fluid/operators/distributed/grpc_serde.h" +#include "paddle/fluid/operators/distributed/grpc_variable_response.h" +#include "paddle/fluid/operators/distributed/sendrecvop_utils.h" #include "paddle/fluid/operators/math/math_function.h" #include "paddle/fluid/platform/place.h" #include "paddle/fluid/string/printf.h" @@ -50,7 +52,7 @@ void RunSerdeTestSelectedRows(platform::Place place) { for (int i = 0; i < 564; ++i) rows->push_back(i); ::grpc::ByteBuffer msg; - operators::detail::SerializeToByteBuffer("myvar", &var, ctx, &msg); + operators::distributed::SerializeToByteBuffer("myvar", &var, ctx, &msg); EXPECT_GT(msg.Length(), static_cast(0)); // deserialize @@ -81,10 +83,10 @@ void RunSerdeTestSelectedRows(platform::Place place) { // deserialize zero-copy // framework::Variable var2; - // operators::detail::DeserializeFromByteBuffer(msg, ctx, &var2); + // operators::distributed::DeserializeFromByteBuffer(msg, ctx, &var2); framework::Scope scope; scope.Var("myvar"); - operators::detail::VariableResponse resp(&scope, &ctx); + operators::distributed::GRPCVariableResponse resp(&scope, &ctx); EXPECT_EQ(resp.Parse(msg), 0); framework::Variable* var2 = resp.GetVar(); @@ -117,18 +119,18 @@ void RunTestLodTensor(platform::Place place, int from_type = 0) { // serialize var to ByteBuffer framework::Variable var; auto* tensor = var.GetMutable(); - tensor->Resize(framework::make_ddim({4, 8, 4, 2})); + tensor->Resize(framework::make_ddim({512, 8, 4, 2})); framework::LoD lod; lod.push_back(framework::Vector({1, 3, 8})); tensor->set_lod(lod); - int tensor_numel = 4 * 8 * 4 * 2; + int tensor_numel = 512 * 8 * 4 * 2; platform::DeviceContextPool& pool = 
platform::DeviceContextPool::Instance(); auto& ctx = *pool.Get(place); tensor->mutable_data(place); math::set_constant(ctx, tensor, 31.9); ::grpc::ByteBuffer msg; - operators::detail::SerializeToByteBuffer("myvar", &var, ctx, &msg); + operators::distributed::SerializeToByteBuffer("myvar", &var, ctx, &msg); EXPECT_GT(msg.Length(), static_cast(0)); // deserialize @@ -142,7 +144,7 @@ void RunTestLodTensor(platform::Place place, int from_type = 0) { EXPECT_TRUE(varmsg.ParseFromString(tmp)); EXPECT_EQ(varmsg.varname(), "myvar"); EXPECT_EQ(varmsg.type(), 0); - EXPECT_EQ(varmsg.dims()[0], 4); + EXPECT_EQ(varmsg.dims()[0], 512); EXPECT_EQ(varmsg.dims()[1], 8); EXPECT_EQ(varmsg.dims()[2], 4); EXPECT_EQ(varmsg.dims()[3], 2); @@ -171,7 +173,7 @@ void RunTestLodTensor(platform::Place place, int from_type = 0) { // deserialize zero-copy framework::Scope scope; scope.Var("myvar"); - operators::detail::VariableResponse resp(&scope, &ctx); + operators::distributed::GRPCVariableResponse resp(&scope, &ctx); if (from_type == 0) { EXPECT_EQ(resp.Parse(msg), 0); } else { diff --git a/paddle/fluid/operators/distributed/grpc_server.cc b/paddle/fluid/operators/distributed/grpc_server.cc new file mode 100644 index 0000000000..8edb00276d --- /dev/null +++ b/paddle/fluid/operators/distributed/grpc_server.cc @@ -0,0 +1,415 @@ +/*Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ + +#include +#include + +#include "paddle/fluid/operators/distributed/grpc_serde.h" +#include "paddle/fluid/operators/distributed/grpc_server.h" + +using ::grpc::ServerAsyncResponseWriter; + +namespace paddle { +namespace operators { +namespace distributed { +enum CallStatus { PROCESS = 0, FINISH }; + +// reference: +// https://stackoverflow.com/questions/41732884/grpc-multiple-services-in-cpp-async-server +class RequestBase { + public: + explicit RequestBase(GrpcService::AsyncService* service, + ::grpc::ServerCompletionQueue* cq, + RequestHandler* request_handler, int req_id) + : service_(service), + cq_(cq), + status_(PROCESS), + request_handler_(request_handler), + req_id_(req_id) { + PADDLE_ENFORCE(cq_); + } + virtual ~RequestBase() {} + virtual void Process() = 0; + + std::string Status2String(const std::string& method) { + std::string status = "Process"; + if (status_ == FINISH) { + status = "Finish"; + } + + std::ostringstream s; + s << method << " name:[" << GetReqName() << "]" + << ", ep:[" << ctx_.peer() << "]" + << " " << status << " using req_id:" << req_id_; + return s.str(); + } + + CallStatus Status() const { + std::lock_guard l(status_mu_); + return status_; + } + + template + void Finish(const T& reply, ServerAsyncResponseWriter* responder) { + std::lock_guard l(status_mu_); + status_ = FINISH; + responder->Finish(reply, ::grpc::Status::OK, + reinterpret_cast(static_cast(req_id_))); + } + virtual std::string GetReqName() = 0; + + protected: + mutable std::mutex status_mu_; + ::grpc::ServerContext ctx_; + GrpcService::AsyncService* service_; + ::grpc::ServerCompletionQueue* cq_; + CallStatus status_; + RequestHandler* request_handler_; + int req_id_; +}; + +class RequestSend final : public RequestBase { + public: + explicit RequestSend(GrpcService::AsyncService* service, + ::grpc::ServerCompletionQueue* cq, + RequestHandler* request_handler, int req_id) + : RequestBase(service, cq, request_handler, req_id), responder_(&ctx_) { + request_.reset(new GRPCVariableResponse(request_handler->scope(), + request_handler->dev_ctx(), + !request_handler->sync_mode())); + int method_id = static_cast(distributed::GrpcMethod::kSendVariable); + service_->RequestAsyncUnary( + method_id, &ctx_, request_.get(), &responder_, cq_, cq_, + reinterpret_cast(static_cast(req_id))); + } + virtual ~RequestSend() {} + std::string GetReqName() override { return request_->Varname(); } + + void Process() override { + std::string varname = GetReqName(); + VLOG(4) << "RequestSend var_name:" << varname; + + auto scope = request_->GetMutableLocalScope(); + auto invar = request_->GetVar(); + framework::Variable* outvar = nullptr; + + request_handler_->Handle(varname, scope, invar, &outvar); + Finish(reply_, &responder_); + } + + protected: + sendrecv::VoidMessage reply_; + std::shared_ptr request_; + ServerAsyncResponseWriter responder_; +}; + +class RequestGet final : public RequestBase { + public: + explicit RequestGet(GrpcService::AsyncService* service, + ::grpc::ServerCompletionQueue* cq, + RequestHandler* request_handler, int req_id) + : RequestBase(service, cq, request_handler, req_id), responder_(&ctx_) { + auto method_id = static_cast(distributed::GrpcMethod::kGetVariable); + service_->RequestAsyncUnary( + method_id, &ctx_, &request_, &responder_, cq_, cq_, + reinterpret_cast(static_cast(req_id))); + } + + virtual ~RequestGet() {} + + std::string GetReqName() override { return request_.varname(); } + + void Process() override { + // proc request. 
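+ // Look up the variable in the server-side scope, run the registered + // RequestGet handler on it, and serialize the result into reply_ if found.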
+ std::string varname = request_.varname(); + VLOG(4) << "RequestGet " << varname; + + auto scope = request_handler_->scope(); + auto invar = scope->FindVar(varname); + framework::Variable* outvar = nullptr; + + request_handler_->Handle(varname, scope, invar, &outvar); + + if (outvar) { + SerializeToByteBuffer(varname, outvar, *request_handler_->dev_ctx(), + &reply_); + } + Finish(reply_, &responder_); + } + + protected: + sendrecv::VariableMessage request_; + ::grpc::ByteBuffer reply_; + ServerAsyncResponseWriter<::grpc::ByteBuffer> responder_; +}; + +class RequestPrefetch final : public RequestBase { + public: + explicit RequestPrefetch(GrpcService::AsyncService* service, + ::grpc::ServerCompletionQueue* cq, + RequestHandler* request_handler, int req_id) + : RequestBase(service, cq, request_handler, req_id), + responder_(&ctx_), + local_scope_(nullptr) { + request_.reset(new GRPCVariableResponse(request_handler->scope(), + request_handler->dev_ctx(), true)); + int method_id = + static_cast(distributed::GrpcMethod::kPrefetchVariable); + service_->RequestAsyncUnary( + method_id, &ctx_, request_.get(), &responder_, cq_, cq_, + reinterpret_cast(static_cast(req_id))); + } + + virtual ~RequestPrefetch() {} + + std::string GetReqName() override { return request_->Varname(); } + + void Process() override { + // prefetch process... + std::string in_var_name = request_->Varname(); + std::string out_var_name = request_->OutVarname(); + VLOG(4) << "RequestPrefetch, in_var_name: " << in_var_name + << " out_var_name: " << out_var_name; + + auto scope = request_->GetMutableLocalScope(); + auto invar = scope->FindVar(in_var_name); + // out var must be created in local scope! + framework::Variable* outvar = scope->Var(out_var_name); + + request_handler_->Handle(in_var_name, scope, invar, &outvar, out_var_name); + + SerializeToByteBuffer(out_var_name, outvar, *request_handler_->dev_ctx(), + &reply_); + Finish(reply_, &responder_); + } + + protected: + std::shared_ptr request_; + ::grpc::ByteBuffer reply_; + ServerAsyncResponseWriter<::grpc::ByteBuffer> responder_; + framework::Scope* local_scope_; +}; + +class RequestCheckpointNotify final : public RequestBase { + public: + explicit RequestCheckpointNotify(GrpcService::AsyncService* service, + ::grpc::ServerCompletionQueue* cq, + RequestHandler* request_handler, int req_id) + : RequestBase(service, cq, request_handler, req_id), responder_(&ctx_) { + request_.reset(new GRPCVariableResponse(request_handler->scope(), + request_handler->dev_ctx())); + int method_id = + static_cast(distributed::GrpcMethod::kCheckpointNotify); + service_->RequestAsyncUnary( + method_id, &ctx_, request_.get(), &responder_, cq_, cq_, + reinterpret_cast(static_cast(req_id))); + } + + virtual ~RequestCheckpointNotify() {} + + std::string GetReqName() override { return request_->Varname(); } + + void Process() override { + auto scope = request_->GetMutableLocalScope(); + + std::string checkpoint_notify = request_->Varname(); + std::string checkpoint_dir = request_->OutVarname(); + + VLOG(4) << "RequestCheckpointNotify notify: " << checkpoint_notify + << ", dir: " << checkpoint_dir; + + request_handler_->Handle(checkpoint_notify, scope, nullptr, nullptr, + checkpoint_dir); + Finish(reply_, &responder_); + } + + protected: + std::shared_ptr request_; + sendrecv::VoidMessage reply_; + ServerAsyncResponseWriter responder_; +}; + +void AsyncGRPCServer::WaitServerReady() { + VLOG(4) << "AsyncGRPCServer is wait server ready"; + std::unique_lock lock(this->mutex_ready_); + 
condition_ready_.wait(lock, [=] { return this->ready_ == 1; }); + VLOG(4) << "AsyncGRPCServer WaitSeverReady"; +} + +void AsyncGRPCServer::StartServer() { + ::grpc::ServerBuilder builder; + builder.AddListeningPort(bind_address_, ::grpc::InsecureServerCredentials(), + &selected_port_); + + builder.SetMaxSendMessageSize(std::numeric_limits::max()); + builder.SetMaxReceiveMessageSize(std::numeric_limits::max()); + builder.RegisterService(&service_); + + for (auto t : rpc_call_map_) { + rpc_cq_[t.first].reset(builder.AddCompletionQueue().release()); + } + + server_ = builder.BuildAndStart(); + LOG(INFO) << "Server listening on " << bind_address_ + << " selected port: " << selected_port_; + + std::function f = + std::bind(&AsyncGRPCServer::TryToRegisterNewOne, this, + std::placeholders::_1, std::placeholders::_2); + + for (auto& t : rpc_call_map_) { + auto& rpc_name = t.first; + auto& cq = rpc_cq_[rpc_name]; + auto threadnum = rpc_thread_num_[rpc_name]; + auto& reqs = rpc_reqs_[rpc_name]; + + reqs.reserve(kRequestBufSize); + + for (int i = 0; i < kRequestBufSize; i++) { + VLOG(6) << "TryToRegisterNewOne on RPC NAME: " << rpc_name << " I: " << i; + TryToRegisterNewOne(rpc_name, i); + } + + for (int i = 0; i < threadnum; i++) { + rpc_threads_[rpc_name].emplace_back(new std::thread(std::bind( + &AsyncGRPCServer::HandleRequest, this, cq.get(), rpc_name, f))); + VLOG(4) << t.first << " creates threads!"; + } + } + + { + std::lock_guard lock(this->mutex_ready_); + ready_ = 1; + } + condition_ready_.notify_all(); + + // wait server + server_->Wait(); + + for (auto& t : rpc_threads_) { + auto& threads = t.second; + for (size_t i = 0; i < threads.size(); ++i) { + threads[i]->join(); + VLOG(4) << t.first << " threads ends!"; + } + } +} + +void AsyncGRPCServer::ShutdownQueue() { + for (auto& t : rpc_cq_) { + t.second->Shutdown(); + VLOG(4) << t.first << " queue shutdown!"; + } +} + +void AsyncGRPCServer::ShutDownImpl() { + std::unique_lock lock(cq_mutex_); + is_shut_down_ = true; + ShutdownQueue(); + + VLOG(4) << "server_ shutdown!"; + server_->Shutdown(); +} + +void AsyncGRPCServer::TryToRegisterNewOne(const std::string& rpc_name, + int req_id) { + std::unique_lock lock(cq_mutex_); + if (is_shut_down_) { + VLOG(4) << "shutdown, do not TryToRegisterNewSendOne"; + return; + } + + VLOG(4) << "TryToRegisterNewOne on RPC NAME: " << rpc_name + << " REQ ID: " << req_id; + + auto& reqs = rpc_reqs_[rpc_name]; + auto& handler = rpc_call_map_[rpc_name]; + auto& cq = rpc_cq_[rpc_name]; + + RequestBase* b = nullptr; + if (rpc_name == kRequestSend) { + b = new RequestSend(&service_, cq.get(), handler, req_id); + } else if (rpc_name == kRequestGet) { + b = new RequestGet(&service_, cq.get(), handler, req_id); + } else if (rpc_name == kRequestPrefetch) { + b = new RequestPrefetch(&service_, cq.get(), handler, req_id); + } else if (rpc_name == kRequestCheckpoint) { + b = new RequestCheckpointNotify(&service_, cq.get(), handler, req_id); + } else { + PADDLE_ENFORCE(false, "not supported rpc"); + } + + reqs[req_id] = b; + + VLOG(4) << "Create RequestSend status:" << b->Status(); +} + +void AsyncGRPCServer::HandleRequest( + ::grpc::ServerCompletionQueue* cq, const std::string& rpc_name, + std::function TryToRegisterNewOne) { + void* tag = NULL; + bool ok = false; + + while (true) { + VLOG(4) << "HandleRequest " << rpc_name << " wait next"; + if (!cq->Next(&tag, &ok)) { + VLOG(3) << "CompletionQueue " << rpc_name << " shutdown!"; + break; + } + + int req_id = static_cast(reinterpret_cast(tag)); + VLOG(4) << "HandleRequest " 
<< rpc_name << ", req_id:" << req_id + << " get next"; + + auto& reqs = rpc_reqs_[rpc_name]; + RequestBase* base = nullptr; + { + PADDLE_ENFORCE(req_id >= 0 && req_id < kRequestBufSize); + std::unique_lock lock(cq_mutex_); + base = reqs[req_id]; + } + + VLOG(3) << base->Status2String(rpc_name); + + // reference: + // https://github.com/tensorflow/tensorflow/issues/5596 + // https://groups.google.com/forum/#!topic/grpc-io/xftlRy-IQwM + // https://groups.google.com/forum/#!topic/grpc-io/ywATt88Ef_I + if (!ok) { + LOG(WARNING) << "completion queue:" << rpc_name + << " recv no regular event" + << " context:" << base->Status2String(rpc_name); + TryToRegisterNewOne(rpc_name, req_id); + delete base; + continue; + } + + switch (base->Status()) { + case PROCESS: { + base->Process(); + break; + } + case FINISH: { + TryToRegisterNewOne(rpc_name, req_id); + delete base; + break; + } + default: { assert(false); } + } + } +} + +} // namespace distributed +} // namespace operators +} // namespace paddle diff --git a/paddle/fluid/operators/distributed/grpc_server.h b/paddle/fluid/operators/distributed/grpc_server.h new file mode 100644 index 0000000000..d2524f5e65 --- /dev/null +++ b/paddle/fluid/operators/distributed/grpc_server.h @@ -0,0 +1,89 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#pragma once + +#include +#include +#include +#include // NOLINT +#include +#include + +#include "grpc++/grpc++.h" +#include "paddle/fluid/framework/blocking_queue.h" +#include "paddle/fluid/framework/executor.h" +#include "paddle/fluid/framework/lod_tensor.h" +#include "paddle/fluid/framework/program_desc.h" +#include "paddle/fluid/framework/scope.h" +#include "paddle/fluid/framework/selected_rows.h" +#include "paddle/fluid/framework/var_type.h" +#include "paddle/fluid/operators/distributed/grpc_service.h" +#include "paddle/fluid/operators/distributed/request_handler.h" +#include "paddle/fluid/operators/distributed/rpc_server.h" +#include "paddle/fluid/operators/distributed/send_recv.grpc.pb.h" +#include "paddle/fluid/operators/distributed/send_recv.pb.h" +#include "paddle/fluid/operators/distributed/sendrecvop_utils.h" +#include "paddle/fluid/platform/profiler.h" + +namespace paddle { +namespace operators { +namespace distributed { + +class RequestBase; + +class AsyncGRPCServer final : public RPCServer { + public: + explicit AsyncGRPCServer(const std::string& address, int client_num) + : RPCServer(address, client_num), ready_(0) {} + + virtual ~AsyncGRPCServer() {} + void WaitServerReady() override; + void StartServer() override; + + private: + // HandleRequest needs to be thread-safe. 
+ void HandleRequest( + ::grpc::ServerCompletionQueue* cq, const std::string& rpc_name, + std::function TryToRegisterNewOne); + + void TryToRegisterNewOne(const std::string& rpc_name, int req_id); + void ShutdownQueue(); + void ShutDownImpl() override; + + private: + static const int kRequestBufSize = 100; + + std::mutex cq_mutex_; + volatile bool is_shut_down_ = false; + + GrpcService::AsyncService service_; + std::unique_ptr<::grpc::Server> server_; + + // condition of the sub program + std::condition_variable barrier_condition_; + + std::mutex mutex_ready_; + std::condition_variable condition_ready_; + + int ready_; + + std::map> rpc_cq_; + std::map>> rpc_threads_; + std::map> rpc_reqs_; +}; + +}; // namespace distributed +}; // namespace operators +}; // namespace paddle diff --git a/paddle/fluid/operators/detail/grpc_service.h b/paddle/fluid/operators/distributed/grpc_service.h similarity index 82% rename from paddle/fluid/operators/detail/grpc_service.h rename to paddle/fluid/operators/distributed/grpc_service.h index e6dab2f5a3..9ae9a31a00 100644 --- a/paddle/fluid/operators/detail/grpc_service.h +++ b/paddle/fluid/operators/distributed/grpc_service.h @@ -23,7 +23,8 @@ #include #include #include -#include "paddle/fluid/operators/detail/variable_response.h" +#include "paddle/fluid/operators/distributed/grpc_variable_response.h" +#include "paddle/fluid/platform/profiler.h" // NOTE: This method was originally created by tensorflow // (https://github.com/tensorflow/tensorflow/) we borrow this @@ -40,24 +41,26 @@ class ServerContext; // Support parsing/unparsing of tensorflow::VariableResponse. // Wire-format is identical to RecvVariableResponse. template <> -class SerializationTraits { +class SerializationTraits< + paddle::operators::distributed::GRPCVariableResponse> { public: static Status Serialize( - const paddle::operators::detail::VariableResponse& msg, + const paddle::operators::distributed::GRPCVariableResponse& msg, grpc_byte_buffer** bp, bool* own_buffer) { PADDLE_ENFORCE(false, "SerializationTraits::Serialize not implemented!"); return Status(); } - static Status Deserialize(grpc_byte_buffer* buffer, - paddle::operators::detail::VariableResponse* msg, - int max_message_size = INT_MAX) { + static Status Deserialize( + grpc_byte_buffer* buffer, + paddle::operators::distributed::GRPCVariableResponse* msg, + int max_message_size = INT_MAX) { if (buffer == nullptr) { return Status(StatusCode::INTERNAL, "No payload"); } Status result = g_core_codegen_interface->ok(); if (result.ok()) { - paddle::operators::detail::GrpcByteSource source(buffer); + paddle::operators::distributed::GrpcByteSource source(buffer); int ret = msg->Parse(&source); if (ret != 0) { result = Status(StatusCode::INTERNAL, "VariableResponse parse error"); @@ -71,16 +74,17 @@ class SerializationTraits { namespace paddle { namespace operators { -namespace detail { +namespace distributed { enum class GrpcMethod { kSendVariable, kGetVariable, kPrefetchVariable, + kCheckpointNotify, }; static const int kGrpcNumMethods = - static_cast(GrpcMethod::kPrefetchVariable) + 1; + static_cast(GrpcMethod::kCheckpointNotify) + 1; inline const char* GrpcMethodName(GrpcMethod id) { switch (id) { @@ -90,6 +94,8 @@ inline const char* GrpcMethodName(GrpcMethod id) { return "/sendrecv.SendRecvService/GetVariable"; case GrpcMethod::kPrefetchVariable: return "/sendrecv.SendRecvService/PrefetchVariable"; + case GrpcMethod::kCheckpointNotify: + return "/sendrecv.SendRecvService/CheckpointNotify"; } // Shouldn't be reached. 
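// A minimal sketch (illustrative, not part of the patch) of the round trip
// that the SerializationTraits specialization above enables; names follow
// the serde test at the top of this patch, and the EXPECT_* macro assumes a
// gtest context.
void RoundTrip(const platform::Place& place, framework::Variable* var) {
  auto& ctx = *platform::DeviceContextPool::Instance().Get(place);

  // Sender side: serialize the variable into a gRPC ByteBuffer.
  ::grpc::ByteBuffer msg;
  operators::distributed::SerializeToByteBuffer("myvar", var, ctx, &msg);

  // Receiver side: parse it back, zero-copy, into a fresh scope.
  framework::Scope scope;
  scope.Var("myvar");
  operators::distributed::GRPCVariableResponse resp(&scope, &ctx);
  EXPECT_EQ(resp.Parse(msg), 0);  // 0 means the whole message parsed cleanly.
}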
@@ -116,6 +122,6 @@ class GrpcService final { }; }; -} // namespace detail +} // namespace distributed } // namespace operators } // namespace paddle diff --git a/paddle/fluid/operators/detail/variable_response.cc b/paddle/fluid/operators/distributed/grpc_variable_response.cc similarity index 53% rename from paddle/fluid/operators/detail/variable_response.cc rename to paddle/fluid/operators/distributed/grpc_variable_response.cc index f4a374d56d..34d47f3ec0 100644 --- a/paddle/fluid/operators/detail/variable_response.cc +++ b/paddle/fluid/operators/distributed/grpc_variable_response.cc @@ -12,19 +12,19 @@ // See the License for the specific language governing permissions and // limitations under the License. -#include "paddle/fluid/operators/detail/variable_response.h" - #include #include #include -#include "paddle/fluid/platform/profiler.h" +#ifdef PADDLE_WITH_CUDA +#include +#endif -#include "paddle/fluid/operators/detail/send_recv.pb.h" -#include "paddle/fluid/operators/detail/sendrecvop_utils.h" +#include "paddle/fluid/operators/distributed/grpc_variable_response.h" +#include "paddle/fluid/platform/profiler.h" namespace paddle { namespace operators { -namespace detail { +namespace distributed { enum WireType { WIRETYPE_VARINT = 0, @@ -48,143 +48,12 @@ bool ReadVarintSizeAsInt(::google::protobuf::io::CodedInputStream* input, } } -bool ReadRaw(::google::protobuf::io::CodedInputStream* input, - const platform::DeviceContext& dev_ctx, platform::Place place, - void* dest, int size) { - const void* data = NULL; - int size_to_write = 0; - int length = size; - int total_written = 0; - - if (platform::is_gpu_place(place)) { -#ifdef PADDLE_WITH_CUDA - auto& gpu_dev_ctx = - static_cast(dev_ctx); - platform::CPUPlace cpu; - - char* p = reinterpret_cast(dest); - while (total_written < length) { - if (!input->GetDirectBufferPointer(&data, &size_to_write)) { - return false; - } - // NOTE: if raw buffer is large and have two neighbor fields of raw - // buffers GetDirectBufferPointer can get all of them, use length to - // truncate it. - if (total_written + size_to_write > length) { - size_to_write = length - total_written; - } - memory::Copy(boost::get(place), - reinterpret_cast(p), cpu, data, size_to_write, - gpu_dev_ctx.stream()); - p += size_to_write; - total_written += size_to_write; - - input->Skip(size_to_write); - } - gpu_dev_ctx.Wait(); -#else - PADDLE_THROW("Unexpected branch"); -#endif - return true; - } - - char* p = reinterpret_cast(dest); - while (total_written < length) { - if (!input->GetDirectBufferPointer(&data, &size_to_write)) { - return false; - } - // NOTE: if raw buffer is large and have two neighbor fields of raw buffers - // GetDirectBufferPointer can get all of them, use length to truncate it. - if (total_written + size_to_write > length) { - size_to_write = length - total_written; - } - // TODO(gongwb): can we avoid copy? 
- platform::CPUPlace cpu; - memory::Copy(cpu, reinterpret_cast(p), cpu, data, size_to_write); - - p += size_to_write; - total_written += size_to_write; - - input->Skip(size_to_write); - } - - return true; -} - -bool VariableResponse::CopyLodTensorData( - ::google::protobuf::io::CodedInputStream* input, - const platform::DeviceContext& ctx, const framework::DDim& dims, - int length) { - auto* tensor = GetVar()->GetMutable(); - tensor->Resize(dims); - - framework::LoD lod; - for (int i = 0; i < meta_.lod_level(); ++i) { - framework::Vector v; - for (int j = 0; j < meta_.lod(i).lod_data_size(); ++j) { - v.push_back(meta_.lod(i).lod_data(j)); - } - lod.push_back(v); - } - tensor->set_lod(lod); - - void* tensor_data = - tensor->mutable_data(ctx.GetPlace(), ToTypeIndex(meta_.data_type())); - - if (!ReadRaw(input, ctx, tensor->place(), tensor_data, length)) { - return false; - } - - return true; -} - -inline framework::DDim GetDims( - const ::google::protobuf::RepeatedField<::google::protobuf::int64>& dims) { - std::vector vecdims; - for (auto& d : dims) { - vecdims.push_back(d); - } - return framework::make_ddim(vecdims); -} - -bool VariableResponse::CopySelectRowsTensorData( - ::google::protobuf::io::CodedInputStream* input, - const platform::DeviceContext& ctx, const framework::DDim& dims, - int length) { - auto* slr = GetVar()->GetMutable(); - slr->set_height(meta_.slr_height()); - auto* tensor = slr->mutable_value(); - tensor->Resize(dims); - PADDLE_ENFORCE_EQ( - static_cast(tensor->numel()), - length / framework::SizeOfType( - paddle::operators::detail::ToTypeIndex(meta_.data_type()))); - void* tensor_data = tensor->mutable_data( - ctx.GetPlace(), - paddle::operators::detail::ToTypeIndex(meta_.data_type())); - - if (!ReadRaw(input, ctx, tensor->place(), tensor_data, length)) { - return false; - } - - return true; -} - -bool VariableResponse::CopySelectRowsData( - ::google::protobuf::io::CodedInputStream* input, - const platform::DeviceContext& ctx, int length) { - auto* slr = GetVar()->GetMutable(); - slr->mutable_rows()->resize(length / - framework::SizeOfType(typeid(int64_t))); // int64 - int64_t* rows_data = slr->mutable_rows()->data(); - - // copy rows CPU data, GPU data will be copied lazily. 
- platform::CPUPlace cpu; - if (!ReadRaw(input, ctx, cpu, rows_data, length)) { - return false; - } +int GRPCVariableResponse::Parse(const ::grpc::ByteBuffer& byte_buffer) { + GrpcByteBufferSource source; + source.Init(byte_buffer); + GrpcByteBufferSourceWrapper r(&source); - return true; + return Parse(&r); } bool ParseLodData(::google::protobuf::io::CodedInputStream* input, @@ -210,15 +79,15 @@ bool ParseLodData(::google::protobuf::io::CodedInputStream* input, } if (wt == WIRETYPE_LENGTH_DELIMITED) { - int length = 0; - if (!input->ReadVarintSizeAsInt(&length)) { + int num_bytes = 0; + if (!input->ReadVarintSizeAsInt(&num_bytes)) { return tag; } - - for (int i = 0; i < length; i++) { + int start_pos = input->CurrentPosition(); + while (input->CurrentPosition() - start_pos < num_bytes) { uint64_t v; if (!input->ReadVarint64(&v)) { - return false; + return tag; } lod->push_back(v); } @@ -234,15 +103,7 @@ bool ParseLodData(::google::protobuf::io::CodedInputStream* input, return true; } -int VariableResponse::Parse(const ::grpc::ByteBuffer& byte_buffer) { - GrpcByteBufferSource source; - source.Init(byte_buffer); - GrpcByteBufferSourceWrapper r(&source); - - return Parse(&r); -} - -int VariableResponse::Parse(Source* source) { +int GRPCVariableResponse::Parse(Source* source) { ::google::protobuf::io::ZeroCopyInputStream* input_stream = source->contents(); ::google::protobuf::io::CodedInputStream input(input_stream); @@ -275,8 +136,8 @@ int VariableResponse::Parse(Source* source) { break; } case sendrecv::VariableMessage::kTypeFieldNumber: { - uint64_t v; - if ((wt != WIRETYPE_VARINT) || !input.ReadVarint64(&v)) { + uint32_t v; + if ((wt != WIRETYPE_VARINT) || !input.ReadVarint32(&v)) { return tag; } @@ -284,8 +145,8 @@ int VariableResponse::Parse(Source* source) { break; } case sendrecv::VariableMessage::kDataTypeFieldNumber: { - uint64_t v = 0; - if ((wt != WIRETYPE_VARINT) || !input.ReadVarint64(&v)) { + uint32_t v = 0; + if ((wt != WIRETYPE_VARINT) || !input.ReadVarint32(&v)) { return tag; } @@ -305,11 +166,12 @@ int VariableResponse::Parse(Source* source) { // packed if (wt == WIRETYPE_LENGTH_DELIMITED) { - int length = 0; - if (!input.ReadVarintSizeAsInt(&length)) { + int num_bytes = 0; + if (!input.ReadVarintSizeAsInt(&num_bytes)) { return tag; } - for (int i = 0; i < length; i++) { + int start_pos = input.CurrentPosition(); + while (input.CurrentPosition() - start_pos < num_bytes) { uint64_t v; if (!input.ReadVarint64(&v)) { return tag; @@ -318,7 +180,6 @@ int VariableResponse::Parse(Source* source) { } break; } - return tag; } case sendrecv::VariableMessage::kLodLevelFieldNumber: { @@ -345,7 +206,7 @@ int VariableResponse::Parse(Source* source) { } if (!input.DecrementRecursionDepthAndPopLimit(p.first)) { - return false; + return tag; } if (lod_data.size() == 0) { @@ -367,35 +228,17 @@ int VariableResponse::Parse(Source* source) { break; } case sendrecv::VariableMessage::kSerializedFieldNumber: { - PADDLE_ENFORCE((meta_.type() == sendrecv::SELECTED_ROWS || - meta_.type() == sendrecv::LOD_TENSOR) && - meta_.varname() != "", - "meta info should be got first!"); - - int length = 0; + int num_bytes = 0; if (wt != WIRETYPE_LENGTH_DELIMITED || - !ReadVarintSizeAsInt(&input, &length)) { + !ReadVarintSizeAsInt(&input, &num_bytes)) { return tag; } - framework::DDim dims = GetDims(meta_.dims()); - if (meta_.type() == sendrecv::LOD_TENSOR) { - PADDLE_ENFORCE(meta_.lod_size() >= 0, - "lod info should be got first!"); - if (!CopyLodTensorData(&input, *dev_ctx_, dims, length)) { - return tag; - } 
- break; - } - - if (meta_.type() == sendrecv::SELECTED_ROWS) { - if (!CopySelectRowsTensorData(&input, *dev_ctx_, dims, length)) { - return tag; - } - break; + if (!ProcSerializedField(tag, &input, num_bytes)) { + return tag; } - return tag; + break; } case sendrecv::VariableMessage::kRowsFieldNumber: { PADDLE_ENFORCE((meta_.type() == sendrecv::SELECTED_ROWS || @@ -403,13 +246,13 @@ int VariableResponse::Parse(Source* source) { meta_.varname() != "", "meta info should be got first!"); - int length = 0; + int num_bytes = 0; if (wt != WIRETYPE_LENGTH_DELIMITED || - !ReadVarintSizeAsInt(&input, &length)) { + !ReadVarintSizeAsInt(&input, &num_bytes)) { return tag; } - if (!CopySelectRowsData(&input, *dev_ctx_, length)) { + if (!CopySelectRowsData(&input, *dev_ctx_, num_bytes)) { return tag; } break; @@ -429,8 +272,8 @@ int VariableResponse::Parse(Source* source) { break; } case sendrecv::VariableMessage::kProfileFieldNumber: { - bool profiling; - if (!input.ReadRaw(reinterpret_cast(&profiling), 1)) { + uint64_t profiling = 0; + if (!input.ReadVarint64(&profiling)) { return tag; } meta_.set_profile(profiling); @@ -438,9 +281,11 @@ int VariableResponse::Parse(Source* source) { if (listener_id <= 0) { break; } - if (profiling && !platform::IsProfileEnabled()) { + if (profiling == platform::kEnableProfiler && + !platform::IsProfileEnabled()) { platform::EnableProfiler(platform::ProfilerState::kCPU); - } else if (!profiling && platform::IsProfileEnabled()) { + } else if (profiling == platform::kDisableProfiler && + platform::IsProfileEnabled()) { // TODO(panyx0718): Should we allow to customize file dir. platform::DisableProfiler( platform::EventSortingKey::kDefault, @@ -458,6 +303,6 @@ int VariableResponse::Parse(Source* source) { return 0; } -}; // namespace detail +}; // namespace distributed }; // namespace operators }; // namespace paddle diff --git a/paddle/fluid/operators/distributed/grpc_variable_response.h b/paddle/fluid/operators/distributed/grpc_variable_response.h new file mode 100644 index 0000000000..89df07c92c --- /dev/null +++ b/paddle/fluid/operators/distributed/grpc_variable_response.h @@ -0,0 +1,58 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#pragma once + +#include + +#include "paddle/fluid/framework/data_type.h" +#include "paddle/fluid/framework/lod_tensor.h" +#include "paddle/fluid/framework/scope.h" +#include "paddle/fluid/framework/selected_rows.h" +#include "paddle/fluid/framework/var_type.h" + +#include "paddle/fluid/operators/distributed/send_recv.grpc.pb.h" +#include "paddle/fluid/operators/distributed/send_recv.pb.h" + +#include "google/protobuf/io/coded_stream.h" +#include "google/protobuf/io/zero_copy_stream.h" +#include "paddle/fluid/framework/tensor.h" +#include "paddle/fluid/operators/distributed/grpc_bytebuffer_stream.h" +#include "paddle/fluid/operators/distributed/variable_response.h" + +namespace paddle { +namespace operators { +namespace distributed { + +class GRPCVariableResponse : public VariableResponse { + public: + GRPCVariableResponse(const framework::Scope* scope, + const platform::DeviceContext* dev_ctx, + bool create_scope = false) + : VariableResponse(scope, dev_ctx, create_scope) {} + + virtual ~GRPCVariableResponse() {} + + int Parse(Source* source) override; + + // return: + // 0: ok. + // -1: unknown error. + // other: the field number that failed to parse. + int Parse(const ::grpc::ByteBuffer& byte_buffer); +}; + +}; // namespace distributed +}; // namespace operators +}; // namespace paddle diff --git a/paddle/fluid/operators/detail/proto_encoder_helper.h b/paddle/fluid/operators/distributed/proto_encoder_helper.h similarity index 98% rename from paddle/fluid/operators/detail/proto_encoder_helper.h rename to paddle/fluid/operators/distributed/proto_encoder_helper.h index d91d054b25..2fab02e32f 100644 --- a/paddle/fluid/operators/detail/proto_encoder_helper.h +++ b/paddle/fluid/operators/distributed/proto_encoder_helper.h @@ -26,7 +26,7 @@ limitations under the License. */ namespace paddle { namespace operators { -namespace detail { +namespace distributed { char* EncodeVarint32(char* dst, uint32_t v) { // Operate on characters as unsigneds @@ -144,6 +144,6 @@ class ProtoEncodeHelper { char* limit_; // Just for CHECKs }; -} // namespace detail +} // namespace distributed } // namespace operators } // namespace paddle diff --git a/paddle/fluid/operators/distributed/request_handler.h b/paddle/fluid/operators/distributed/request_handler.h new file mode 100644 index 0000000000..64ac728184 --- /dev/null +++ b/paddle/fluid/operators/distributed/request_handler.h @@ -0,0 +1,158 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License.
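// A minimal sketch (illustrative, not part of the patch) of the base-128
// varint scheme behind EncodeVarint32 in proto_encoder_helper.h above; the
// body shown here is the standard encoding, not copied from that file:
// seven payload bits per byte, high bit set on every byte except the last.
char* EncodeVarint32Sketch(char* dst, uint32_t v) {
  unsigned char* ptr = reinterpret_cast<unsigned char*>(dst);
  while (v >= 0x80) {
    *(ptr++) = static_cast<unsigned char>(v | 0x80);  // low 7 bits, continue bit set
    v >>= 7;
  }
  *(ptr++) = static_cast<unsigned char>(v);  // final byte, high bit clear
  return reinterpret_cast<char*>(ptr);
}
// e.g. v = 300 encodes as the two bytes 0xAC 0x02.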
+ +#pragma once + +#include + +#include +#include +#include +#include + +#include "paddle/fluid/framework/data_type.h" +#include "paddle/fluid/framework/executor.h" +#include "paddle/fluid/framework/lod_tensor.h" +#include "paddle/fluid/framework/program_desc.h" +#include "paddle/fluid/framework/scope.h" +#include "paddle/fluid/framework/selected_rows.h" +#include "paddle/fluid/framework/var_type.h" + +namespace paddle { +namespace operators { +namespace distributed { + +constexpr char kRequestSend[] = "RequestSend"; +constexpr char kRequestGet[] = "RequestGet"; +constexpr char kRequestPrefetch[] = "RequestPrefetch"; +constexpr char kRequestCheckpoint[] = "RequestCheckpoint"; +constexpr char kRequestPassBarrier[] = "RequestPassBarrier"; + +#define LISTEN_TERMINATE_MESSAGE "TERMINATE@RECV" +#define BATCH_BARRIER_MESSAGE "BATCH_BARRIER@RECV" +#define FETCH_BARRIER_MESSAGE "FETCH_BARRIER@RECV" +#define COMPLETE_MESSAGE "COMPLETE@RECV" + +#define CHECKPOINT_SAVE_MESSAGE "SAVE@CHECKPOINTNOTIFY" +#define CHECKPOINT_LOAD_MESSAGE "LOAD@CHECKPOINTNOTIFY" + +class RPCServer; + +struct VarHandle { + // RPC endpoint. + std::string ep; + const platform::DeviceContext* ctx; + const framework::Scope* scope; + // Variable name. + std::string name; + // RPC method name. + std::string method; + + std::string String() const { + std::ostringstream s; + s << method << " name:[" << name << "], ep:[" << ep << "]"; + return s.str(); + } +}; + +class RequestHandler { + public: + explicit RequestHandler(bool sync_mode) + : sync_mode_(sync_mode), + dev_ctx_(nullptr), + executor_(nullptr), + scope_(nullptr), + program_(nullptr), + rpc_server_(nullptr) {} + + virtual ~RequestHandler() {} + + // Set attributes. + void SetScope(framework::Scope* scope) { scope_ = scope; } + void SetDevCtx(const platform::DeviceContext* dev_ctx) { dev_ctx_ = dev_ctx; } + void SetProgram(framework::ProgramDesc* program) { program_ = program; } + void SetExecutor(framework::Executor* executor) { executor_ = executor; } + + // Used for dist lookup table prefetch + void SetPrefetchPreparedCtx( + std::unordered_map< + std::string, std::shared_ptr>* g) { + prefetch_var_name_to_prepared_ctx_ = g; + } + + void SetCheckpointNotifyPreparedCtx( + std::shared_ptr g) { + checkpoint_prepared_ctx_ = g; + } + + // Used for async. + void SetGradToPreparedCtx( + std::unordered_map< + std::string, std::shared_ptr>* g) { + grad_to_prepared_ctx_ = g; + } + + void SetRPCServer(RPCServer* rpc_server) { rpc_server_ = rpc_server; } + + // Get attributes. + bool sync_mode() { return sync_mode_; } + framework::Scope* scope() { return scope_; } + const platform::DeviceContext* dev_ctx() { return dev_ctx_; } + framework::ProgramDesc* program() { return program_; } + framework::Executor* executor() { return executor_; } + + // This function processes the user's RPC request. + // The implementation is in request_handler_impl.
+ // example: + // std::string varname = request_.varname(); + // + // auto scope = request_handler_->scope(); + // auto invar = scope->FindVar(varname); + // framework::Variable* outvar = nullptr; + // + // request_handler_->Handle(varname, scope, invar, &outvar); + // if (outvar) { + // SerializeToByteBuffer(varname, outvar, + // *request_handler_->dev_ctx(), &reply_); + // } + virtual bool Handle(const std::string& varname, framework::Scope* scope, + framework::Variable* var, framework::Variable** outvar, + const std::string& out_var_name = "") = 0; + + protected: + const bool sync_mode_; + + const platform::DeviceContext* dev_ctx_; + framework::Executor* executor_; + framework::Scope* scope_; + framework::ProgramDesc* program_; + + // used for distribute lookup table prefetch + std::unordered_map>* + prefetch_var_name_to_prepared_ctx_; + // used for checkpoint notify + std::shared_ptr checkpoint_prepared_ctx_; + + // Used for async. + std::unordered_map>* + grad_to_prepared_ctx_; + + RPCServer* rpc_server_; +}; + +} // namespace distributed +} // namespace operators +} // namespace paddle diff --git a/paddle/fluid/operators/distributed/request_handler_impl.cc b/paddle/fluid/operators/distributed/request_handler_impl.cc new file mode 100644 index 0000000000..de1a503154 --- /dev/null +++ b/paddle/fluid/operators/distributed/request_handler_impl.cc @@ -0,0 +1,144 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include +#include +#include + +#include "paddle/fluid/framework/data_type.h" +#include "paddle/fluid/framework/lod_tensor.h" +#include "paddle/fluid/framework/scope.h" +#include "paddle/fluid/framework/selected_rows.h" +#include "paddle/fluid/operators/distributed/request_handler_impl.h" +#include "paddle/fluid/operators/distributed/rpc_server.h" +#include "paddle/fluid/string/printf.h" + +namespace paddle { +namespace operators { +namespace distributed { + +// define LOOKUP_TABLE_PATH for checkpoint notify to save lookup table variables +// to directory specified. 
+constexpr char LOOKUP_TABLE_PATH[] = "kLookupTablePath"; + +bool RequestSendHandler::Handle(const std::string& varname, + framework::Scope* scope, + framework::Variable* invar, + framework::Variable** outvar, + const std::string& out_var_name) { + VLOG(4) << "RequestSendHandler:" << varname; + + // Async + if (!sync_mode_) { + rpc_server_->Profiler().OneStep(); + try { + executor_->RunPreparedContext((*grad_to_prepared_ctx_)[varname].get(), + scope); + } catch (std::exception& e) { + LOG(ERROR) << "async: run sub program error " << e.what(); + return false; + } + return true; + } + + // Sync + if (varname == BATCH_BARRIER_MESSAGE) { + VLOG(3) << "sync: recv BATCH_BARRIER_MESSAGE"; + rpc_server_->IncreaseBatchBarrier(kRequestSend); + } else if (varname == COMPLETE_MESSAGE) { + VLOG(3) << "sync: recv complete message"; + rpc_server_->Complete(); + } else { + VLOG(3) << "sync: received var_name: " << varname; + rpc_server_->WaitCond(kRequestSend); + VLOG(3) << "sync: processing received var: " << varname; + + if (invar == nullptr) { + LOG(FATAL) << "sync: Can not find server side var: " << varname; + return false; + } + if (invar->IsType()) { + std::unique_lock lock(mutex_sparse_vars_); + sparse_vars_.push_back(invar); + } + } + return true; +} + +void RequestSendHandler::ResetSparseVarRecorder() { + std::unique_lock lock(mutex_sparse_vars_); + for (auto* var : sparse_vars_) { + var->GetMutable()->mutable_rows()->clear(); + } + sparse_vars_.clear(); +} + +bool RequestGetHandler::Handle(const std::string& varname, + framework::Scope* scope, + framework::Variable* invar, + framework::Variable** outvar, + const std::string& out_var_name) { + VLOG(4) << "RequestGetHandler:" << varname; + if (sync_mode_) { + if (varname == FETCH_BARRIER_MESSAGE) { + VLOG(3) << "sync: recv fetch barrier message"; + rpc_server_->IncreaseBatchBarrier(kRequestGet); + } else { + rpc_server_->WaitCond(kRequestGet); + *outvar = scope_->FindVar(varname); + } + } else { + if (varname != FETCH_BARRIER_MESSAGE && varname != COMPLETE_MESSAGE) { + *outvar = scope_->FindVar(varname); + } + } + return true; +} + +bool RequestPrefetchHandler::Handle(const std::string& varname, + framework::Scope* scope, + framework::Variable* invar, + framework::Variable** outvar, + const std::string& out_var_name) { + VLOG(4) << "RequestPrefetchHandler " << varname; + + auto var_desc = program_->Block(0).FindVar(out_var_name); + InitializeVariable(*outvar, var_desc->GetType()); + executor_->RunPreparedContext( + (*prefetch_var_name_to_prepared_ctx_)[varname].get(), scope); + + return true; +} + +bool RequestCheckpointHandler::Handle(const std::string& varname, + framework::Scope* scope, + framework::Variable* invar, + framework::Variable** outvar, + const std::string& out_var_name) { + PADDLE_ENFORCE( + checkpoint_notify_id != -1, + "when checkpoint_notify_id = -1, there should be no RPC invoke."); + + auto* lt_var = scope->FindVar(LOOKUP_TABLE_PATH)->GetMutable(); + lt_var->clear(); + lt_var->append(out_var_name); + VLOG(4) << "RequestCheckpointHandler update var kLookupTablePath to: " + << out_var_name; + executor_->RunPreparedContext(checkpoint_prepared_ctx_.get(), scope); + return true; +} + +} // namespace distributed +} // namespace operators +} // namespace paddle diff --git a/paddle/fluid/operators/distributed/request_handler_impl.h b/paddle/fluid/operators/distributed/request_handler_impl.h new file mode 100644 index 0000000000..87185500f2 --- /dev/null +++ b/paddle/fluid/operators/distributed/request_handler_impl.h @@ -0,0 +1,86 @@ 
+// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once + +#include + +#include +#include +#include +#include + +#include "paddle/fluid/framework/data_type.h" +#include "paddle/fluid/framework/executor.h" +#include "paddle/fluid/framework/lod_tensor.h" +#include "paddle/fluid/framework/program_desc.h" +#include "paddle/fluid/framework/scope.h" +#include "paddle/fluid/framework/selected_rows.h" +#include "paddle/fluid/framework/var_type.h" +#include "paddle/fluid/operators/distributed/request_handler.h" + +namespace paddle { +namespace operators { +namespace distributed { + +class RequestSendHandler final : public RequestHandler { + public: + explicit RequestSendHandler(bool sync_mode) : RequestHandler(sync_mode) {} + virtual ~RequestSendHandler() {} + bool Handle(const std::string& varname, framework::Scope* scope, + framework::Variable* var, framework::Variable** outvar, + const std::string& out_var_name = "") override; + void ResetSparseVarRecorder(); + + private: + std::mutex mutex_sparse_vars_; + std::vector sparse_vars_; +}; + +class RequestGetHandler final : public RequestHandler { + public: + explicit RequestGetHandler(bool sync_mode) : RequestHandler(sync_mode) {} + virtual ~RequestGetHandler() {} + bool Handle(const std::string& varname, framework::Scope* scope, + framework::Variable* var, framework::Variable** outvar, + const std::string& out_var_name = "") override; +}; + +class RequestPrefetchHandler final : public RequestHandler { + public: + explicit RequestPrefetchHandler(bool sync_mode) : RequestHandler(sync_mode) {} + virtual ~RequestPrefetchHandler() {} + bool Handle(const std::string& varname, framework::Scope* scope, + framework::Variable* var, framework::Variable** outvar, + const std::string& out_var_name = "") override; +}; + +class RequestCheckpointHandler final : public RequestHandler { + public: + explicit RequestCheckpointHandler(bool sync_mode, int checkpoint_notify_id) + : RequestHandler(sync_mode) { + this->checkpoint_notify_id = checkpoint_notify_id; + } + virtual ~RequestCheckpointHandler() {} + bool Handle(const std::string& varname, framework::Scope* scope, + framework::Variable* var, framework::Variable** outvar, + const std::string& out_var_name = "") override; + + private: + int checkpoint_notify_id; +}; + +} // namespace distributed +} // namespace operators +} // namespace paddle diff --git a/paddle/fluid/operators/distributed/rpc_client.cc b/paddle/fluid/operators/distributed/rpc_client.cc new file mode 100644 index 0000000000..b5ec9fe536 --- /dev/null +++ b/paddle/fluid/operators/distributed/rpc_client.cc @@ -0,0 +1,30 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "paddle/fluid/operators/distributed/rpc_client.h" +#include "gflags/gflags.h" + +// default to 3min to avoid temporary network failures. +DEFINE_int32(rpc_deadline, 180000, "deadline timeouts for rpc"); + +namespace paddle { +namespace operators { +namespace distributed { + +std::once_flag RPCClient::init_flag_; +std::unique_ptr RPCClient::rpc_client_(nullptr); + +} // namespace distributed +} // namespace operators +} // namespace paddle diff --git a/paddle/fluid/operators/distributed/rpc_client.h b/paddle/fluid/operators/distributed/rpc_client.h new file mode 100644 index 0000000000..22a022a5d2 --- /dev/null +++ b/paddle/fluid/operators/distributed/rpc_client.h @@ -0,0 +1,97 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once + +#include +#include "gflags/gflags.h" + +#include "paddle/fluid/framework/data_type.h" +#include "paddle/fluid/framework/lod_tensor.h" +#include "paddle/fluid/framework/scope.h" + +DECLARE_int32(rpc_deadline); + +namespace paddle { +namespace operators { +namespace distributed { + +class RPCClient { + public: + RPCClient() {} + virtual ~RPCClient() {} + virtual bool AsyncSendVar(const std::string& ep, + const platform::DeviceContext& ctx, + const framework::Scope& scope, + const std::string& var_name, + int64_t time_out = FLAGS_rpc_deadline) = 0; + + virtual bool AsyncGetVar(const std::string& ep, + const platform::DeviceContext& ctx, + const framework::Scope& scope, + const std::string& var_name, + int64_t time_out = FLAGS_rpc_deadline) = 0; + + virtual bool AsyncPrefetchVar(const std::string& ep, + const platform::DeviceContext& ctx, + const framework::Scope& scope, + const std::string& in_var_name, + const std::string& out_var_name, + int64_t time_out = FLAGS_rpc_deadline) = 0; + + virtual void AsyncSendBatchBarrier(const std::string& ep, + int64_t time_out = FLAGS_rpc_deadline) = 0; + + virtual void AsyncSendFetchBarrier(const std::string& ep, + int64_t time_out = FLAGS_rpc_deadline) = 0; + + virtual void AsyncCheckpointNotify(const std::string& ep, + const std::string& dir, + int64_t time_out = FLAGS_rpc_deadline) = 0; + + virtual void AsyncSendComplete(const std::string& ep, + int64_t time_out = FLAGS_rpc_deadline) = 0; + + // Complete tells all the pserver instances that the trainer has finished + // training, so the pserver can reduce its barrier count and continue to + // train with other trainers.
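// (In the COMPLETE test added below, one trainer's SendComplete drops the
// server's client count from 2 to 1 via RPCServer::Complete.)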
+ virtual void SendComplete() = 0; + + virtual bool Wait() = 0; + + template + static RPCClient* GetInstance() { + std::call_once(init_flag_, &RPCClient::Init); + return rpc_client_.get(); + } + + // Init is called by GetInstance. + template + static void Init() { + if (rpc_client_.get() == nullptr) { + rpc_client_.reset(new T()); + rpc_client_->InitImpl(); + } + } + + protected: + virtual void InitImpl() {} + + private: + static std::once_flag init_flag_; + static std::unique_ptr rpc_client_; +}; +} // namespace distributed +} // namespace operators +} // namespace paddle diff --git a/paddle/fluid/operators/distributed/rpc_server.cc b/paddle/fluid/operators/distributed/rpc_server.cc new file mode 100644 index 0000000000..406e7294c1 --- /dev/null +++ b/paddle/fluid/operators/distributed/rpc_server.cc @@ -0,0 +1,161 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include +#include +#include +#include + +#include "paddle/fluid/operators/distributed/rpc_server.h" +#include "paddle/fluid/platform/profiler.h" + +DEFINE_int32(rpc_server_profile_period, 0, + "the period of listen_and_serv to do profile"); +DEFINE_string(rpc_server_profile_path, "/dev/null", + "the profile log file path"); + +namespace paddle { +namespace operators { +namespace distributed { + +RPCServerProfiler::RPCServerProfiler(int profile_period, + const std::string& profile_log_path) + : profile_period_(profile_period), profile_log_path_(profile_log_path) { + step_ = 0; +} + +void RPCServerProfiler::OneStep() { + PADDLE_ENFORCE_LE(step_, profile_period_, + "step_ should not be larger then " + "profile_period_"); + if (profile_period_ <= 0) { + return; + } + + if (step_ == 0) { + auto pf_state = paddle::platform::ProfilerState::kCPU; + paddle::platform::EnableProfiler(pf_state); + } + if (step_ == profile_period_) { + paddle::platform::DisableProfiler(paddle::platform::EventSortingKey::kTotal, + profile_log_path_); + step_ = 0; + } else { + step_++; + } +} + +void RPCServer::ShutDown() { + LOG(INFO) << "RPCServer ShutDown "; + ShutDownImpl(); + + exit_flag_ = true; + barrier_cond_.notify_all(); + rpc_cond_.notify_all(); +} + +void RPCServer::SavePort() const { + auto file_path = string::Sprintf("/tmp/paddle.%d.port", ::getpid()); + std::ofstream port_file; + port_file.open(file_path); + port_file << selected_port_; + port_file.close(); + VLOG(4) << "selected port written to " << file_path; +} + +void RPCServer::WaitBarrier(const std::string& rpc_name) { + std::unique_lock lock(this->mutex_); + barrier_cond_.wait(lock, [this, &rpc_name] { + return ((barrier_counter_[rpc_name] == client_num_ && client_num_ != 0) || + exit_flag_.load()); + }); + + VLOG(3) << "batch_barrier_: " << rpc_name << " " + << barrier_counter_[rpc_name]; +} + +void RPCServer::IncreaseBatchBarrier(const std::string rpc_name) { + VLOG(4) << "RPCServer begin IncreaseBatchBarrier " << rpc_name; + int b = 0; + std::unique_lock lock(mutex_); + b = ++barrier_counter_[rpc_name]; + if 
(b >= client_num_) { + lock.unlock(); + barrier_cond_.notify_all(); + lock.lock(); + } +} + +void RPCServer::Complete() { + { + std::unique_lock lock(mutex_); + client_num_--; + VLOG(4) << "decrease client_num to: " << client_num_; + if (cur_cond_.load() == rpc_cond_map_[kRequestGet]) { + barrier_counter_[kRequestGet]--; + } + } + barrier_cond_.notify_all(); +} + +int RPCServer::GetClientNum() { + std::unique_lock lock(mutex_); + return client_num_; +} + +void RPCServer::ResetBarrierCounter() { + VLOG(3) << "RPCServer ResetBarrierCounter "; + std::unique_lock lock(mutex_); + for (auto& t : barrier_counter_) { + t.second = 0; + } +} + +void RPCServer::RegisterRPC(const std::string& rpc_name, + RequestHandler* handler, int thread_num) { + rpc_call_map_[rpc_name] = handler; + rpc_thread_num_[rpc_name] = thread_num; + + static int cond = -1; + rpc_cond_map_[rpc_name] = ++cond; + VLOG(4) << "RegisterRPC rpc_name:" << rpc_name << ", handler:" << handler + << ", cond:" << rpc_cond_map_[rpc_name]; +} + +void RPCServer::SetCond(const std::string& rpc_name) { + VLOG(3) << "RPCServer SetCond " << rpc_name; + { + std::unique_lock lock(mutex_); + cur_cond_ = rpc_cond_map_[rpc_name]; + } + + rpc_cond_.notify_all(); +} + +void RPCServer::WaitCond(const std::string& rpc_name) { + VLOG(4) << "RPCServer WaitCond " << rpc_name; + int cond = 0; + { + std::unique_lock lock(mutex_); + cond = rpc_cond_map_[rpc_name]; + } + + std::unique_lock lock(mutex_); + rpc_cond_.wait( + lock, [=] { return (cur_cond_.load() == cond || exit_flag_.load()); }); +} + +} // namespace distributed +} // namespace operators +} // namespace paddle diff --git a/paddle/fluid/operators/distributed/rpc_server.h b/paddle/fluid/operators/distributed/rpc_server.h new file mode 100644 index 0000000000..d813ba03e2 --- /dev/null +++ b/paddle/fluid/operators/distributed/rpc_server.h @@ -0,0 +1,115 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
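// A minimal sketch (illustrative, not part of the patch) of how a trainer
// drives the barrier counters implemented in rpc_server.cc above; the
// endpoint and variable lists are hypothetical, and the client calls follow
// the interface in rpc_client.h.
void SyncRound(distributed::RPCClient* client, const std::string& ep,
               const platform::DeviceContext& ctx,
               const framework::Scope& scope,
               const std::vector<std::string>& grads,
               const std::vector<std::string>& params) {
  // Push gradients, then signal the send barrier; the server's
  // IncreaseBatchBarrier(kRequestSend) releases WaitBarrier once all
  // client_num_ trainers have signaled.
  for (auto& g : grads) client->AsyncSendVar(ep, ctx, scope, g);
  client->AsyncSendBatchBarrier(ep);
  client->Wait();

  // Pull updated parameters, then signal the fetch barrier
  // (IncreaseBatchBarrier(kRequestGet) on the server side).
  for (auto& p : params) client->AsyncGetVar(ep, ctx, scope, p);
  client->AsyncSendFetchBarrier(ep);
  client->Wait();
}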
+ +#pragma once + +#include +#include +#include // NOLINT +#include +#include + +#include "paddle/fluid/operators/distributed/request_handler.h" + +DECLARE_int32(rpc_server_profile_period); +DECLARE_string(rpc_server_profile_path); + +namespace paddle { +namespace operators { +namespace distributed { + +class RPCServerProfiler { + public: + RPCServerProfiler(int profile_period, const std::string& profile_log_path); + void OneStep(); + + private: + const int profile_period_; + std::string profile_log_path_; + int step_; +}; + +class RPCServer { + public: + explicit RPCServer(const std::string& address, int client_num) + : cur_cond_(0), + profiler_(FLAGS_rpc_server_profile_period, + FLAGS_rpc_server_profile_path), + bind_address_(address), + exit_flag_(false), + selected_port_(0), + client_num_(client_num) {} + + virtual ~RPCServer() {} + virtual void StartServer() = 0; + virtual void WaitServerReady() = 0; + + void ShutDown(); + + bool IsExit() { return exit_flag_.load(); } + + int GetSelectedPort() const { return selected_port_; } + + int GetClientNum(); + + void SavePort() const; + + // RegisterRPC, register the rpc method name to a handler + // class, and auto generate a condition id for this call + // to be used for the barrier. + void RegisterRPC(const std::string& rpc_name, RequestHandler* handler, + int thread_num = 5); + + // Wait util all the clients have reached the barrier for one + // rpc method. This function should be called in the + // RequestHandler if you want to run the server/client in a + // synchronous mode. + void WaitBarrier(const std::string& rpc_name); + + void SetCond(const std::string& rpc_name); + void WaitCond(const std::string& rpc_name); + void IncreaseBatchBarrier(const std::string rpc_name); + + void Complete(); + + void ResetBarrierCounter(); + RPCServerProfiler& Profiler() { return profiler_; } + + protected: + virtual void ShutDownImpl() = 0; + + private: + std::mutex mutex_; + std::unordered_map barrier_counter_; + std::condition_variable barrier_cond_; + + std::unordered_map rpc_cond_map_; + std::atomic cur_cond_; + std::condition_variable rpc_cond_; + RPCServerProfiler profiler_; + + protected: + std::string bind_address_; + std::atomic exit_flag_; + int selected_port_; + int client_num_; + + std::unordered_map rpc_call_map_; + std::unordered_map rpc_thread_num_; + friend class RequestHandler; +}; + +}; // namespace distributed +}; // namespace operators +}; // namespace paddle diff --git a/paddle/fluid/operators/distributed/rpc_server_test.cc b/paddle/fluid/operators/distributed/rpc_server_test.cc new file mode 100644 index 0000000000..b50830c362 --- /dev/null +++ b/paddle/fluid/operators/distributed/rpc_server_test.cc @@ -0,0 +1,184 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ + +#include +#include +#include // NOLINT + +#include "gtest/gtest.h" +#include "paddle/fluid/framework/block_desc.h" +#include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/framework/operator.h" + +#include "paddle/fluid/operators/detail/macros.h" +#include "paddle/fluid/operators/distributed/request_handler_impl.h" +#include "paddle/fluid/operators/distributed/rpc_client.h" +#include "paddle/fluid/operators/distributed/rpc_server.h" + +namespace framework = paddle::framework; +namespace platform = paddle::platform; +namespace distributed = paddle::operators::distributed; + +USE_NO_KERNEL_OP(lookup_sparse_table); + +std::unique_ptr g_rpc_service; +std::unique_ptr g_req_handler; + +framework::BlockDesc* AppendPrefetchBlcok(framework::ProgramDesc* program) { + auto root_block = program->MutableBlock(0); + auto* block = program->AppendBlock(*root_block); + + framework::VariableNameMap input({{"W", {"w"}}, {"Ids", {"ids"}}}); + framework::VariableNameMap output({{"Output", {"out"}}}); + auto op = block->AppendOp(); + op->SetType("lookup_sparse_table"); + op->SetInput("W", {"w"}); + op->SetInput("Ids", {"ids"}); + op->SetOutput("Out", {"out"}); + + auto& out = *root_block->Var("out"); + out.SetType(framework::proto::VarType::LOD_TENSOR); + out.SetShape({10, 10}); + + return block; +} + +void CreateVarsOnScope(framework::Scope* scope, platform::CPUPlace* place) { + auto w_var = scope->Var("w"); + w_var->GetMutable(); + + auto out_var = scope->Var("out"); + out_var->GetMutable(); + + auto ids_var = scope->Var("ids"); + ids_var->GetMutable(); +} + +void InitTensorsOnClient(framework::Scope* scope, platform::CPUPlace* place, + int64_t rows_numel) { + CreateVarsOnScope(scope, place); + auto ids_var = scope->Var("ids")->GetMutable(); + int64_t* ids_ptr = + ids_var->mutable_data(framework::DDim({rows_numel, 1}), *place); + for (int64_t i = 0; i < rows_numel; ++i) ids_ptr[i] = i * 2; +} + +void InitTensorsOnServer(framework::Scope* scope, platform::CPUPlace* place, + int64_t rows_numel) { + CreateVarsOnScope(scope, place); + auto w = scope->Var("w")->GetMutable(); + auto rows = w->mutable_rows(); + for (int64_t i = 0; i < rows_numel; ++i) rows->push_back(i); + auto w_value = w->mutable_value(); + w_value->Resize({rows_numel, 10}); + + auto ptr = w_value->mutable_data(*place); + + for (int64_t i = 0; i < w_value->numel(); ++i) { + ptr[i] = static_cast(i / 10); + } +} + +void StartServer(const std::string& rpc_name) { + framework::ProgramDesc program; + framework::Scope scope; + platform::CPUPlace place; + framework::Executor exe(place); + platform::CPUDeviceContext ctx(place); + auto* block = AppendPrefetchBlcok(&program); + std::string in_var_name("ids"); + std::vector prefetch_block_ids{block->ID()}; + auto prepared = exe.Prepare(program, prefetch_block_ids); + InitTensorsOnServer(&scope, &place, 10); + + std::unordered_map> + prefetch_var_name_to_prepared; + prefetch_var_name_to_prepared[in_var_name] = prepared[0]; + + g_req_handler->SetProgram(&program); + g_req_handler->SetPrefetchPreparedCtx(&prefetch_var_name_to_prepared); + g_req_handler->SetDevCtx(&ctx); + g_req_handler->SetScope(&scope); + g_req_handler->SetExecutor(&exe); + + g_rpc_service->RegisterRPC(rpc_name, g_req_handler.get()); + g_req_handler->SetRPCServer(g_rpc_service.get()); + + std::thread server_thread( + std::bind(&distributed::RPCServer::StartServer, g_rpc_service.get())); + + server_thread.join(); +} + +TEST(PREFETCH, CPU) { + g_req_handler.reset(new distributed::RequestPrefetchHandler(true)); + 
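// The client obtained below comes from RPCClient::GetInstance(), which
// lazily constructs the process-wide singleton via std::call_once (see
// rpc_client.h above), so both tests in this file share one client.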
+  g_rpc_service.reset(new RPCSERVER_T("127.0.0.1:0", 1));
+  distributed::RPCClient* client =
+      distributed::RPCClient::GetInstance<RPCCLIENT_T>();
+
+  std::thread server_thread(StartServer, distributed::kRequestPrefetch);
+  g_rpc_service->WaitServerReady();
+
+  int port = g_rpc_service->GetSelectedPort();
+  std::string ep = paddle::string::Sprintf("127.0.0.1:%d", port);
+
+  framework::Scope scope;
+  platform::CPUPlace place;
+  platform::CPUDeviceContext ctx(place);
+  {
+    // create var on local scope
+    int64_t rows_numel = 5;
+    InitTensorsOnClient(&scope, &place, rows_numel);
+    std::string in_var_name("ids");
+    std::string out_var_name("out");
+
+    client->AsyncPrefetchVar(ep, ctx, scope, in_var_name, out_var_name);
+    client->Wait();
+    auto var = scope.Var(out_var_name);
+    auto value = var->GetMutable<framework::LoDTensor>();
+    auto ptr = value->mutable_data<float>(place);
+
+    for (int64_t i = 0; i < rows_numel; ++i) {
+      EXPECT_EQ(ptr[0 + i * value->dims()[1]], static_cast<float>(i * 2));
+    }
+  }
+
+  g_rpc_service->ShutDown();
+  server_thread.join();
+  LOG(INFO) << "begin reset";
+  g_rpc_service.reset(nullptr);
+  g_req_handler.reset(nullptr);
+}
+
+TEST(COMPLETE, CPU) {
+  g_req_handler.reset(new distributed::RequestSendHandler(true));
+  g_rpc_service.reset(new RPCSERVER_T("127.0.0.1:0", 2));
+  distributed::RPCClient* client =
+      distributed::RPCClient::GetInstance<RPCCLIENT_T>();
+  PADDLE_ENFORCE(client != nullptr);
+  std::thread server_thread(StartServer, distributed::kRequestSend);
+  g_rpc_service->WaitServerReady();
+  int port = g_rpc_service->GetSelectedPort();
+  std::string ep = paddle::string::Sprintf("127.0.0.1:%d", port);
+  client->AsyncSendComplete(ep);
+  client->Wait();
+
+  EXPECT_EQ(g_rpc_service->GetClientNum(), 1);
+
+  g_rpc_service->ShutDown();
+  server_thread.join();
+  g_rpc_service.reset(nullptr);
+  g_req_handler.reset(nullptr);
+}
diff --git a/paddle/fluid/operators/detail/send_recv.proto b/paddle/fluid/operators/distributed/send_recv.proto.in
similarity index 90%
rename from paddle/fluid/operators/detail/send_recv.proto
rename to paddle/fluid/operators/distributed/send_recv.proto.in
index fffa9ae7a4..8b0a09abe1 100644
--- a/paddle/fluid/operators/detail/send_recv.proto
+++ b/paddle/fluid/operators/distributed/send_recv.proto.in
@@ -1,3 +1,4 @@
+
 /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -14,6 +15,8 @@ limitations under the License. */
 syntax = "proto3";
 package sendrecv;
 
+option cc_generic_services = @cc_generic_services@;
+
 service SendRecvService {
   // For parameter server round-robin like hashing, do not split tensors.
   // Send and recv only one tensor
@@ -23,6 +26,8 @@ service SendRecvService {
   rpc GetVariable(VariableMessage) returns (VariableMessage) {}
   // pre-fetch variable by given variable name and Ids
   rpc PrefetchVariable(VariableMessage) returns (VariableMessage) {}
+
+  rpc CheckpointNotify(VariableMessage) returns (VoidMessage) {}
 }
 
 // VariableMessage is serialized paddle variable message.
@@ -32,6 +37,7 @@ service SendRecvService {
 enum VarType {
   LOD_TENSOR = 0;
   SELECTED_ROWS = 1;
+  NCCL_ID = 2;
 }
 
 // NOTICE(gongwb):don't modify this proto if you are not
@@ -69,10 +75,10 @@ message VariableMessage {
   bytes rows = 9;
   // Look up table block execution output variable name.
   string out_varname = 10;
-  // If true, the ps server will start profiling, the ps
+  // If 1, the ps server will start profiling, the ps
   // server stops profiling and generates a profile to /tmp/profile_ps_*
-  // when profile switches from true to false.
-  bool profile = 11;
+  // when profile switches from 1 to 2.
+  int64 profile = 11;
 }
 
 message VoidMessage {}
diff --git a/paddle/fluid/operators/distributed/sendrecvop_utils.cc b/paddle/fluid/operators/distributed/sendrecvop_utils.cc
new file mode 100644
index 0000000000..6a3f8fd544
--- /dev/null
+++ b/paddle/fluid/operators/distributed/sendrecvop_utils.cc
@@ -0,0 +1,116 @@
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+#ifdef PADDLE_WITH_CUDA
+#include <nccl.h>
+#endif
+#include <sys/time.h>
+#include <thread>  // NOLINT
+
+#include "paddle/fluid/framework/data_type.h"
+#include "paddle/fluid/operators/distributed/sendrecvop_utils.h"
+#include "paddle/fluid/operators/distributed/variable_response.h"
+
+namespace paddle {
+namespace operators {
+namespace distributed {
+
+using VarMsg = sendrecv::VariableMessage;
+
+#ifdef PADDLE_WITH_CUDA
+void* GetVarPayLoad(const std::string varname, int64_t size) {
+  platform::CUDAPinnedPlace cuda_pinned;
+  return memory::Alloc(cuda_pinned, size);
+}
+#endif
+
+void GetTensorPayload(framework::Variable* var,
+                      const platform::DeviceContext& ctx, VarMsg* request,
+                      void** payload, size_t* payload_size) {
+  auto tensor = var->Get<framework::LoDTensor>();
+  // FIXME(wuyi): data types in send_recv.proto is copied from
+  // framework.proto
+  request->set_data_type(
+      static_cast<VarMsg::Type>(framework::ToDataType(tensor.type())));
+  for (auto& dim : framework::vectorize(tensor.dims())) {
+    request->add_dims(dim);
+  }
+  const framework::LoD lod = tensor.lod();
+  if (lod.size() > 0) {
+    request->set_lod_level(lod.size());
+    for (auto& each : lod) {
+      VarMsg::LodData* lod_inner = request->add_lod();
+      for (auto& d : each) {
+        lod_inner->add_lod_data(d);
+      }
+    }
+  }
+  if (platform::is_gpu_place(ctx.GetPlace())) {
+#ifdef PADDLE_WITH_CUDA
+    PADDLE_ENFORCE(platform::is_gpu_place(tensor.place()));
+    // platform::CUDAPinnedPlace cuda_pinned;
+    auto& gpu_dev_ctx = static_cast<const platform::CUDADeviceContext&>(ctx);
+    auto copy_size = tensor.numel() * framework::SizeOfType(tensor.type());
+    *payload = GetVarPayLoad(request->varname(), copy_size);
+
+    platform::CUDAPinnedPlace cuda_pinned;
+    memory::Copy(cuda_pinned, *payload,
+                 boost::get<platform::CUDAPlace>(tensor.place()),
+                 reinterpret_cast<const void*>(tensor.data<void>()), copy_size,
+                 gpu_dev_ctx.stream());
+
+    ctx.Wait();
+#endif
+  } else {
+    *payload = tensor.data<void>();
+  }
+  *payload_size = tensor.numel() * framework::SizeOfType(tensor.type());
+}
+
+void GetSelectedRowsPayload(framework::Variable* var,
+                            const platform::DeviceContext& ctx, VarMsg* request,
+                            void** payload, size_t* payload_size) {
+  auto* slr = var->GetMutable<framework::SelectedRows>();
+  request->set_data_type(
+      static_cast<VarMsg::Type>(framework::ToDataType(slr->value().type())));
+  request->set_lod_level(0);
+  request->set_slr_height(slr->height());
+
+  for (auto& dim : framework::vectorize(slr->value().dims())) {
+    request->add_dims(dim);
+  }
+
+  auto* tensor = slr->mutable_value();
+  if (platform::is_gpu_place(ctx.GetPlace())) {
+#ifdef PADDLE_WITH_CUDA
+    auto& gpu_dev_ctx = static_cast<const platform::CUDADeviceContext&>(ctx);
+    auto copy_size = tensor->numel() * framework::SizeOfType(tensor->type());
+    *payload = GetVarPayLoad(request->varname(), copy_size);
+
+    platform::CUDAPinnedPlace cuda_pinned;
+    memory::Copy(cuda_pinned, *payload,
+                 boost::get<platform::CUDAPlace>(tensor->place()),
+                 reinterpret_cast<const void*>(tensor->data<void>()), copy_size,
+                 gpu_dev_ctx.stream());
+    ctx.Wait();
+#endif
+  } else {
+    *payload = slr->mutable_value()->data<void>();
+  }
+  *payload_size = tensor->numel() * framework::SizeOfType(tensor->type());
+}
+
+}  // namespace distributed
+}  // namespace operators
+}  // namespace paddle
diff --git a/paddle/fluid/operators/detail/sendrecvop_utils.h b/paddle/fluid/operators/distributed/sendrecvop_utils.h
similarity index 61%
rename from paddle/fluid/operators/detail/sendrecvop_utils.h
rename to paddle/fluid/operators/distributed/sendrecvop_utils.h
index c72e1bd076..4d08d3c77a 100644
--- a/paddle/fluid/operators/detail/sendrecvop_utils.h
+++ b/paddle/fluid/operators/distributed/sendrecvop_utils.h
@@ -25,34 +25,21 @@ limitations under the License. */
 #include "paddle/fluid/framework/tensor_util.h"
 #include "paddle/fluid/framework/var_type.h"
 
-#include "paddle/fluid/operators/detail/send_recv.grpc.pb.h"
-#include "paddle/fluid/operators/detail/send_recv.pb.h"
+#include "paddle/fluid/operators/distributed/send_recv.pb.h"
 
 namespace paddle {
 namespace operators {
-namespace detail {
+namespace distributed {
 
-#define LISTEN_TERMINATE_MESSAGE "TERMINATE@RECV"
-#define BATCH_BARRIER_MESSAGE "BATCH_BARRIER@RECV"
-#define FETCH_BARRIER_MESSAGE "FETCH_BARRIER@RECV"
+using VarMsg = sendrecv::VariableMessage;
 
-static int64_t GetTimestamp() {
-  struct timeval tp;
-  gettimeofday(&tp, NULL);
-  return tp.tv_sec * 1000 + tp.tv_usec / 1000;
-}
-
-typedef void (*DestroyCallback)(void*);
-
-void SerializeToByteBuffer(const std::string& name, framework::Variable* var,
-                           const platform::DeviceContext& ctx,
-                           ::grpc::ByteBuffer* msg,
-                           const std::string& out_varname = std::string());
+void GetTensorPayload(framework::Variable* var,
+                      const platform::DeviceContext& ctx, VarMsg* request,
+                      void** payload, size_t* payload_size);
 
-void DeserializeFromByteBuffer(const ::grpc::ByteBuffer& msg,
-                               const platform::DeviceContext& ctx,
-                               const framework::Scope* scope,
-                               framework::Variable** var);
+void GetSelectedRowsPayload(framework::Variable* var,
+                            const platform::DeviceContext& ctx, VarMsg* request,
+                            void** payload, size_t* payload_size);
 
 inline std::type_index ToTypeIndex(sendrecv::VariableMessage::Type type) {
   switch (type) {
@@ -71,6 +58,6 @@ inline std::type_index ToTypeIndex(sendrecv::VariableMessage::Type type) {
   }
 }
 
-}  // namespace detail
+}  // namespace distributed
 }  // namespace operators
 }  // namespace paddle
diff --git a/paddle/fluid/operators/distributed/variable_response.cc b/paddle/fluid/operators/distributed/variable_response.cc
new file mode 100644
index 0000000000..466bce18af
--- /dev/null
+++ b/paddle/fluid/operators/distributed/variable_response.cc
@@ -0,0 +1,214 @@
+// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "paddle/fluid/operators/distributed/variable_response.h"
+#include <vector>
+#include "paddle/fluid/operators/distributed/sendrecvop_utils.h"
+
+namespace paddle {
+namespace operators {
+namespace distributed {
+
+bool VariableResponse::ReadRaw(::google::protobuf::io::CodedInputStream* input,
+                               const platform::DeviceContext& dev_ctx,
+                               platform::Place place, void* dest,
+                               int64_t size) {
+  const void* data = NULL;
+  int size_to_write = 0;
+  int64_t length = size;
+  int total_written = 0;
+
+  if (platform::is_gpu_place(place)) {
+#ifdef PADDLE_WITH_CUDA
+    auto& gpu_dev_ctx =
+        static_cast<const platform::CUDADeviceContext&>(dev_ctx);
+    platform::CPUPlace cpu;
+
+    char* p = reinterpret_cast<char*>(dest);
+    while (total_written < length) {
+      if (!input->GetDirectBufferPointer(&data, &size_to_write)) {
+        return false;
+      }
+      // NOTE: if the raw buffer is large and has two neighbor fields of raw
+      // buffers, GetDirectBufferPointer can get all of them, so use length to
+      // truncate it.
+      if (total_written + size_to_write > length) {
+        size_to_write = length - total_written;
+      }
+      // This log is useful for seeing the internal block size of an rpc.
+      VLOG(7) << "copy " << size_to_write << " data to CUDAPlace";
+      memory::Copy(boost::get<platform::CUDAPlace>(place),
+                   reinterpret_cast<void*>(p), cpu, data, size_to_write,
+                   gpu_dev_ctx.stream());
+      p += size_to_write;
+      total_written += size_to_write;
+
+      input->Skip(size_to_write);
+    }
+    gpu_dev_ctx.Wait();
+#else
+    PADDLE_THROW("Unexpected branch");
+#endif
+    return true;
+  }
+
+  char* p = reinterpret_cast<char*>(dest);
+  while (total_written < length) {
+    if (!input->GetDirectBufferPointer(&data, &size_to_write)) {
+      return false;
+    }
+    // NOTE: if the raw buffer is large and has two neighbor fields of raw
+    // buffers, GetDirectBufferPointer can get all of them, so use length to
+    // truncate it.
+    if (total_written + size_to_write > length) {
+      size_to_write = length - total_written;
+    }
+    // TODO(gongwb): can we avoid copy?
+    platform::CPUPlace cpu;
+    // This log is useful for seeing the internal block size of an rpc.
+    VLOG(7) << "copy " << size_to_write << " data to CPUPlace";
+    memory::Copy(cpu, reinterpret_cast<void*>(p), cpu, data, size_to_write);
+
+    p += size_to_write;
+    total_written += size_to_write;
+
+    input->Skip(size_to_write);
+  }
+
+  return true;
+}
+
+bool VariableResponse::CopyLodTensorData(
+    ::google::protobuf::io::CodedInputStream* input,
+    const platform::DeviceContext& ctx, const framework::DDim& dims,
+    int length) {
+  auto* tensor = GetVar()->GetMutable<framework::LoDTensor>();
+  tensor->Resize(dims);
+
+  framework::LoD lod;
+  for (int i = 0; i < meta_.lod_level(); ++i) {
+    framework::Vector<size_t> v;
+    for (int j = 0; j < meta_.lod(i).lod_data_size(); ++j) {
+      v.push_back(meta_.lod(i).lod_data(j));
+    }
+    lod.push_back(v);
+  }
+  tensor->set_lod(lod);
+
+  void* tensor_data =
+      tensor->mutable_data(ctx.GetPlace(), ToTypeIndex(meta_.data_type()));
+
+  if (!ReadRaw(input, ctx, tensor->place(), tensor_data, length)) {
+    return false;
+  }
+
+  return true;
+}
+
+inline framework::DDim GetDims(
+    const ::google::protobuf::RepeatedField<::google::protobuf::int64>& dims) {
+  std::vector<int> vecdims;
+  for (auto& d : dims) {
+    vecdims.push_back(d);
+  }
+  return framework::make_ddim(vecdims);
+}
+
+bool VariableResponse::CopySelectRowsTensorData(
+    ::google::protobuf::io::CodedInputStream* input,
+    const platform::DeviceContext& ctx, const framework::DDim& dims,
+    int length) {
+  auto* slr = GetVar()->GetMutable<framework::SelectedRows>();
+  slr->set_height(meta_.slr_height());
+  auto* tensor = slr->mutable_value();
+  tensor->Resize(dims);
+  PADDLE_ENFORCE_EQ(static_cast<size_t>(tensor->numel()),
+                    length / framework::SizeOfType(
+                                 paddle::operators::distributed::ToTypeIndex(
+                                     meta_.data_type())));
+  void* tensor_data = tensor->mutable_data(
+      ctx.GetPlace(),
+      paddle::operators::distributed::ToTypeIndex(meta_.data_type()));
+
+  if (!ReadRaw(input, ctx, tensor->place(), tensor_data, length)) {
+    return false;
+  }
+
+  return true;
+}
+
+bool VariableResponse::CopySelectRowsData(
+    ::google::protobuf::io::CodedInputStream* input,
+    const platform::DeviceContext& ctx, int length) {
+  auto* slr = GetVar()->GetMutable<framework::SelectedRows>();
+  slr->mutable_rows()->resize(length /
+                              framework::SizeOfType(typeid(int64_t)));  // int64
+  int64_t* rows_data = slr->mutable_rows()->data();
+
+  // copy rows CPU data, GPU data will be copied lazily.
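+  // Each row index is an int64, so the resize above uses
+  // length / SizeOfType(typeid(int64_t)) as the number of incoming rows.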
+  platform::CPUPlace cpu;
+  if (!ReadRaw(input, ctx, cpu, rows_data, length)) {
+    return false;
+  }
+
+  return true;
+}
+
+bool VariableResponse::ProcSerializedField(
+    int tag, ::google::protobuf::io::CodedInputStream* input,
+    int64_t num_bytes) {
+  PADDLE_ENFORCE((meta_.type() == sendrecv::SELECTED_ROWS ||
+                  meta_.type() == sendrecv::LOD_TENSOR ||
+                  meta_.type() == sendrecv::NCCL_ID) &&
+                     meta_.varname() != "",
+                 "meta info should be got first!");
+
+  if (meta_.type() == sendrecv::NCCL_ID) {
+#ifdef PADDLE_WITH_CUDA
+    auto* var = scope_->FindVar(meta_.varname());
+    if (var != nullptr) {
+      ncclUniqueId* id = var->GetMutable<ncclUniqueId>();
+      if (!ReadRaw(input, *dev_ctx_, platform::CPUPlace(), id->internal,
+                   num_bytes)) {
+        return false;
+      }
+    }
+    return true;
+#else
+    PADDLE_THROW("Not compiled with CUDA!");
+    return false;
+#endif
+  }
+
+  framework::DDim dims = GetDims(meta_.dims());
+  if (meta_.type() == sendrecv::LOD_TENSOR) {
+    PADDLE_ENFORCE(meta_.lod_size() >= 0, "lod info should be got first!");
+    if (!CopyLodTensorData(input, *dev_ctx_, dims, num_bytes)) {
+      return false;
+    }
+    return true;
+  }
+
+  if (meta_.type() == sendrecv::SELECTED_ROWS) {
+    if (!CopySelectRowsTensorData(input, *dev_ctx_, dims, num_bytes)) {
+      return false;
+    }
+    return true;
+  }
+
+  return true;
+}
+
+};  // namespace distributed
+};  // namespace operators
+};  // namespace paddle
diff --git a/paddle/fluid/operators/detail/variable_response.h b/paddle/fluid/operators/distributed/variable_response.h
similarity index 60%
rename from paddle/fluid/operators/detail/variable_response.h
rename to paddle/fluid/operators/distributed/variable_response.h
index bf624da2a6..6aec52ca00 100644
--- a/paddle/fluid/operators/detail/variable_response.h
+++ b/paddle/fluid/operators/distributed/variable_response.h
@@ -22,17 +22,34 @@
 #include "paddle/fluid/framework/selected_rows.h"
 #include "paddle/fluid/framework/var_type.h"
 
-#include "paddle/fluid/operators/detail/send_recv.grpc.pb.h"
-#include "paddle/fluid/operators/detail/send_recv.pb.h"
-
 #include "google/protobuf/io/coded_stream.h"
 #include "google/protobuf/io/zero_copy_stream.h"
 #include "paddle/fluid/framework/tensor.h"
-#include "paddle/fluid/operators/detail/bytebuffer_stream.h"
+#include "paddle/fluid/operators/distributed/send_recv.pb.h"
 
 namespace paddle {
 namespace operators {
-namespace detail {
+namespace distributed {
+
+// Source provides a way for a particular RPC implementation to provide
+// received data to ParseFrom.
+class Source {
+ public:
+  virtual ~Source() {}
+
+  // Return the stream that contains the data to be parsed.
+  // Note that this method might be invoked more than once if
+  // ParseFrom needs to fall back to a more expensive parsing method.
+  // Every call must return a stream pointing at the beginning of
+  // the serialized RecvTensorResponse.
+  //
+  // Note that a subsequent call to contents() invalidates previous
+  // results of contents().
+  //
+  // Ownership of the returned stream is retained by the Source and
+  // should not be deleted by the caller.
+  virtual ::google::protobuf::io::ZeroCopyInputStream* contents() = 0;
+};
 
 class VariableResponse {
  public:
@@ -51,24 +68,21 @@ class VariableResponse {
     }
   }
 
-  // return:
-  // 0:ok.
-  // -1: unkown error.
-  // other: number of error field.
-  int Parse(Source* source);
+  int Parse(Source* source, const sendrecv::VariableMessage& meta) {
+    meta_ = meta;
+    return Parse(source);
+  }
 
   // return:
   // 0:ok.
   // -1: unkown error.
   // other: number of error field.
- int Parse(const ::grpc::ByteBuffer& byte_buffer); - - const framework::Scope& GetLocalScope() const { return *local_scope_; } - - framework::Scope* GetMutableLocalScope() const { return local_scope_; } + virtual int Parse(Source* source) = 0; - inline std::string Varname() { return meta_.varname(); } - inline std::string OutVarname() { return meta_.out_varname(); } + inline const framework::Scope& GetLocalScope() const { return *local_scope_; } + inline framework::Scope* GetMutableLocalScope() const { return local_scope_; } + inline std::string Varname() const { return meta_.varname(); } + inline std::string OutVarname() const { return meta_.out_varname(); } // should call parse first. framework::Variable* GetVar() { @@ -78,7 +92,11 @@ class VariableResponse { return scope_->FindVar(meta_.varname()); } - private: + protected: + bool ReadRaw(::google::protobuf::io::CodedInputStream* input, + const platform::DeviceContext& dev_ctx, platform::Place place, + void* dest, int64_t size); + bool CopySelectRowsTensorData(::google::protobuf::io::CodedInputStream* input, const platform::DeviceContext& ctx, const framework::DDim& dims, int length); @@ -90,15 +108,19 @@ class VariableResponse { const platform::DeviceContext& ctx, const framework::DDim& dims, int length); - private: + bool ProcSerializedField(int tag, + ::google::protobuf::io::CodedInputStream* input, + int64_t num_bytes); + + protected: const framework::Scope* scope_; const platform::DeviceContext* dev_ctx_; bool create_scope_ = false; framework::Scope* local_scope_ = nullptr; - // only Skeleton + sendrecv::VariableMessage meta_; }; -}; // namespace detail +}; // namespace distributed }; // namespace operators }; // namespace paddle diff --git a/paddle/fluid/operators/dropout_op.cc b/paddle/fluid/operators/dropout_op.cc index 4ed1b54884..07322e720f 100644 --- a/paddle/fluid/operators/dropout_op.cc +++ b/paddle/fluid/operators/dropout_op.cc @@ -37,8 +37,7 @@ class DropoutOp : public framework::OperatorWithKernel { class DropoutOpMaker : public framework::OpProtoAndCheckerMaker { public: - DropoutOpMaker(OpProto* proto, OpAttrChecker* op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("X", "The input of dropout op."); AddOutput("Out", "The output of dropout op."); AddOutput("Mask", "The random sampled dropout mask.").AsIntermediate(); diff --git a/paddle/fluid/operators/edit_distance_op.cc b/paddle/fluid/operators/edit_distance_op.cc index c7f037d2df..de25a3dab5 100644 --- a/paddle/fluid/operators/edit_distance_op.cc +++ b/paddle/fluid/operators/edit_distance_op.cc @@ -49,8 +49,7 @@ class EditDistanceOp : public framework::OperatorWithKernel { class EditDistanceOpMaker : public framework::OpProtoAndCheckerMaker { public: - EditDistanceOpMaker(OpProto *proto, OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("Hyps", "(2-D LoDTensor, 2nd dim. equal to 1) " "The indices for hypothesis strings."); diff --git a/paddle/fluid/operators/elementwise_add_mkldnn_op.cc b/paddle/fluid/operators/elementwise_add_mkldnn_op.cc new file mode 100644 index 0000000000..c86cd57316 --- /dev/null +++ b/paddle/fluid/operators/elementwise_add_mkldnn_op.cc @@ -0,0 +1,193 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+#include "paddle/fluid/memory/memcpy.h"
+#include "paddle/fluid/operators/elementwise_add_op.h"
+#include "paddle/fluid/operators/elementwise_op_function.h"
+
+#include "paddle/fluid/platform/mkldnn_helper.h"
+
+namespace paddle {
+namespace operators {
+
+using framework::DataLayout;
+using framework::Tensor;
+using mkldnn::memory;
+using mkldnn::reorder;
+using mkldnn::primitive;
+using mkldnn::stream;
+using mkldnn::sum;
+
+template <typename T>
+class EltwiseAddMKLDNNKernel : public framework::OpKernel<T> {
+ public:
+  void Compute(const framework::ExecutionContext& ctx) const override {
+    auto& dev_ctx =
+        ctx.template device_context<paddle::platform::MKLDNNDeviceContext>();
+    const auto& mkldnn_engine = dev_ctx.GetEngine();
+
+    auto* x = ctx.Input<Tensor>("X");
+    auto* y = ctx.Input<Tensor>("Y");
+    auto* z = ctx.Output<Tensor>("Out");
+    const T* x_data = x->data<T>();
+    const T* y_data = y->data<T>();
+    T* z_data = z->mutable_data<T>(ctx.GetPlace());
+
+    int axis = ctx.Attr<int>("axis");
+
+    auto x_dims = x->dims();
+    auto y_dims_untrimed = y->dims();
+    auto z_dims = z->dims();
+
+    // Execute default elementwise_add operator when
+    // broadcast operations need to be performed.
+    if (x_dims != y_dims_untrimed) {
+      auto sum_func = [](T a, T b) -> T { return a + b; };
+
+      TransformFunctor<decltype(sum_func), T,
+                       paddle::platform::CPUDeviceContext, T>
+          functor(
+              x, y, z,
+              ctx.template device_context<paddle::platform::CPUDeviceContext>(),
+              sum_func);
+
+      axis = (axis == -1 ? x_dims.size() - y_dims_untrimed.size() : axis);
+      PADDLE_ENFORCE(axis >= 0 && axis < x_dims.size(),
+                     "Axis should be in range [0, x_dims)");
+
+      auto y_dims = trim_trailing_singular_dims(y_dims_untrimed);
+      axis = (y_dims.size() == 0) ? x_dims.size() : axis;
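+      // trim_trailing_singular_dims() drops trailing dimensions of size 1
+      // from Y, e.g. (4, 5, 1, 1) => (4, 5); if nothing is left, Y is a
+      // scalar and the axis is moved past the last dimension of X.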
+
+      int pre, n, post;
+      get_mid_dims(x_dims, y_dims, axis, &pre, &n, &post);
+
+      if (post == 1) {
+        functor.RunRowWise(n, pre);
+      } else {
+        functor.RunMidWise(n, pre, post);
+      }
+      z->set_layout(DataLayout::kMKLDNN);
+      z->set_format(x->format());
+    } else {
+      PADDLE_ENFORCE(x->layout() == DataLayout::kMKLDNN &&
+                         x->format() != memory::format::format_undef,
+                     "Wrong layout/format set for X tensor");
+      PADDLE_ENFORCE(y->layout() == DataLayout::kMKLDNN &&
+                         y->format() != memory::format::format_undef,
+                     "Wrong layout/format set for Y tensor");
+
+      std::vector<int> src_x_tz = framework::vectorize2int(x_dims);
+      std::vector<int> src_y_tz = framework::vectorize2int(y_dims_untrimed);
+      std::vector<int> dst_tz = framework::vectorize2int(z_dims);
+
+      std::vector<memory::primitive_desc> srcs_pd;
+      std::vector<memory> srcs;
+      std::vector<float> scales = {1.0f, 1.0f};
+
+      auto src_x_pd = memory::primitive_desc(
+          {{src_x_tz}, memory::data_type::f32, x->format()}, mkldnn_engine);
+      auto src_y_pd = memory::primitive_desc(
+          {{src_y_tz}, memory::data_type::f32, y->format()}, mkldnn_engine);
+      auto src_x_memory =
+          memory(src_x_pd, paddle::platform::to_void_cast(x_data));
+      auto src_y_memory =
+          memory(src_y_pd, paddle::platform::to_void_cast(y_data));
+
+      srcs_pd.push_back(src_x_pd);
+      srcs_pd.push_back(src_y_pd);
+      srcs.push_back(src_x_memory);
+      srcs.push_back(src_y_memory);
+
+      auto dst_md =
+          memory::desc({dst_tz}, memory::data_type::f32, memory::format::any);
+
+      // create primitive descriptor for sum
+      auto sum_pd = sum::primitive_desc(dst_md, scales, srcs_pd);
+
+      // create mkldnn memory for dst
+      memory dst_memory = memory(sum_pd.dst_primitive_desc(), z_data);
+
+      std::vector<primitive::at> inputs;
+      inputs.push_back(srcs[0]);
+      inputs.push_back(srcs[1]);
+
+      // create sum primitive
+      auto sum_prim = sum(sum_pd, inputs, dst_memory);
+
+      std::vector<primitive> pipeline;
+      pipeline.push_back(sum_prim);
+      stream(stream::kind::eager).submit(pipeline).wait();
+
+      z->set_layout(DataLayout::kMKLDNN);
+      z->set_format(
+          (memory::format)dst_memory.get_primitive_desc().desc().data.format);
+    }
+  }
+};
+
+template <typename T>
+class EltwiseAddMKLDNNGradKernel : public framework::OpKernel<T> {
+ public:
+  void Compute(const framework::ExecutionContext& ctx) const override {
+    using Tensor = framework::Tensor;
+
+    auto* dout = ctx.Input<Tensor>(framework::GradVarName("Out"));
+    auto* dx = ctx.Output<Tensor>(framework::GradVarName("X"));
+    auto* dy = ctx.Output<Tensor>(framework::GradVarName("Y"));
+    int axis = ctx.Attr<int>("axis");
+    // skip out, x, y,
+    // dout length is larger or equal than dx, dy.
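+    // Out, X and Y are not used by the identity gradient of add, so dout
+    // stands in for all three below; only the shapes of dout, dx and dy
+    // matter.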
+    auto* out = dout;
+    auto *x = dout, *y = dout;
+
+    auto set_mkldnn_format = [](Tensor* in, const Tensor* out) {
+      in->set_layout(DataLayout::kMKLDNN);
+      in->set_format(out->format());
+    };
+
+    if (dx != nullptr && dy != nullptr && dx->dims() == dy->dims()) {
+      if (dx->dims() == dy->dims()) {
+        auto blas = math::GetBlas<paddle::platform::CPUDeviceContext, T>(ctx);
+        if (dx) {
+          blas.VCOPY(dout->numel(), dout->data<T>(),
+                     dx->mutable_data<T>(ctx.GetPlace()));
+          set_mkldnn_format(dx, dout);
+        }
+
+        if (dy) {
+          blas.VCOPY(dout->numel(), dout->data<T>(),
+                     dy->mutable_data<T>(ctx.GetPlace()));
+          set_mkldnn_format(dy, dout);
+        }
+      }
+    } else {
+      // Execute default kernel when broadcast is needed
+      ElemwiseExplicitGradCompute<paddle::platform::CPUDeviceContext, T,
+                                  IdentityGrad<T>, IdentityGrad<T>>(
+          ctx, *x, *y, *out, *dout, axis, dx, dy, IdentityGrad<T>(),
+          IdentityGrad<T>());
+    }
+  }
+};
+
+}  // namespace operators
+}  // namespace paddle
+
+namespace ops = paddle::operators;
+
+REGISTER_OP_KERNEL(elementwise_add, MKLDNN, ::paddle::platform::CPUPlace,
+                   ops::EltwiseAddMKLDNNKernel<float>)
+
+REGISTER_OP_KERNEL(elementwise_add_grad, MKLDNN, ::paddle::platform::CPUPlace,
+                   ops::EltwiseAddMKLDNNGradKernel<float>)
diff --git a/paddle/fluid/operators/elementwise_add_op.cc b/paddle/fluid/operators/elementwise_add_op.cc
index 4aab54f602..3c97ac995c 100644
--- a/paddle/fluid/operators/elementwise_add_op.cc
+++ b/paddle/fluid/operators/elementwise_add_op.cc
@@ -14,26 +14,10 @@ limitations under the License. */
 
 #include "paddle/fluid/operators/elementwise_add_op.h"
 #include "paddle/fluid/operators/elementwise_op.h"
-
-namespace paddle {
-namespace operators {
-class ElementwiseAddOpMaker : public ElementwiseOpMaker {
- public:
-  ElementwiseAddOpMaker(OpProto* proto, OpAttrChecker* op_checker)
-      : ElementwiseOpMaker(proto, op_checker) {
-    SetComment("Add", "Out = X + Y");
-    AddComment(comment_);
-  }
-};
-}  // namespace operators
-}  // namespace paddle
-
 namespace ops = paddle::operators;
-REGISTER_OPERATOR(elementwise_add, ops::ElementwiseOp,
-                  ops::ElementwiseAddOpMaker, ops::ElementwiseOpInferVarType,
-                  paddle::framework::DefaultGradOpDescMaker<true>);
-REGISTER_OPERATOR(elementwise_add_grad, ops::ElementwiseOpGrad);
-
+REGISTER_ELEMWISE_GRAD_MAKER(elementwise_add, Add);
+REGISTER_ELEMWISE_EXPLICIT_OP(elementwise_add, "Add", "Out = X + Y", "Out",
+                              "X");
 REGISTER_OP_CPU_KERNEL(
     elementwise_add,
     ops::ElementwiseAddKernel<paddle::platform::CPUDeviceContext, float>,
diff --git a/paddle/fluid/operators/elementwise_add_op.h b/paddle/fluid/operators/elementwise_add_op.h
index 253964562c..5356105e2e 100644
--- a/paddle/fluid/operators/elementwise_add_op.h
+++ b/paddle/fluid/operators/elementwise_add_op.h
@@ -14,7 +14,9 @@ limitations under the License. */
 
 #pragma once
+#include "paddle/fluid/framework/eigen.h"
 #include "paddle/fluid/operators/elementwise_op_function.h"
+#include "paddle/fluid/operators/math/blas.h"
 
 namespace paddle {
 namespace operators {
@@ -24,19 +26,57 @@ struct AddFunctor {
   inline HOSTDEVICE T operator()(T a, T b) const { return a + b; }
 };
 
+template <typename DeviceContext, typename T>
+void default_elementwise_add(const framework::ExecutionContext& ctx,
+                             const framework::Tensor* x,
+                             const framework::Tensor* y, framework::Tensor* z) {
+  int axis = ctx.Attr<int>("axis");
+  ElementwiseComputeEx<AddFunctor<T>, DeviceContext, T>(ctx, x, y, axis,
+                                                        AddFunctor<T>(), z);
+}
+
+template <typename DeviceContext, typename T>
+typename std::enable_if<
+    std::is_floating_point<T>::value &&
+    std::is_same<DeviceContext, platform::CPUDeviceContext>::value>::type
+elementwise_add(const framework::ExecutionContext& ctx,
+                const framework::Tensor* x, const framework::Tensor* y,
+                framework::Tensor* z) {
+  auto eigen_x = framework::EigenVector<T>::Flatten(*x);
+  auto eigen_y = framework::EigenVector<T>::Flatten(*y);
+  auto eigen_z = framework::EigenVector<T>::Flatten(*z);
+
+  auto blas = math::GetBlas<DeviceContext, T>(ctx);
+  blas.VADD(x->numel(), eigen_x.data(), eigen_y.data(), eigen_z.data());
+}
+
+template <typename DeviceContext, typename T>
+typename std::enable_if<
+    !std::is_floating_point<T>::value ||
+    !std::is_same<DeviceContext, platform::CPUDeviceContext>::value>::type
+elementwise_add(const framework::ExecutionContext& ctx,
+                const framework::Tensor* x, const framework::Tensor* y,
+                framework::Tensor* z) {
+  default_elementwise_add<DeviceContext, T>(ctx, x, y, z);
+}
+
 template <typename DeviceContext, typename T>
 class ElementwiseAddKernel : public framework::OpKernel<T> {
  public:
   void Compute(const framework::ExecutionContext& ctx) const override {
     using Tensor = framework::Tensor;
 
-    auto* x = ctx.Input<Tensor>("X");
-    auto* y = ctx.Input<Tensor>("Y");
-    auto* z = ctx.Output<Tensor>("Out");
+    const auto x = ctx.Input<Tensor>("X");
+    const auto y = ctx.Input<Tensor>("Y");
+    auto z = ctx.Output<Tensor>("Out");
     z->mutable_data<T>(ctx.GetPlace());
-    int axis = ctx.Attr<int>("axis");
-    ElementwiseComputeEx<AddFunctor<T>, DeviceContext, T>(ctx, x, y, axis,
-                                                          AddFunctor<T>(), z);
+
+    auto dims_equal = x->dims() == y->dims();
+    if (dims_equal) {
+      elementwise_add<DeviceContext, T>(ctx, x, y, z);
+    } else {
+      default_elementwise_add<DeviceContext, T>(ctx, x, y, z);
+    }
   }
 };
 
@@ -45,22 +85,76 @@ struct IdentityGrad {
   HOSTDEVICE T operator()(T x, T y, T out, T dout) const { return dout; }
 };
 
+template <typename DeviceContext, typename T>
+void default_elementwise_add_grad(const framework::ExecutionContext& ctx,
+                                  const framework::Tensor* x,
+                                  const framework::Tensor* y,
+                                  const framework::Tensor* out,
+                                  const framework::Tensor* dout,
+                                  framework::Tensor* dx,
+                                  framework::Tensor* dy) {
+  int axis = ctx.Attr<int>("axis");
+
+  ElemwiseExplicitGradCompute<DeviceContext, T, IdentityGrad<T>,
+                              IdentityGrad<T>>(ctx, *x, *y, *out, *dout, axis,
+                                               dx, dy, IdentityGrad<T>(),
+                                               IdentityGrad<T>());
+}
+
+template <typename DeviceContext, typename T>
+typename std::enable_if<
+    std::is_floating_point<T>::value &&
+    std::is_same<DeviceContext, platform::CPUDeviceContext>::value>::type
+elementwise_add_grad(const framework::ExecutionContext& ctx,
+                     const framework::Tensor* x, const framework::Tensor* y,
+                     const framework::Tensor* out,
+                     const framework::Tensor* dout, framework::Tensor* dx,
+                     framework::Tensor* dy) {
+  auto blas = math::GetBlas<DeviceContext, T>(ctx);
+
+  if (dx) {
+    blas.VCOPY(dout->numel(), dout->data<T>(),
+               dx->mutable_data<T>(ctx.GetPlace()));
+  }
+
+  if (dy) {
+    blas.VCOPY(dout->numel(), dout->data<T>(),
+               dy->mutable_data<T>(ctx.GetPlace()));
+  }
+}
+
+template <typename DeviceContext, typename T>
+typename std::enable_if<
+    !std::is_floating_point<T>::value ||
+    !std::is_same<DeviceContext, platform::CPUDeviceContext>::value>::type
+elementwise_add_grad(const framework::ExecutionContext& ctx,
+                     const framework::Tensor* x, const framework::Tensor* y,
+                     const framework::Tensor* out,
+                     const framework::Tensor* dout, framework::Tensor* dx,
+                     framework::Tensor* dy) {
+  default_elementwise_add_grad<DeviceContext, T>(ctx, x, y, out, dout, dx, dy);
+}
+
 template <typename DeviceContext, typename T>
 class ElementwiseAddGradKernel : public framework::OpKernel<T> {
  public:
   void Compute(const framework::ExecutionContext& ctx) const override {
     using Tensor = framework::Tensor;
 
-    auto* x = ctx.Input<Tensor>("X");
-    auto* y = ctx.Input<Tensor>("Y");
-    auto* out = ctx.Input<Tensor>("Out");
     auto* dout = ctx.Input<Tensor>(framework::GradVarName("Out"));
     auto* dx = ctx.Output<Tensor>(framework::GradVarName("X"));
     auto* dy = ctx.Output<Tensor>(framework::GradVarName("Y"));
-    int axis = ctx.Attr<int>("axis");
-    ElemwiseGradCompute<DeviceContext, T, IdentityGrad<T>, IdentityGrad<T>>(
-        ctx, *x, *y, *out, *dout, axis, dx, dy, IdentityGrad<T>(),
-        IdentityGrad<T>());
+    // skip out, x, y
+    auto* out = dout;
+    auto *x = dout, *y = dout;
+
+    if (platform::is_cpu_place(ctx.GetPlace()) && dx != nullptr &&
+        dy != nullptr && (dx->dims() == dy->dims())) {
+      elementwise_add_grad<DeviceContext, T>(ctx, x, y, out, dout, dx, dy);
+    } else {
+      default_elementwise_add_grad<DeviceContext, T>(ctx, x, y, out, dout, dx,
+                                                     dy);
+    }
   }
 };
diff --git a/paddle/fluid/operators/elementwise_div_op.cc b/paddle/fluid/operators/elementwise_div_op.cc
index c7ddafcad1..84c8a65e5f 100644
--- a/paddle/fluid/operators/elementwise_div_op.cc
+++ b/paddle/fluid/operators/elementwise_div_op.cc
@@ -14,26 +14,10 @@ limitations under the License. */
 
 #include "paddle/fluid/operators/elementwise_div_op.h"
 #include "paddle/fluid/operators/elementwise_op.h"
+namespace ops = paddle::operators;
 
-namespace paddle {
-namespace operators {
-class ElementwiseDivOpMaker : public ElementwiseOpMaker {
- public:
-  ElementwiseDivOpMaker(OpProto* proto, OpAttrChecker* op_checker)
-      : ElementwiseOpMaker(proto, op_checker) {
-    SetComment("Div", "Out = X / Y");
-    AddComment(comment_);
-  }
-};
-
-}  // namespace operators
-}  // namespace paddle
+REGISTER_ELEMWISE_OP(elementwise_div, "Div", "Out = X / Y");
 
-namespace ops = paddle::operators;
-REGISTER_OPERATOR(elementwise_div, ops::ElementwiseOp,
-                  ops::ElementwiseDivOpMaker,
-                  paddle::framework::DefaultGradOpDescMaker<true>);
-REGISTER_OPERATOR(elementwise_div_grad, ops::ElementwiseOpGrad);
 REGISTER_OP_CPU_KERNEL(
     elementwise_div,
     ops::ElementwiseDivKernel<paddle::platform::CPUDeviceContext, float>,
diff --git a/paddle/fluid/operators/elementwise_max_op.cc b/paddle/fluid/operators/elementwise_max_op.cc
index a4fe386bb1..411671335a 100644
--- a/paddle/fluid/operators/elementwise_max_op.cc
+++ b/paddle/fluid/operators/elementwise_max_op.cc
@@ -14,25 +14,8 @@ limitations under the License. */
 
 #include "paddle/fluid/operators/elementwise_max_op.h"
 #include "paddle/fluid/operators/elementwise_op.h"
-
-namespace paddle {
-namespace operators {
-class ElementwiseMaxOpMaker : public ElementwiseOpMaker {
- public:
-  ElementwiseMaxOpMaker(OpProto* proto, OpAttrChecker* op_checker)
-      : ElementwiseOpMaker(proto, op_checker) {
-    SetComment("Max", "Out = max(X, Y)");
-    AddComment(comment_);
-  }
-};
-}  // namespace operators
-}  // namespace paddle
-
 namespace ops = paddle::operators;
-REGISTER_OPERATOR(elementwise_max, ops::ElementwiseOp,
-                  ops::ElementwiseMaxOpMaker,
-                  paddle::framework::DefaultGradOpDescMaker<true>);
-REGISTER_OPERATOR(elementwise_max_grad, ops::ElementwiseOpGrad);
+REGISTER_ELEMWISE_OP(elementwise_max, "Max", "Out = max(X, Y)");
 REGISTER_OP_CPU_KERNEL(
     elementwise_max,
     ops::ElementwiseMaxKernel<paddle::platform::CPUDeviceContext, float>,
diff --git a/paddle/fluid/operators/elementwise_min_op.cc b/paddle/fluid/operators/elementwise_min_op.cc
index 68cd6ddb4a..816192083d 100644
--- a/paddle/fluid/operators/elementwise_min_op.cc
+++ b/paddle/fluid/operators/elementwise_min_op.cc
@@ -14,25 +14,8 @@ limitations under the License. */
 
 #include "paddle/fluid/operators/elementwise_min_op.h"
 #include "paddle/fluid/operators/elementwise_op.h"
-
-namespace paddle {
-namespace operators {
-class ElementwiseMinOpMaker : public ElementwiseOpMaker {
- public:
-  ElementwiseMinOpMaker(OpProto* proto, OpAttrChecker* op_checker)
-      : ElementwiseOpMaker(proto, op_checker) {
-    SetComment("Max", "Out = min(X, Y)");
-    AddComment(comment_);
-  }
-};
-}  // namespace operators
-}  // namespace paddle
-
 namespace ops = paddle::operators;
-REGISTER_OPERATOR(elementwise_min, ops::ElementwiseOp,
-                  ops::ElementwiseMinOpMaker,
-                  paddle::framework::DefaultGradOpDescMaker<true>);
-REGISTER_OPERATOR(elementwise_min_grad, ops::ElementwiseOpGrad);
+REGISTER_ELEMWISE_OP(elementwise_min, "Min", "Out = min(X, Y)");
 REGISTER_OP_CPU_KERNEL(
     elementwise_min,
     ops::ElementwiseMinKernel<paddle::platform::CPUDeviceContext, float>,
diff --git a/paddle/fluid/operators/elementwise_mul_op.cc b/paddle/fluid/operators/elementwise_mul_op.cc
index 2dec27136a..7cd67e74de 100644
--- a/paddle/fluid/operators/elementwise_mul_op.cc
+++ b/paddle/fluid/operators/elementwise_mul_op.cc
@@ -14,27 +14,8 @@ limitations under the License. */
 
 #include "paddle/fluid/operators/elementwise_mul_op.h"
 #include "paddle/fluid/operators/elementwise_op.h"
-
-namespace paddle {
-namespace operators {
-
-class ElementwiseMulOpMaker : public ElementwiseOpMaker {
- public:
-  ElementwiseMulOpMaker(OpProto* proto, OpAttrChecker* op_checker)
-      : ElementwiseOpMaker(proto, op_checker) {
-    SetComment("Mul", "Out = X \\odot\\ Y");
-    AddComment(comment_);
-  }
-};
-
-}  // namespace operators
-}  // namespace paddle
-
 namespace ops = paddle::operators;
-REGISTER_OPERATOR(elementwise_mul, ops::ElementwiseOp,
-                  ops::ElementwiseMulOpMaker,
-                  paddle::framework::DefaultGradOpDescMaker<true>);
-REGISTER_OPERATOR(elementwise_mul_grad, ops::ElementwiseOpGrad);
+REGISTER_ELEMWISE_OP(elementwise_mul, "Mul", "Out = X \\\\odot Y");
 REGISTER_OP_CPU_KERNEL(
     elementwise_mul,
     ops::ElementwiseMulKernel<paddle::platform::CPUDeviceContext, float>,
diff --git a/paddle/fluid/operators/elementwise_op.h b/paddle/fluid/operators/elementwise_op.h
index a33634ab25..d8a12e800a 100644
--- a/paddle/fluid/operators/elementwise_op.h
+++ b/paddle/fluid/operators/elementwise_op.h
@@ -14,8 +14,12 @@ limitations under the License. */
 
 #pragma once
 #include <string>
+#include "paddle/fluid/framework/data_layout.h"
 #include "paddle/fluid/framework/op_registry.h"
 #include "paddle/fluid/framework/operator.h"
+#ifdef PADDLE_WITH_MKLDNN
+#include "paddle/fluid/platform/mkldnn_helper.h"
+#endif
 
 namespace paddle {
 namespace operators {
@@ -40,86 +44,96 @@ class ElementwiseOp : public framework::OperatorWithKernel {
     ctx->SetOutputDim("Out", x_dim);
     ctx->ShareLoD("X", /*->*/ "Out");
   }
+
+  framework::OpKernelType GetExpectedKernelType(
+      const framework::ExecutionContext& ctx) const override {
+    auto input_data_type =
+        framework::ToDataType(ctx.Input<framework::Tensor>("X")->type());
+
+#ifdef PADDLE_WITH_MKLDNN
+    if (platform::CanMKLDNNBeUsed(ctx)) {
+      return framework::OpKernelType(input_data_type, ctx.GetPlace(),
+                                     framework::DataLayout::kMKLDNN,
+                                     framework::LibraryType::kMKLDNN);
+    }
+#endif
+    return framework::OpKernelType(input_data_type, ctx.GetPlace());
+  }
 };
 
 class ElementwiseOpInferVarType : public framework::VarTypeInference {
  public:
   void operator()(const framework::OpDesc& op_desc,
                   framework::BlockDesc* block) const override {
-    auto x_var = op_desc.Input("X")[0];
-    auto out_var = op_desc.Output("Out")[0];
-    block->Var(out_var)->SetType(block->Var(x_var)->GetType());
+    auto x_name = op_desc.Input("X")[0];
+    auto out_name = op_desc.Output("Out")[0];
+    auto& x = block->FindRecursiveOrCreateVar(x_name);
+    auto& out = block->FindRecursiveOrCreateVar(out_name);
+    out.SetType(x.GetType());
   }
 };
 
 class ElementwiseOpMaker : public framework::OpProtoAndCheckerMaker {
  public:
-  ElementwiseOpMaker(OpProto* proto, OpAttrChecker* op_checker)
-      : OpProtoAndCheckerMaker(proto, op_checker) {
+  void Make() final {
     AddInput("X", "(Tensor), The first input tensor of elementwise op.");
     AddInput("Y", "(Tensor), The second input tensor of elementwise op.");
+    // AddOutput("SavedShape", "(Tensor), save X, Y shape for grad to save
+    // memory.").AsIntermediate();
     AddOutput("Out", "The output of elementwise op.");
     AddAttr<int>("axis",
                  "(int, default -1). The start dimension index "
                  "for broadcasting Y onto X.")
        .SetDefault(-1)
        .EqualGreaterThan(-1);
-    comment_ = R"DOC(
-Limited Elementwise {name} Operator.
+    AddAttr<bool>("use_mkldnn", "(bool, default false). Used by MKLDNN.")
+        .SetDefault(false);
+    AddComment(string::Sprintf(R"DOC(
+Limited Elementwise %s Operator
 
 The equation is:
 
-$${equation}$$
+$$%s$$
 
-$X$ is a tensor of any dimension and the dimensions of tensor $Y$ must be
-smaller than or equal to the dimensions of $X$.
+- $X$: a tensor of any dimension.
+- $Y$: a tensor whose dimensions must be less than or equal to the dimensions of $X$.
 
 There are two cases for this operator:
 
-1. The shape of $Y$ is same with $X$;
-2. The shape of $Y$ is a congiguous subsequencet of $X$. The trailing dimensions
-   of size 1 for $Y$ will be ignored for the consideration of subsequence.
+1. The shape of $Y$ is the same with $X$.
+2. The shape of $Y$ is a continuous subsequence of $X$.
 
 For case 2:
 
-$Y$ will be broadcasted to match the shape of $X$ and axis should be
-set to index of the start dimension to broadcast $Y$ onto $X$.
+1. Broadcast $Y$ to match the shape of $X$, where $axis$ is the start dimension index
+   for broadcasting $Y$ onto $X$.
+2. If $axis$ is -1 (default), $axis = rank(X) - rank(Y)$.
+3. The trailing dimensions of size 1 for $Y$ will be ignored for the consideration of
+   subsequence, such as shape(Y) = (2, 1) => (2).
 
-If axis is -1, it is treated as axis=rank(X)-rank(Y).
+For example:
-For example
   .. code-block:: python
 
     shape(X) = (2, 3, 4, 5), shape(Y) = (,)
     shape(X) = (2, 3, 4, 5), shape(Y) = (5,)
-    shape(X) = (2, 3, 4, 5), shape(Y) = (4, 5)
+    shape(X) = (2, 3, 4, 5), shape(Y) = (4, 5), with axis=-1(default) or axis=2
    shape(X) = (2, 3, 4, 5), shape(Y) = (3, 4), with axis=1
    shape(X) = (2, 3, 4, 5), shape(Y) = (2), with axis=0
    shape(X) = (2, 3, 4, 5), shape(Y) = (2, 1), with axis=0
 
-Either of the inputs $X$ and $Y$ or none can carry the LoD (Level of Details)
-information. However, the output only shares the LoD information with input $X$.
+The inputs $X$ and $Y$ can carry different LoD information.
+But the output only shares the LoD information with the input $X$.
 
-)DOC";
-    AddComment(comment_);
+)DOC",
+                               GetName(), GetEquation()));
+    SetReuse();
   }
 
 protected:
-  std::string comment_;
-
-  void Replace(std::string* src, std::string from, std::string to) {
-    std::size_t len_from = std::strlen(from.c_str());
-    std::size_t len_to = std::strlen(to.c_str());
-    for (std::size_t pos = src->find(from); pos != std::string::npos;
-         pos = src->find(from, pos + len_to)) {
-      src->replace(pos, len_from, to);
-    }
-  }
-
-  void SetComment(std::string name, std::string equation) {
-    Replace(&comment_, "{name}", name);
-    Replace(&comment_, "{equation}", equation);
-  }
+  virtual std::string GetName() const = 0;
+  virtual std::string GetEquation() const = 0;
+  virtual void SetReuse() {}
 };
 
 class ElementwiseOpGrad : public framework::OperatorWithKernel {
@@ -149,6 +163,98 @@ class ElementwiseOpGrad : public framework::OperatorWithKernel {
     ctx->SetOutputDim(y_grad_name, y_dims);
   }
 }
+
+  framework::OpKernelType GetExpectedKernelType(
+      const framework::ExecutionContext& ctx) const override {
+    auto input_data_type = framework::ToDataType(
+        ctx.Input<framework::Tensor>(framework::GradVarName("Out"))->type());
+
+#ifdef PADDLE_WITH_MKLDNN
+    if (platform::CanMKLDNNBeUsed(ctx)) {
+      return framework::OpKernelType(input_data_type, ctx.GetPlace(),
+                                     framework::DataLayout::kMKLDNN,
+                                     framework::LibraryType::kMKLDNN);
+    }
+#endif
+    return framework::OpKernelType(input_data_type, ctx.GetPlace());
+  }
 };
+
+// For Add, Sub op, the X, Out is not needed.
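+// Their gradients depend only on dOut (dX = dOut, and dY = dOut reduced to
+// the shape of Y), so this grad op infers its output shapes from Out@GRAD
+// and Y alone instead of keeping X and Out alive in the graph.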
+class ElementwiseOpExplicitGrad : public ElementwiseOpGrad {
+ public:
+  using operators::ElementwiseOpGrad::ElementwiseOpGrad;
+  using operators::ElementwiseOpGrad::GetExpectedKernelType;
+  using Tensor = framework::Tensor;
+
+  void InferShape(framework::InferShapeContext* ctx) const override {
+    PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Out")),
+                   "Input(Out@GRAD) should not be null");
+
+    auto x_grad_name = framework::GradVarName("X");
+    if (ctx->HasOutput(x_grad_name)) {
+      auto out_dims = ctx->GetInputDim(framework::GradVarName("Out"));
+      ctx->SetOutputDim(x_grad_name, out_dims);
+    }
+    auto y_grad_name = framework::GradVarName("Y");
+    if (ctx->HasOutput(y_grad_name)) {
+      PADDLE_ENFORCE(ctx->HasInput("Y"), "Input(Y) should not be null");
+      auto y_dims = ctx->GetInputDim("Y");
+      ctx->SetOutputDim(y_grad_name, y_dims);
+    }
+  }
+};
+
 }  // namespace operators
 }  // namespace paddle
+
+/*
+*/
+
+#define REGISTER_ELEMWISE_GRAD_MAKER(kernel_type, op_name)                    \
+  class kernel_type##GradMaker                                                \
+      : public paddle::framework::SingleGradOpDescMaker {                     \
+   public:                                                                    \
+    using ::paddle::framework::SingleGradOpDescMaker::SingleGradOpDescMaker;  \
+                                                                              \
+   protected:                                                                 \
+    std::unique_ptr<paddle::framework::OpDesc> Apply() const override {       \
+      auto* op = new paddle::framework::OpDesc();                             \
+      op->SetType(#kernel_type "_grad");                                      \
+      op->SetInput("Y", Input("Y"));                                          \
+      op->SetInput(::paddle::framework::GradVarName("Out"),                   \
+                   OutputGrad("Out"));                                        \
+      op->SetAttrMap(Attrs());                                                \
+      op->SetOutput(::paddle::framework::GradVarName("X"), InputGrad("X"));   \
+      op->SetOutput(::paddle::framework::GradVarName("Y"), InputGrad("Y"));   \
+      return std::unique_ptr<::paddle::framework::OpDesc>(op);                \
+    }                                                                         \
+  }
+
+#define REGISTER_ELEMWISE_OP(op_type, op_name, equation)                \
+  class __ElemwiseOp##op_type##Maker__                                  \
+      : public ::paddle::operators::ElementwiseOpMaker {                \
+   protected:                                                           \
+    virtual std::string GetName() const { return op_name; }             \
+    virtual std::string GetEquation() const { return equation; }        \
+  };                                                                    \
+  REGISTER_OPERATOR(op_type, ::paddle::operators::ElementwiseOp,        \
+                    __ElemwiseOp##op_type##Maker__,                     \
+                    ::paddle::operators::ElementwiseOpInferVarType,     \
+                    ::paddle::framework::DefaultGradOpDescMaker<true>); \
+  REGISTER_OPERATOR(op_type##_grad, ::paddle::operators::ElementwiseOpGrad)
+
+#define REGISTER_ELEMWISE_EXPLICIT_OP(op_type, op_name, equation, ...) \
+  class __ElemwiseOp##op_type##Maker__                                 \
+      : public ::paddle::operators::ElementwiseOpMaker {               \
+   protected:                                                          \
+    virtual std::string GetName() const { return op_name; }            \
+    virtual std::string GetEquation() const { return equation; }       \
+    virtual void SetReuse() { Reuse(__VA_ARGS__); }                    \
+  };                                                                   \
+  REGISTER_OPERATOR(op_type, ::paddle::operators::ElementwiseOp,       \
+                    __ElemwiseOp##op_type##Maker__,                    \
+                    ::paddle::operators::ElementwiseOpInferVarType,    \
+                    op_type##GradMaker);                               \
+  REGISTER_OPERATOR(op_type##_grad,                                    \
+                    ::paddle::operators::ElementwiseOpExplicitGrad)
diff --git a/paddle/fluid/operators/elementwise_op_function.h b/paddle/fluid/operators/elementwise_op_function.h
index 8b052611f8..bc3e95e904 100644
--- a/paddle/fluid/operators/elementwise_op_function.h
+++ b/paddle/fluid/operators/elementwise_op_function.h
@@ -13,7 +13,9 @@ See the License for the specific language governing permissions and
 limitations under the License. */
 
 #pragma once
+#include
 #include
+#include <vector>
 #include "paddle/fluid/framework/eigen.h"
 #include "paddle/fluid/framework/op_registry.h"
 #include "paddle/fluid/framework/operator.h"
@@ -65,17 +67,21 @@ inline void get_mid_dims(const framework::DDim& x_dims,
   }
 }
 
-inline void trim_trailing_singular_dims(framework::DDim* dims) {
+inline framework::DDim trim_trailing_singular_dims(
+    const framework::DDim& dims) {
   // Remove trailing dimensions of size 1 for y
-  auto actual_dims_size = dims->size();
+  auto actual_dims_size = dims.size();
   for (; actual_dims_size != 0; --actual_dims_size) {
-    if ((*dims)[actual_dims_size - 1] != 1) break;
+    if (dims[actual_dims_size - 1] != 1) break;
   }
-  if (actual_dims_size != dims->size()) {
-    auto actual_dims = framework::vectorize(*dims);
-    actual_dims.resize(actual_dims_size);
-    *dims = framework::make_ddim(actual_dims);
+
+  std::vector<int> trim_dims;
+  trim_dims.resize(actual_dims_size);
+  for (int i = 0; i < actual_dims_size; ++i) {
+    trim_dims[i] = dims[i];
   }
+  framework::DDim actual_dims = framework::make_ddim(trim_dims);
+  return actual_dims;
 }
 
 template <typename T, typename DX_OP, typename DY_OP>
@@ -456,6 +462,71 @@ static void ElemwiseGradBroadcast2CUDA(cudaStream_t stream, const T* x,
 
 #endif
 
+template <typename DeviceContext, typename T, typename DX_OP, typename DY_OP>
+void ElemwiseGradComputeNoBroadcast(
+    const framework::ExecutionContext& ctx, const framework::DDim& x_dim,
+    const framework::DDim& y_dim, const framework::Tensor& x,
+    const framework::Tensor& y, const framework::Tensor& out,
+    const framework::Tensor& dout, int axis, framework::Tensor* dx,
+    framework::Tensor* dy, DX_OP dx_op, DY_OP dy_op) {
+  size_t N = static_cast<size_t>(framework::product(x_dim));
+  platform::ForRange<DeviceContext> for_range(
+      ctx.template device_context<DeviceContext>(), N);
+  for_range(ElemwiseGradNoBroadcast<T, DX_OP, DY_OP>{
+      x.data<T>(), y.data<T>(), out.data<T>(), dout.data<T>(), dx_op, dy_op,
+      dx == nullptr ? nullptr : dx->mutable_data<T>(ctx.GetPlace()),
+      dy == nullptr ? nullptr : dy->mutable_data<T>(ctx.GetPlace())});
+}
+
+template <typename DeviceContext, typename T, typename DX_OP, typename DY_OP>
+void ElemwiseGradComputeWithBroadcast(
+    const framework::ExecutionContext& ctx, const framework::DDim& x_dim,
+    const framework::DDim& y_dim_untrimed, const framework::Tensor& x,
+    const framework::Tensor& y, const framework::Tensor& out,
+    const framework::Tensor& dout, int axis, framework::Tensor* dx,
+    framework::Tensor* dy, DX_OP dx_op, DY_OP dy_op) {
+  axis = (axis == -1 ? x_dim.size() - y_dim_untrimed.size() : axis);
+  auto y_dim = trim_trailing_singular_dims(y_dim_untrimed);
+  axis = (y_dim.size() == 0) ? x_dim.size() : axis;
+
+  int pre, n, post;
+  get_mid_dims(x_dim, y_dim, axis, &pre, &n, &post);
+  if (post == 1) {
+    int h = pre;
+    int w = n;
+    if (platform::is_gpu_place(ctx.GetPlace())) {
+#ifdef __NVCC__
+      ElemwiseGradBroadcast1CUDA(
+          ctx.template device_context<DeviceContext>().stream(), x.data<T>(),
+          y.data<T>(), out.data<T>(), dout.data<T>(), h, w, dx_op, dy_op,
+          dx == nullptr ? nullptr : dx->mutable_data<T>(ctx.GetPlace()),
+          dy == nullptr ? nullptr : dy->mutable_data<T>(ctx.GetPlace()));
+#endif
+    } else {
+      ElemwiseGradBroadcast1CPU(
+          x.data<T>(), y.data<T>(), out.data<T>(), dout.data<T>(), h, w, dx_op,
+          dy_op, dx == nullptr ? nullptr : dx->mutable_data<T>(ctx.GetPlace()),
+          dy == nullptr ? nullptr : dy->mutable_data<T>(ctx.GetPlace()));
+    }
+  } else {
+    if (platform::is_gpu_place(ctx.GetPlace())) {
+#ifdef __NVCC__
+      ElemwiseGradBroadcast2CUDA(
+          ctx.template device_context<DeviceContext>().stream(), x.data<T>(),
+          y.data<T>(), out.data<T>(), dout.data<T>(), pre, n, post, dx_op,
+          dy_op, dx == nullptr ? nullptr : dx->mutable_data<T>(ctx.GetPlace()),
+          dy == nullptr ? nullptr : dy->mutable_data<T>(ctx.GetPlace()));
+#endif
+    } else {
+      ElemwiseGradBroadcast2CPU(
+          x.data<T>(), y.data<T>(), out.data<T>(), dout.data<T>(), pre, n, post,
+          dx_op, dy_op,
+          dx == nullptr ? nullptr : dx->mutable_data<T>(ctx.GetPlace()),
+          dy == nullptr ? nullptr : dy->mutable_data<T>(ctx.GetPlace()));
+    }
+  }
+}
+
 template <typename DeviceContext, typename T, typename DX_OP, typename DY_OP>
 void ElemwiseGradCompute(const framework::ExecutionContext& ctx,
                          const framework::Tensor& x, const framework::Tensor& y,
@@ -463,63 +534,50 @@ void ElemwiseGradCompute(const framework::ExecutionContext& ctx,
                          const framework::Tensor& dout, int axis,
                          framework::Tensor* dx, framework::Tensor* dy,
                          DX_OP dx_op, DY_OP dy_op) {
+  const framework::DDim& x_dim = x.dims();
+  const framework::DDim& y_dim = y.dims();
   if (x.dims() == y.dims()) {
-    size_t N = static_cast<size_t>(framework::product(x.dims()));
-    platform::ForRange<DeviceContext> for_range(
-        ctx.template device_context<DeviceContext>(), N);
-    for_range(ElemwiseGradNoBroadcast<T, DX_OP, DY_OP>{
-        x.data<T>(), y.data<T>(), out.data<T>(), dout.data<T>(), dx_op, dy_op,
-        dx == nullptr ? nullptr : dx->mutable_data<T>(ctx.GetPlace()),
-        dy == nullptr ? nullptr : dy->mutable_data<T>(ctx.GetPlace())});
+    ElemwiseGradComputeNoBroadcast<DeviceContext, T, DX_OP, DY_OP>(
+        ctx, x_dim, y_dim, x, y, out, dout, axis, dx, dy, dx_op, dy_op);
   } else {  // Y is a scalar
-    auto x_dim = x.dims();
-    auto y_dim = y.dims();
-
-    axis = (axis == -1 ? x_dim.size() - y_dim.size() : axis);
-    trim_trailing_singular_dims(&y_dim);
-    axis = (y_dim.size() == 0) ? x_dim.size() : axis;
-
-    int pre, n, post;
-    get_mid_dims(x_dim, y_dim, axis, &pre, &n, &post);
-    if (post == 1) {
-      int h = pre;
-      int w = n;
-      if (platform::is_gpu_place(ctx.GetPlace())) {
-#ifdef __NVCC__
-        ElemwiseGradBroadcast1CUDA(
-            ctx.template device_context<DeviceContext>().stream(), x.data<T>(),
-            y.data<T>(), out.data<T>(), dout.data<T>(), h, w, dx_op, dy_op,
-            dx == nullptr ? nullptr : dx->mutable_data<T>(ctx.GetPlace()),
-            dy == nullptr ? nullptr : dy->mutable_data<T>(ctx.GetPlace()));
-#endif
-      } else {
-        ElemwiseGradBroadcast1CPU(
-            x.data<T>(), y.data<T>(), out.data<T>(), dout.data<T>(), h, w,
-            dx_op, dy_op,
-            dx == nullptr ? nullptr : dx->mutable_data<T>(ctx.GetPlace()),
-            dy == nullptr ? nullptr : dy->mutable_data<T>(ctx.GetPlace()));
-      }
-    } else {
-      if (platform::is_gpu_place(ctx.GetPlace())) {
-#ifdef __NVCC__
-        ElemwiseGradBroadcast2CUDA(
-            ctx.template device_context<DeviceContext>().stream(), x.data<T>(),
-            y.data<T>(), out.data<T>(), dout.data<T>(), pre, n, post, dx_op,
-            dy_op,
-            dx == nullptr ? nullptr : dx->mutable_data<T>(ctx.GetPlace()),
-            dy == nullptr ? nullptr : dy->mutable_data<T>(ctx.GetPlace()));
-#endif
-      } else {
-        ElemwiseGradBroadcast2CPU(
-            x.data<T>(), y.data<T>(), out.data<T>(), dout.data<T>(), pre, n,
-            post, dx_op, dy_op,
-            dx == nullptr ? nullptr : dx->mutable_data<T>(ctx.GetPlace()),
-            dy == nullptr ? nullptr : dy->mutable_data<T>(ctx.GetPlace()));
-      }
+    ElemwiseGradComputeWithBroadcast<DeviceContext, T, DX_OP, DY_OP>(
+        ctx, x_dim, y_dim, x, y, out, dout, axis, dx, dy, dx_op, dy_op);
+  }
+}
+
+// NOTE(dzhwinter): Only used in elementwise_add, elementwise_sub.
+// explicit gradient can cut off X, Y, Out from gradient op
+// In elementwise_add, elementwise_sub, we use dout as fake X, Y, Out to reuse
+// elementwise code.
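+// For example, for elementwise_add with shape(X) = (2, 3, 4, 5) and
+// shape(Y) = (3, 4) at axis = 1, dX is simply dOut, while dY is dOut
+// reduced (summed) over the broadcast dimensions back to shape (3, 4);
+// the ElemwiseGradBroadcast kernels above perform that reduction.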
+template <typename DeviceContext, typename T, typename DX_OP, typename DY_OP>
+void ElemwiseExplicitGradCompute(const framework::ExecutionContext& ctx,
+                                 const framework::Tensor& x,
+                                 const framework::Tensor& y,
+                                 const framework::Tensor& out,
+                                 const framework::Tensor& dout, int axis,
+                                 framework::Tensor* dx, framework::Tensor* dy,
+                                 DX_OP dx_op, DY_OP dy_op) {
+  if (dy == nullptr) {
+    const framework::DDim& dx_dims = dout.dims();
+    auto dy_dims = dx_dims;
+    ElemwiseGradComputeNoBroadcast<DeviceContext, T, DX_OP, DY_OP>(
+        ctx, dx_dims, dy_dims, x, y, out, dout, axis, dx, dy, dx_op, dy_op);
+  } else {
+    if (dout.dims() == dy->dims()) {
+      const framework::DDim& dx_dims = dout.dims();
+      const framework::DDim& dy_dims = dy->dims();
+      ElemwiseGradComputeNoBroadcast<DeviceContext, T, DX_OP, DY_OP>(
+          ctx, dx_dims, dy_dims, x, y, out, dout, axis, dx, dy, dx_op, dy_op);
+    } else {  // Y is a scalar
+      auto dx_dims = dout.dims();
+      const framework::DDim& dy_dims = dy->dims();
+      ElemwiseGradComputeWithBroadcast<DeviceContext, T, DX_OP, DY_OP>(
+          ctx, dx_dims, dy_dims, x, y, out, dout, axis, dx, dy, dx_op, dy_op);
    }
  }
}
+// Deprecated
 template <typename DeviceContext, typename T, typename functor,
           typename broadcastfunctor, typename broadcast2functor>
 void ElementwiseGradCompute(const framework::ExecutionContext& ctx,
@@ -547,7 +605,7 @@ void ElementwiseGradCompute(const framework::ExecutionContext& ctx,
   }
 
   axis = (axis == -1 ? x_dims.size() - y_dims.size() : axis);
-  trim_trailing_singular_dims(&y_dims);
+  trim_trailing_singular_dims(y_dims);
   axis = (y_dims.size() == 0) ? x_dims.size() : axis;
 
   int pre, n, post;
@@ -574,19 +632,19 @@ void ElementwiseComputeEx(const framework::ExecutionContext& ctx,
                           x, y, z,
       ctx.template device_context<DeviceContext>(), func);
 
   auto x_dims = x->dims();
-  auto y_dims = y->dims();
-  PADDLE_ENFORCE_GE(x_dims.size(), y_dims.size(),
+  auto y_dims_untrimed = y->dims();
+  PADDLE_ENFORCE_GE(x_dims.size(), y_dims_untrimed.size(),
                     "Rank of first input must >= rank of second input.");
 
-  if (x_dims == y_dims) {
+  if (x_dims == y_dims_untrimed) {
     functor.Run();
     return;
   }
 
-  axis = (axis == -1 ? x_dims.size() - y_dims.size() : axis);
+  axis = (axis == -1 ? x_dims.size() - y_dims_untrimed.size() : axis);
   PADDLE_ENFORCE(axis >= 0 && axis < x_dims.size(),
                  "Axis should be in range [0, x_dims)");
 
-  trim_trailing_singular_dims(&y_dims);
+  auto y_dims = trim_trailing_singular_dims(y_dims_untrimed);
   axis = (y_dims.size() == 0) ? x_dims.size() : axis;
 
   int pre, n, post;
diff --git a/paddle/fluid/operators/elementwise_pow_op.cc b/paddle/fluid/operators/elementwise_pow_op.cc
index 60302c5e59..5fd6bde9ba 100644
--- a/paddle/fluid/operators/elementwise_pow_op.cc
+++ b/paddle/fluid/operators/elementwise_pow_op.cc
@@ -13,17 +13,15 @@ See the License for the specific language governing permissions and
 limitations under the License. */
 
 #include "paddle/fluid/operators/elementwise_pow_op.h"
+#include <string>
 #include "paddle/fluid/operators/elementwise_op.h"
 
 namespace paddle {
 namespace operators {
 class ElementwisePowOpMaker : public ElementwiseOpMaker {
- public:
-  ElementwisePowOpMaker(OpProto* proto, OpAttrChecker* op_checker)
-      : ElementwiseOpMaker(proto, op_checker) {
-    SetComment("Pow", "Out = X ^ Y");
-    AddComment(comment_);
-  }
+ protected:
+  std::string GetName() const override { return "Pow"; }
+  std::string GetEquation() const override { return "Out = X ^ Y"; }
 };
 }  // namespace operators
 }  // namespace paddle
diff --git a/paddle/fluid/operators/elementwise_sub_op.cc b/paddle/fluid/operators/elementwise_sub_op.cc
index 9d0598fc39..b7224261e6 100644
--- a/paddle/fluid/operators/elementwise_sub_op.cc
+++ b/paddle/fluid/operators/elementwise_sub_op.cc
@@ -14,25 +14,11 @@ limitations under the License. */
*/ #include "paddle/fluid/operators/elementwise_sub_op.h" #include "paddle/fluid/operators/elementwise_op.h" - -namespace paddle { -namespace operators { -class ElementwiseSubOpMaker : public ElementwiseOpMaker { - public: - ElementwiseSubOpMaker(OpProto* proto, OpAttrChecker* op_checker) - : ElementwiseOpMaker(proto, op_checker) { - SetComment("Sub", "Out = X - Y"); - AddComment(comment_); - } -}; -} // namespace operators -} // namespace paddle - namespace ops = paddle::operators; -REGISTER_OPERATOR(elementwise_sub, ops::ElementwiseOp, - ops::ElementwiseSubOpMaker, - paddle::framework::DefaultGradOpDescMaker); -REGISTER_OPERATOR(elementwise_sub_grad, ops::ElementwiseOpGrad); +REGISTER_ELEMWISE_GRAD_MAKER(elementwise_sub, Sub); +REGISTER_ELEMWISE_EXPLICIT_OP(elementwise_sub, "Sub", "Out = X - Y", "Out", + "X"); + REGISTER_OP_CPU_KERNEL( elementwise_sub, ops::ElementwiseSubKernel, diff --git a/paddle/fluid/operators/elementwise_sub_op.h b/paddle/fluid/operators/elementwise_sub_op.h index fe088b8203..11c7e3fe62 100644 --- a/paddle/fluid/operators/elementwise_sub_op.h +++ b/paddle/fluid/operators/elementwise_sub_op.h @@ -4,7 +4,7 @@ Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, @@ -55,14 +55,15 @@ class ElementwiseSubGradKernel : public framework::OpKernel { void Compute(const framework::ExecutionContext& ctx) const override { using Tensor = framework::Tensor; - auto* x = ctx.Input("X"); - auto* y = ctx.Input("Y"); - auto* out = ctx.Input("Out"); auto* dout = ctx.Input(framework::GradVarName("Out")); auto* dx = ctx.Output(framework::GradVarName("X")); auto* dy = ctx.Output(framework::GradVarName("Y")); int axis = ctx.Attr("axis"); - ElemwiseGradCompute, SubGradDY>( + // skip out, x, y + auto* out = dout; + auto *x = dout, *y = dout; + + ElemwiseExplicitGradCompute, SubGradDY>( ctx, *x, *y, *out, *dout, axis, dx, dy, SubGradDX(), SubGradDY()); } }; diff --git a/paddle/fluid/operators/expand_op.cc b/paddle/fluid/operators/expand_op.cc index 4ae91d074d..5ad0ec2513 100644 --- a/paddle/fluid/operators/expand_op.cc +++ b/paddle/fluid/operators/expand_op.cc @@ -56,8 +56,7 @@ class ExpandOp : public framework::OperatorWithKernel { class ExpandOpMaker : public framework::OpProtoAndCheckerMaker { public: - ExpandOpMaker(OpProto* proto, OpAttrChecker* op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("X", "(Tensor, default Tensor). A tensor with rank in [1, 6]." "X is the input to be expanded."); diff --git a/paddle/fluid/operators/extract_rows_op.cc b/paddle/fluid/operators/extract_rows_op.cc new file mode 100644 index 0000000000..9a297d03cf --- /dev/null +++ b/paddle/fluid/operators/extract_rows_op.cc @@ -0,0 +1,103 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. */ + +#include +#include +#include "paddle/fluid/framework/op_registry.h" + +namespace paddle { +namespace operators { + +class ExtractRowsOpInferShape : public framework::InferShapeBase { + public: + void operator()(framework::InferShapeContext *ctx) const override { + PADDLE_ENFORCE(ctx->HasInput("X"), + "Input(X) of ExtractRowsOp should not be null."); + PADDLE_ENFORCE(ctx->HasOutput("Out"), + "Output(Out) of ExtractRowsOp should not be null."); + PADDLE_ENFORCE_EQ(ctx->GetInputsVarType("X")[0], + framework::proto::VarType::SELECTED_ROWS, + "The type of input(X) must be SelectedRows."); + auto in_dims = ctx->GetInputDim("X"); + + ctx->SetOutputDim( + "Out", framework::make_ddim(std::vector{in_dims[0], 1})); + } +}; + +class ExtractRowsOp : public framework::OperatorBase { + public: + ExtractRowsOp(const std::string &type, + const framework::VariableNameMap &inputs, + const framework::VariableNameMap &outputs, + const framework::AttributeMap &attrs) + : framework::OperatorBase(type, inputs, outputs, attrs) {} + + private: + void RunImpl(const framework::Scope &scope, + const platform::Place &place) const override { + auto &in = scope.FindVar(Input("X"))->Get(); + auto out = scope.FindVar(Output("Out"))->GetMutable(); + + auto in_rows = in.rows(); + auto out_dim = framework::make_ddim( + std::vector{static_cast(in_rows.size()), 1}); + auto dst_ptr = out->mutable_data(out_dim, in.place()); + + if (paddle::platform::is_gpu_place(in.place())) { +#ifdef PADDLE_WITH_CUDA + platform::DeviceContextPool &pool = + platform::DeviceContextPool::Instance(); + auto *dev_ctx = pool.Get(in.place()); + auto src_ptr = in_rows.Data(in.place()); + auto stream = + reinterpret_cast(*dev_ctx) + .stream(); + memory::Copy(boost::get(out->place()), dst_ptr, + boost::get(in.place()), src_ptr, + in_rows.size() * sizeof(int64_t), stream); +#else + PADDLE_THROW("Not compiled with CUDA."); +#endif + } else { + memory::Copy(platform::CPUPlace(), dst_ptr, platform::CPUPlace(), + in_rows.data(), in_rows.size() * sizeof(int64_t)); + } + } +}; + +class ExtractRowsOpMaker : public framework::OpProtoAndCheckerMaker { + public: + void Make() override { + AddInput("X", + "(SelectedRows). The input tensor of extract_rows operator," + " and its type is SelectedRows."); + AddOutput("Out", "(Tensor). The rows of input(X)."); + + AddComment(R"DOC( + ExtractRows Operator. + +extract_rows_op extracts the rows from the input(X), +whose type is SelectedRows. + + )DOC"); + } +}; + +} // namespace operators +} // namespace paddle + +namespace ops = paddle::operators; +REGISTER_OPERATOR(extract_rows, ops::ExtractRowsOp, ops::ExtractRowsOpMaker, + ops::ExtractRowsOpInferShape); diff --git a/paddle/fluid/operators/fake_dequantize_op.cc b/paddle/fluid/operators/fake_dequantize_op.cc new file mode 100644 index 0000000000..43f9491111 --- /dev/null +++ b/paddle/fluid/operators/fake_dequantize_op.cc @@ -0,0 +1,76 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and +limitations under the License. */ + +#include "paddle/fluid/operators/fake_dequantize_op.h" +#include + +namespace paddle { +namespace operators { + +class FakeDequantizeMaxAbsOp : public framework::OperatorWithKernel { + public: + FakeDequantizeMaxAbsOp(const std::string &type, + const framework::VariableNameMap &inputs, + const framework::VariableNameMap &outputs, + const framework::AttributeMap &attrs) + : OperatorWithKernel(type, inputs, outputs, attrs) {} + + void InferShape(framework::InferShapeContext *ctx) const override { + PADDLE_ENFORCE(ctx->HasInput("X"), + "Input(X) of FakeDequantizeMaxAbsOp should not be null."); + PADDLE_ENFORCE(ctx->HasOutput("Out"), + "Output(Out) of FakeDequantizeMaxAbsOp should not be null."); + ctx->SetOutputDim("Out", ctx->GetInputDim("X")); + ctx->ShareLoD("X", /*->*/ "Out"); + } +}; + +class FakeDequantizeMaxAbsOpMaker : public framework::OpProtoAndCheckerMaker { + public: + void Make() override { + AddInput("X", + "(Tensor) The input with float-32/64 type is the " + "low precision tensor."); + AddOutput("Out", + "(Tensor) The output is the dequantized high " + "precision tensor."); + AddAttr("num_bits", + "(int) `num_bits` is the quantization level bits, " + "such as 2, 5, 8."); + AddAttr("scale", + "(float) The maximum absolute value of low precision tensor." + "It is usually calculated by the fake_quantize_max_abs_op."); + AddComment(R"DOC( +FakeDequantizeMaxAbsOp operator. + +This calculation is an opposite operation of FakeQuantizeMaxAbsOp: + +$$Out = \frac{scale*X}{2^{num_bits} - 1}$$ + +)DOC"); + } +}; + +} // namespace operators +} // namespace paddle + +namespace ops = paddle::operators; +using CPU = paddle::platform::CPUDeviceContext; + +REGISTER_OPERATOR(fake_dequantize_max_abs, ops::FakeDequantizeMaxAbsOp, + ops::FakeDequantizeMaxAbsOpMaker, + paddle::framework::EmptyGradOpMaker); +REGISTER_OP_CPU_KERNEL(fake_dequantize_max_abs, + ops::FakeDequantizeMaxAbsKernel, + ops::FakeDequantizeMaxAbsKernel); diff --git a/paddle/fluid/operators/matmul_op.cu.cc b/paddle/fluid/operators/fake_dequantize_op.cu similarity index 68% rename from paddle/fluid/operators/matmul_op.cu.cc rename to paddle/fluid/operators/fake_dequantize_op.cu index e021bbe645..1bd38d1bd2 100644 --- a/paddle/fluid/operators/matmul_op.cu.cc +++ b/paddle/fluid/operators/fake_dequantize_op.cu @@ -12,11 +12,10 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/fluid/operators/matmul_op.h" +#include "paddle/fluid/operators/fake_dequantize_op.h" namespace ops = paddle::operators; -REGISTER_OP_CUDA_KERNEL( - matmul, ops::MatMulKernel); -REGISTER_OP_CUDA_KERNEL( - matmul_grad, - ops::MatMulGradKernel); +using CUDA = paddle::platform::CUDADeviceContext; +REGISTER_OP_CUDA_KERNEL(fake_dequantize_max_abs, + ops::FakeDequantizeMaxAbsKernel, + ops::FakeDequantizeMaxAbsKernel); diff --git a/paddle/fluid/operators/fake_dequantize_op.h b/paddle/fluid/operators/fake_dequantize_op.h new file mode 100644 index 0000000000..0901e68b37 --- /dev/null +++ b/paddle/fluid/operators/fake_dequantize_op.h @@ -0,0 +1,42 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#pragma once + +#include "paddle/fluid/framework/eigen.h" +#include "paddle/fluid/framework/op_registry.h" + +namespace paddle { +namespace operators { +template +class FakeDequantizeMaxAbsKernel : public framework::OpKernel { + public: + virtual void Compute(const framework::ExecutionContext& ctx) const { + auto* in = ctx.Input("X"); + auto* out = ctx.Output("Out"); + out->mutable_data(in->place()); + + int num_bits = ctx.Attr("num_bits"); + T scale = static_cast(ctx.Attr("scale")); + int range = std::pow(2, num_bits) - 1; + + auto eigen_out = framework::EigenVector::Flatten(*out); + auto eigen_in = framework::EigenVector::Flatten(*in); + auto& dev = *ctx.template device_context().eigen_device(); + eigen_out.device(dev) = (scale / range) * eigen_in; + } +}; + +} // namespace operators +} // namespace paddle diff --git a/paddle/fluid/operators/fake_quantize_op.cc b/paddle/fluid/operators/fake_quantize_op.cc new file mode 100644 index 0000000000..a91e0f520e --- /dev/null +++ b/paddle/fluid/operators/fake_quantize_op.cc @@ -0,0 +1,112 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
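The FakeDequantizeMaxAbsKernel above computes Out = (scale / range) * X with range = 2^num_bits - 1. A host-side sketch of the same max_abs dequantization, independent of Paddle (the function name is illustrative):

#include <cassert>
#include <cmath>
#include <vector>

// Out = scale * X / (2^num_bits - 1), matching the op's documented formula.
std::vector<float> DequantizeMaxAbs(const std::vector<float>& in, float scale,
                                    int num_bits) {
  const float range = std::pow(2.0f, num_bits) - 1.0f;  // 255 for 8 bits
  std::vector<float> out(in.size());
  for (size_t i = 0; i < in.size(); ++i) out[i] = in[i] * (scale / range);
  return out;
}

int main() {
  // A value at the top of an 8-bit range dequantizes back to scale itself.
  auto out = DequantizeMaxAbs({255.0f, 0.0f}, /*scale=*/0.5f, /*num_bits=*/8);
  assert(std::fabs(out[0] - 0.5f) < 1e-6f && out[1] == 0.0f);
}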
*/ + +#include "paddle/fluid/operators/fake_quantize_op.h" +#include + +namespace paddle { +namespace operators { + +class FakeQuantizeOp : public framework::OperatorWithKernel { + public: + FakeQuantizeOp(const std::string &type, + const framework::VariableNameMap &inputs, + const framework::VariableNameMap &outputs, + const framework::AttributeMap &attrs) + : OperatorWithKernel(type, inputs, outputs, attrs) {} + + void InferShape(framework::InferShapeContext *ctx) const override { + PADDLE_ENFORCE(ctx->HasInput("X"), + "Input(X) of FakeQuantizeOp should not be null."); + PADDLE_ENFORCE(ctx->HasOutput("Out"), + "Output(Out) of FakeQuantizeOp should not be null."); + PADDLE_ENFORCE(ctx->HasOutput("OutMovingScale"), + "OutMovingScale(Out) of FakeQuantizeOp should not be null."); + // if (ctx->HasInput("InMovingScale")) { + ctx->SetOutputDim("OutMovingScale", ctx->GetInputDim("InMovingScale")); + //} + // if (ctx->HasInput("InScales")) { + PADDLE_ENFORCE(ctx->HasOutput("OutScales"), + "OutScales(Out) of FakeQuantizeOp should not be null."); + ctx->SetOutputDim("OutScales", ctx->GetInputDim("InScales")); + // PADDLE_ENFORCE_EQ(ctx->Inputs("InScales")[0], + // ctx->Outputs("OutScales")[0], + // "Mean and MeanOut should share the same memory"); + //} + ctx->SetOutputDim("Out", ctx->GetInputDim("X")); + ctx->ShareLoD("X", /*->*/ "Out"); + } +}; + +class FakeQuantizeOpMaker : public framework::OpProtoAndCheckerMaker { + public: + void Make() override { + AddInput("X", "(Tensor) Input tensor of scale operator."); + AddInput("InScales", "(Tensor) scale buffer, used in static quantization.") + .AsDispensable(); + AddInput("InMovingScale", "Last scale, used in static quantization.") + .AsDispensable(); + AddInput("InCurrentIter", + "Last iteration number, used in static quantization.") + .AsDispensable(); + AddOutput("Out", "(Tensor) Output of quantized low level tensor."); + AddOutput("OutScales", + "(Tensor) scale buffer, used in static quantization.") + .AsDispensable(); + AddOutput("OutMovingScale", "Current scale"); + AddOutput("OutCurrentIter", "Current iteration number.").AsDispensable(); + AddAttr("quantize_type", + "(string, default abs_max) " + "The scaling type of the quantize operator.") + .SetDefault("abs_max"); + AddAttr("window_size", "(int, default 10000)").SetDefault(10000); + AddAttr("bit_length", "(int, default 8)") + .SetDefault(8) + .AddCustomChecker([](const int &bit_length) { + PADDLE_ENFORCE(bit_length >= 1 && bit_length <= 16, + "'bit_length' should be between 1 and 16."); + }); + AddAttr("is_test", "").SetDefault(false); + AddComment(R"DOC( +FakeQuantize operator + +quantize_type = abs_max: + + $$scale = max(abs(x))$$ + +quantize_type = range_abs_max: + + $$scale = max(max(abs(x)), history_abs_max)$$ + +quantize_type = moving_average_abs_max: + + $$scale = 0.1*scale+0.9*new_abs_max$$ + +$$Out = scale*X$$ + +)DOC"); + } +}; + +} // namespace operators +} // namespace paddle + +namespace ops = paddle::operators; + +REGISTER_OPERATOR(fake_quantize, ops::FakeQuantizeOp, ops::FakeQuantizeOpMaker, + paddle::framework::EmptyGradOpMaker); +REGISTER_OP_CPU_KERNEL( + fake_quantize, + ops::FakeQuantizeKernel, + ops::FakeQuantizeKernel); diff --git a/paddle/fluid/operators/fake_quantize_op.cu b/paddle/fluid/operators/fake_quantize_op.cu new file mode 100644 index 0000000000..be0c6730a5 --- /dev/null +++ b/paddle/fluid/operators/fake_quantize_op.cu @@ -0,0 +1,272 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include +#include "paddle/fluid/operators/fake_quantize_op.h" +#include "paddle/fluid/platform/cuda_primitives.h" + +namespace paddle { +namespace operators { + +template +__global__ void FindAbsMaxKernel(const int n, const T* in, T* out) { + int bid = threadIdx.x + blockIdx.x * blockDim.x; + int tid = threadIdx.x; + + extern __shared__ T shared_max_data[]; + if (gridDim.x > 1) { + shared_max_data[tid] = T(0); + for (int i = bid; i < n; i += blockDim.x * gridDim.x) { + T tmp = fabs(in[i]); + if (tmp > shared_max_data[tid]) { + shared_max_data[tid] = tmp; + } + } + } else { + if (bid < n) { + shared_max_data[tid] = fabs(in[bid]); + } else { + shared_max_data[tid] = T(0); + } + } + __syncthreads(); + + for (int i = blockDim.x / 2; i > 0; i >>= 1) { + if (tid < i && shared_max_data[tid] < shared_max_data[tid + i]) { + shared_max_data[tid] = shared_max_data[tid + i]; + } + __syncthreads(); + } + if (tid == 0) { + out[blockIdx.x] = shared_max_data[0]; + } +} + +float FindAbsMaxGpu(const platform::CUDADeviceContext& ctx, const float* array, + int length) { + float host_max; + int kNumTheads = 1024; + int gridDimx = (kNumTheads - 1 + length) / kNumTheads; + gridDimx = (gridDimx > kNumTheads) ? kNumTheads : gridDimx; + framework::Tensor t; + float* device_max = t.mutable_data(framework::make_ddim({gridDimx}), + platform::CUDAPlace()); + FindAbsMaxKernel<<>>(length, array, device_max); + FindAbsMaxKernel< + float><<<1, kNumTheads, kNumTheads * sizeof(float), ctx.stream()>>>( + gridDimx, device_max, device_max); + PADDLE_ENFORCE_EQ( + cudaMemcpy(&host_max, device_max, sizeof(float), cudaMemcpyDeviceToHost), + cudaSuccess, "cudaMemcpy failed"); + return host_max; +} + +template +__global__ void ApplySaturateKernel(const int n, const T* in, T* out, + int* num_saturate, const T min, + const T max) { + int bid = threadIdx.x + blockIdx.x * blockDim.x; + int tid = threadIdx.x; + + extern __shared__ int shared_count[]; + shared_count[tid] = 0; + for (int i = bid; i < n; i += blockDim.x * gridDim.x) { + if (in[i] > max) { + out[i] = max; + shared_count[tid] += 1; + } else if (in[i] < min) { + out[i] = min; + shared_count[tid] += 1; + } else { + out[i] = in[i]; + } + } + __syncthreads(); + + for (int i = blockDim.x / 2; i > 0; i >>= 1) { + if (tid < i) { + shared_count[tid] += shared_count[tid + i]; + } + __syncthreads(); + } + if (tid == 0) { + num_saturate[blockIdx.x] = shared_count[0]; + } +} + +template +__global__ void ReduceKernel(const int n, const T* in, T* out) { + int tid = threadIdx.x; + extern __shared__ T shared_sum[]; + if (tid < n) { + shared_sum[tid] = in[tid]; + } else { + shared_sum[tid] = T(0); + } + __syncthreads(); + // blockDim.x must >= n + for (int i = (n + 1) / 2; i > 0; i >>= 1) { + if (tid < i) { + shared_sum[tid] += shared_sum[tid + i]; + } + __syncthreads(); + } + if (tid == 0) { + out[0] = shared_sum[0]; + } +} + +template +int ApplySaturateGpu(const platform::CUDADeviceContext& ctx, const int n, + const T* in, T* out, const T min, const T max) { + 
int host_num_saturate; + int kNumTheads = 1024; + int gridDimx = (n + kNumTheads - 1) / kNumTheads; + gridDimx = (gridDimx > kNumTheads) ? kNumTheads : gridDimx; + framework::Tensor t; + int* device_num_saturate = t.mutable_data( + framework::make_ddim({gridDimx}), platform::CUDAPlace()); + ApplySaturateKernel< + T><<>>( + n, in, out, device_num_saturate, min, max); + ReduceKernel<<<1, kNumTheads, kNumTheads * sizeof(T), ctx.stream()>>>( + gridDimx, device_num_saturate, device_num_saturate); + PADDLE_ENFORCE_EQ(cudaSuccess, + cudaMemcpy(&host_num_saturate, device_num_saturate, + sizeof(int), cudaMemcpyDeviceToHost), + "cudaMemcpy failed"); + return host_num_saturate; +} + +template +class FakeQuantizeCUDAKernel : public framework::OpKernel { + public: + T FindRangeAbsMax(const platform::CUDADeviceContext& ctx, + framework::Tensor* scale_list, framework::Tensor* out_scale, + const T& cur_scale, int window_size, + int current_iter) const { + T* sl = scale_list->mutable_data(platform::CPUPlace()); + T remove_tmp = sl[current_iter]; + sl[current_iter] = cur_scale; + T& max_scale = out_scale->mutable_data(platform::CPUPlace())[0]; + if (max_scale < cur_scale) { + max_scale = cur_scale; + } else if (fabs(remove_tmp - max_scale) < 1e-6) { + int size = (current_iter > window_size) ? window_size : current_iter; + max_scale = T(FindAbsMaxGpu(ctx, scale_list->data(), size)); + } + return max_scale; + } + + T FindMovingAverageAbsMmax(framework::Tensor* in_scale, + framework::Tensor* out_scale, + const T& cur_scale) const { + T* ins = in_scale->mutable_data(platform::CPUPlace()); + T* outs = out_scale->mutable_data(platform::CPUPlace()); + outs[0] = 0.9 * cur_scale + 0.1 * ins[0]; + return T(outs[0]); + } + + virtual void Compute(const framework::ExecutionContext& context) const { + PADDLE_ENFORCE(platform::is_gpu_place(context.GetPlace()), + "This kernel only runs on GPU device."); + auto& device_ctx = context.cuda_device_context(); + auto* tensor = context.Output("Out"); + auto* in = context.Input("X"); + const bool is_test = context.Attr("is_test"); + tensor->mutable_data(in->place()); + context.Output("OutMovingScale") + ->mutable_data( + context.Input("InMovingScale")->place()); + auto quantize_type = + static_cast(context.Attr("quantize_type")); + if (quantize_type == std::string("range_abs_max")) { + context.Output("OutScales") + ->mutable_data( + context.Input("InScales")->place()); + context.Output("OutCurrentIter") + ->mutable_data( + context.Input("InCurrentIter")->place()); + } + + T scale = T(1); + int window_size = context.Attr("window_size"); + T bin_cnt = (T)((1 << (context.Attr("bit_length") - 1)) - 1); + if (quantize_type == std::string("abs_max")) { + auto* saving_scale = context.Output("OutMovingScale"); + scale = (T)FindAbsMaxGpu(device_ctx, in->data(), in->numel()); + saving_scale->mutable_data(platform::CPUPlace())[0] = scale; + + auto& device_ctx = context.template device_context(); + auto* scale_list = context.Output("OutScales"); + math::SetConstant scalar; + scale_list->mutable_data(context.GetPlace()); + scalar(device_ctx, scale_list, static_cast(0)); + auto* iter = context.Output("OutCurrentIter"); + iter->mutable_data(context.GetPlace()); + scalar(device_ctx, iter, static_cast(0)); + } else if (quantize_type == std::string("range_abs_max")) { + auto* moving_scale = const_cast( + context.Input("InMovingScale")); + if (is_test) { + scale = moving_scale->mutable_data(platform::CPUPlace())[0]; + } else { + auto* it = const_cast( + context.Input("InCurrentIter")); + auto* iter 
= context.Output("OutCurrentIter"); + int* last_iter = it->mutable_data(platform::CPUPlace()); + int* current_iter = iter->mutable_data(platform::CPUPlace()); + auto* scale_list = context.Output("OutScales"); + auto* saving_scale = + context.Output("OutMovingScale"); + scale = (T)FindAbsMaxGpu(device_ctx, in->data(), in->numel()); + scale = FindRangeAbsMax(device_ctx, scale_list, saving_scale, scale, + window_size, current_iter[0]); + (*current_iter) = (*last_iter) + 1; + } + } else if (quantize_type == std::string("moving_average_abs_max")) { + auto* moving_scale = const_cast( + context.Input("InMovingScale")); + if (is_test) { + scale = moving_scale->mutable_data(platform::CPUPlace())[0]; + } else { + scale = (T)FindAbsMaxGpu(device_ctx, in->data(), in->numel()); + auto* saving_scale = + context.Output("OutMovingScale"); + scale = FindMovingAverageAbsMmax( + const_cast(moving_scale), saving_scale, scale); + } + } + + ApplySaturateGpu(device_ctx, in->numel(), in->data(), + tensor->mutable_data(in->place()), -scale, scale); + scale = bin_cnt / scale; + + auto& dev = + *context.template device_context().eigen_device(); + auto eigen_out = framework::EigenVector::Flatten(*tensor); + auto eigen_in = framework::EigenVector::Flatten(*tensor); + eigen_out.device(dev) = (scale * eigen_in).round(); + } +}; + +} // namespace operators +} // namespace paddle + +REGISTER_OP_CUDA_KERNEL(fake_quantize, + paddle::operators::FakeQuantizeCUDAKernel< + paddle::platform::CUDADeviceContext, float>, + paddle::operators::FakeQuantizeCUDAKernel< + paddle::platform::CUDADeviceContext, double>); diff --git a/paddle/fluid/operators/fake_quantize_op.h b/paddle/fluid/operators/fake_quantize_op.h new file mode 100644 index 0000000000..80f71d85dd --- /dev/null +++ b/paddle/fluid/operators/fake_quantize_op.h @@ -0,0 +1,155 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
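Both the CUDA kernel above and the CPU kernel in the header below implement the same abs_max pipeline: find scale = max|x|, clip to [-scale, scale], then scale by bin_cnt/scale and round. A plain-C++ sketch under those assumptions (names are illustrative, not the Paddle API):

#include <algorithm>
#include <cassert>
#include <cmath>
#include <vector>

// bin_cnt = 2^(bit_length - 1) - 1, e.g. 127 for 8 bits.
std::vector<float> FakeQuantizeAbsMax(const std::vector<float>& x,
                                      int bit_length, float* out_scale) {
  float scale = 1e-8f;  // tiny floor so an all-zero input cannot divide by 0
  for (float v : x) scale = std::max(scale, std::fabs(v));
  const float bin_cnt = static_cast<float>((1 << (bit_length - 1)) - 1);
  std::vector<float> out(x.size());
  for (size_t i = 0; i < x.size(); ++i) {
    float clipped = std::min(std::max(x[i], -scale), scale);
    out[i] = std::round(bin_cnt / scale * clipped);
  }
  *out_scale = scale;
  return out;
}

int main() {
  float scale;
  auto q = FakeQuantizeAbsMax({0.5f, -1.0f, 0.25f}, /*bit_length=*/8, &scale);
  assert(scale == 1.0f && q[0] == 64.0f && q[1] == -127.0f && q[2] == 32.0f);
}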
*/ + +#pragma once + +#include +#include "paddle/fluid/framework/eigen.h" +#include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/operators/clip_op.h" +#include "paddle/fluid/operators/math/blas.h" +#include "paddle/fluid/platform/transform.h" + +namespace paddle { +namespace operators { + +using platform::Transform; + +template +class FakeQuantizeKernel : public framework::OpKernel { + public: + T FindAbsMax(framework::Tensor* in, int n) const { + T* p = in->mutable_data(platform::CPUPlace()); + T abs_max = (T)0.00000001; + for (int i = 0; i < n; i++) { + T tmp = fabs(p[i]); + if (tmp > abs_max) abs_max = tmp; + } + return T(abs_max); + } + T FindRangeAbsMax(framework::Tensor* scale_list, framework::Tensor* out_scale, + const T& cur_scale, int window_size, + int current_iter) const { + T* sl = scale_list->mutable_data(platform::CPUPlace()); + T remove_tmp = sl[current_iter]; + sl[current_iter] = cur_scale; + T& max_scale = out_scale->mutable_data(platform::CPUPlace())[0]; + if (max_scale < cur_scale) { + max_scale = cur_scale; + } else if (fabs(remove_tmp - max_scale) < 1e-6) { + int size = (current_iter > window_size) ? window_size : current_iter; + max_scale = T(FindAbsMax(scale_list, size)); + } + return max_scale; + } + + T FindMovingAverageAbsMmax(framework::Tensor* in_scale, + framework::Tensor* out_scale, + const T& cur_scale) const { + T* ins = in_scale->mutable_data(platform::CPUPlace()); + T* outs = out_scale->mutable_data(platform::CPUPlace()); + outs[0] = 0.9 * cur_scale + 0.1 * ins[0]; + return T(outs[0]); + } + + virtual void Compute(const framework::ExecutionContext& context) const { + auto* tensor = context.Output("Out"); + auto* in = context.Input("X"); + const bool is_test = context.Attr("is_test"); + tensor->mutable_data(in->place()); + + auto* oms_tensor = context.Output("OutMovingScale"); + oms_tensor->mutable_data(in->place()); + + auto quantize_type = + static_cast(context.Attr("quantize_type")); + if (quantize_type == std::string("range_abs_max")) { + auto* oss_tensor = context.Output("OutScales"); + oss_tensor->mutable_data( + context.Input("InScales")->place()); + auto* oci_tensor = context.Output("OutCurrentIter"); + oci_tensor->mutable_data( + context.Input("InCurrentIter")->place()); + } + + T scale = static_cast(1); + int window_size = context.Attr("window_size"); + int bit_length = context.Attr("bit_length"); + int bin_cnt = std::pow(2, bit_length - 1) - 1; + + auto& dev = + *context.template device_context().eigen_device(); + auto raw_in = framework::EigenVector::Flatten(*in); + if (quantize_type == std::string("abs_max")) { + auto* saving_scale = context.Output("OutMovingScale"); + auto scale_out = framework::EigenVector::Flatten(*saving_scale); + scale_out.device(dev) = raw_in.abs().maximum(); + scale = scale_out(0); + + auto& device_ctx = context.template device_context(); + auto* scale_list = context.Output("OutScales"); + math::SetConstant scalar; + scale_list->mutable_data(context.GetPlace()); + scalar(device_ctx, scale_list, static_cast(0)); + auto* iter = context.Output("OutCurrentIter"); + iter->mutable_data(context.GetPlace()); + scalar(device_ctx, iter, static_cast(0)); + } else if (quantize_type == std::string("range_abs_max")) { + auto* moving_scale = context.Input("InMovingScale"); + if (is_test) { + scale = moving_scale->data()[0]; + } else { + auto* it = context.Input("InCurrentIter"); + auto* iter = context.Output("OutCurrentIter"); + const int* last_iter = it->data(); + int* current_iter = 
iter->mutable_data(platform::CPUPlace()); + auto* scale_list = context.Output("OutScales"); + auto* saving_scale = + context.Output("OutMovingScale"); + auto scale_out = framework::EigenVector::Flatten(*saving_scale); + scale_out.device(dev) = raw_in.abs().maximum(); + scale = saving_scale->mutable_data(platform::CPUPlace())[0]; + scale = FindRangeAbsMax(scale_list, saving_scale, scale, window_size, + current_iter[0]); + saving_scale->mutable_data(platform::CPUPlace())[0] = scale; + (*current_iter) = (*last_iter) + 1; + } + } else if (quantize_type == std::string("moving_average_abs_max")) { + auto* moving_scale = context.Input("InMovingScale"); + if (is_test) { + scale = moving_scale->data()[0]; + } else { + auto* saving_scale = + context.Output("OutMovingScale"); + auto scale_out = framework::EigenVector::Flatten(*saving_scale); + scale_out.device(dev) = raw_in.abs().maximum(); + scale = saving_scale->mutable_data(platform::CPUPlace())[0]; + scale = FindMovingAverageAbsMmax( + const_cast(moving_scale), saving_scale, scale); + saving_scale->mutable_data(platform::CPUPlace())[0] = scale; + } + } + + Transform trans; + trans(context.template device_context(), in->data(), + in->data() + in->numel(), tensor->mutable_data(in->place()), + ClipFunctor(-scale, scale)); + auto eigen_out = framework::EigenVector::Flatten(*tensor); + auto eigen_in = framework::EigenVector::Flatten(*tensor); + eigen_out.device(dev) = (bin_cnt / scale * eigen_in).round(); + } +}; + +} // namespace operators +} // namespace paddle diff --git a/paddle/fluid/operators/fc_mkldnn_op.cc b/paddle/fluid/operators/fc_mkldnn_op.cc index 847b7b0c12..99fa659a35 100644 --- a/paddle/fluid/operators/fc_mkldnn_op.cc +++ b/paddle/fluid/operators/fc_mkldnn_op.cc @@ -115,6 +115,7 @@ class MKLDNNMemory { template class FCMKLDNNOpKernel : public paddle::framework::OpKernel { + public: void Compute(const paddle::framework::ExecutionContext& ctx) const override { PADDLE_ENFORCE(paddle::platform::is_cpu_place(ctx.GetPlace()), "It must use CPUPlace."); diff --git a/paddle/fluid/operators/fc_op.cc b/paddle/fluid/operators/fc_op.cc index 45e4d5b2b8..a9ae1396db 100644 --- a/paddle/fluid/operators/fc_op.cc +++ b/paddle/fluid/operators/fc_op.cc @@ -43,7 +43,7 @@ void FCOp::InferShape(framework::InferShapeContext* ctx) const { framework::OpKernelType FCOp::GetExpectedKernelType( const framework::ExecutionContext& ctx) const { framework::LibraryType library{framework::LibraryType::kMKLDNN}; - framework::DataLayout layout{framework::DataLayout::kAnyLayout}; + framework::DataLayout layout{framework::DataLayout::kMKLDNN}; return framework::OpKernelType( framework::ToDataType(ctx.Input("Input")->type()), ctx.GetPlace(), @@ -65,15 +65,14 @@ void FCOpGrad::InferShape(framework::InferShapeContext* ctx) const { framework::OpKernelType FCOpGrad::GetExpectedKernelType( const framework::ExecutionContext& ctx) const { framework::LibraryType library{framework::LibraryType::kMKLDNN}; - framework::DataLayout layout{framework::DataLayout::kAnyLayout}; + framework::DataLayout layout{framework::DataLayout::kMKLDNN}; return framework::OpKernelType( framework::ToDataType(ctx.Input("Input")->type()), ctx.GetPlace(), layout, library); } -FCOpMaker::FCOpMaker(OpProto* proto, OpAttrChecker* op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { +void FCOpMaker::Make() { AddInput("Input", "(Tensor) The input tensor of fully connected operator. 
"); AddInput("W", "(Tensor), The second input tensor of fc op."); AddOutput("Out", "(Tensor) The output tensor of fully connected operator. "); diff --git a/paddle/fluid/operators/fc_op.h b/paddle/fluid/operators/fc_op.h index 70fa96440d..e1b780fc0c 100644 --- a/paddle/fluid/operators/fc_op.h +++ b/paddle/fluid/operators/fc_op.h @@ -45,7 +45,7 @@ class FCOpGrad : public framework::OperatorWithKernel { class FCOpMaker : public framework::OpProtoAndCheckerMaker { public: - FCOpMaker(OpProto* proto, OpAttrChecker* op_checker); + void Make() override; }; } // namespace operators diff --git a/paddle/fluid/operators/feed_op.cc b/paddle/fluid/operators/feed_op.cc index debacf07c3..dc7ef66495 100644 --- a/paddle/fluid/operators/feed_op.cc +++ b/paddle/fluid/operators/feed_op.cc @@ -31,7 +31,6 @@ class FeedOp : public framework::OperatorBase { const platform::Place &place) const override { // get device context from pool auto *dev_ctx = platform::DeviceContextPool::Instance().Get(place); - platform::RecordEvent record_event(Type(), dev_ctx); auto feed_var_name = Input("X"); auto *feed_var = scope.FindVar(feed_var_name); @@ -66,8 +65,7 @@ class FeedOp : public framework::OperatorBase { class FeedOpInfoMaker : public framework::OpProtoAndCheckerMaker { public: - FeedOpInfoMaker(OpProto *proto, OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("X", "The input of feed op"); AddOutput("Out", "The output of feed op"); AddAttr("col", "(int) The column of feed"); diff --git a/paddle/fluid/operators/fetch_barrier_op.cc b/paddle/fluid/operators/fetch_barrier_op.cc new file mode 100644 index 0000000000..d9cd956dfd --- /dev/null +++ b/paddle/fluid/operators/fetch_barrier_op.cc @@ -0,0 +1,81 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ + +#include // NOLINT +#include + +#include "paddle/fluid/framework/data_type.h" +#include "paddle/fluid/framework/framework.pb.h" +#include "paddle/fluid/framework/lod_tensor.h" +#include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/operators/detail/macros.h" +#include "paddle/fluid/platform/profiler.h" + +namespace paddle { +namespace operators { + +class FetchBarrierOp : public framework::OperatorBase { + public: + FetchBarrierOp(const std::string& type, + const framework::VariableNameMap& inputs, + const framework::VariableNameMap& outputs, + const framework::AttributeMap& attrs) + : OperatorBase(type, inputs, outputs, attrs) {} + + void RunImpl(const framework::Scope& scope, + const platform::Place& place) const override { + std::vector eps = Attr>("endpoints"); + distributed::RPCClient* rpc_client = + distributed::RPCClient::GetInstance(); + + PADDLE_ENFORCE(rpc_client->Wait(), "internal error in RPCClient"); + + for (auto& ep : eps) { + VLOG(3) << "fetch barrier, ep: " << ep; + rpc_client->AsyncSendFetchBarrier(ep); + } + PADDLE_ENFORCE(rpc_client->Wait(), "internal error in RPCClient"); + } +}; + +class FetchBarrierOpMaker : public framework::OpProtoAndCheckerMaker { + public: + void Make() override { + AddComment(R"DOC( +FetchBarrier operator + +This operator will send a fetch barrier signal to the listen_and_serv op, so that +the Parameter Server knows all variables have been sent. +)DOC"); + + AddAttr>("endpoints", + "(string vector, default 127.0.0.1:6164) " + "Server endpoints to send variables to.") + .SetDefault({"127.0.0.1:6164"}); + } +}; + +class FetchBarrierOpShapeInference : public framework::InferShapeBase { + public: + void operator()(framework::InferShapeContext* ctx) const override {} +}; + +} // namespace operators +} // namespace paddle + +namespace ops = paddle::operators; + +REGISTER_OPERATOR(fetch_barrier, ops::FetchBarrierOp, + paddle::framework::EmptyGradOpMaker, ops::FetchBarrierOpMaker, + ops::FetchBarrierOpShapeInference); diff --git a/paddle/fluid/operators/fetch_op.cc b/paddle/fluid/operators/fetch_op.cc index 18deec5813..c197b45e81 100644 --- a/paddle/fluid/operators/fetch_op.cc +++ b/paddle/fluid/operators/fetch_op.cc @@ -30,9 +30,6 @@ class FetchOp : public framework::OperatorBase { private: void RunImpl(const framework::Scope &scope, const platform::Place &place) const override { - platform::DeviceContextPool &pool = platform::DeviceContextPool::Instance(); - platform::RecordEvent record_event(Type(), pool.Get(place)); - auto fetch_var_name = Input("X"); auto *fetch_var = scope.FindVar(fetch_var_name); PADDLE_ENFORCE(fetch_var != nullptr, @@ -66,8 +63,7 @@ class FetchOp : public framework::OperatorBase { class FetchOpInfoMaker : public framework::OpProtoAndCheckerMaker { public: - FetchOpInfoMaker(OpProto *proto, OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("X", "The input of fetch op"); AddOutput("Out", "The output of fetch op"); AddAttr("col", "(int) The column of fetch"); diff --git a/paddle/fluid/operators/fill_constant_batch_size_like_op.cc b/paddle/fluid/operators/fill_constant_batch_size_like_op.cc index 72da80baaf..453a1b32a0 100644 --- a/paddle/fluid/operators/fill_constant_batch_size_like_op.cc +++ b/paddle/fluid/operators/fill_constant_batch_size_like_op.cc @@ -30,19 +30,18 @@ class FillConstantBatchSizeLikeOp : public BatchSizeLikeOp { }; class FillConstantBatchSizeLikeOpMaker : public BatchSizeLikeOpMaker { - public: - FillConstantBatchSizeLikeOpMaker(OpProto *proto,
OpAttrChecker *op_checker) - : BatchSizeLikeOpMaker(proto, op_checker) { - AddAttr("dtype", - "(int, default 5 (FP32)) " - "Output data type") + protected: + void Apply() override { + AddAttr( + "dtype", + "It could be numpy.dtype. Output data type. Default is float32.") .SetDefault(framework::proto::VarType::FP32); - AddAttr("value", "(float, default 0) The value to be filled") + AddAttr("value", "default 0. The value to be filled") .SetDefault(0.0f); AddComment(R"DOC( -FillConstantBatchSizeLike Operator. - -Fill up a variable with specified constant value. +This function creates a tensor of specified *shape*, *dtype* and batch size, +and initializes this with a constant supplied in *value*. The batch size is +obtained from the `input` tensor. )DOC"); } diff --git a/paddle/fluid/operators/fill_constant_batch_size_like_op.h b/paddle/fluid/operators/fill_constant_batch_size_like_op.h index 2a7df149a9..63ea60678f 100644 --- a/paddle/fluid/operators/fill_constant_batch_size_like_op.h +++ b/paddle/fluid/operators/fill_constant_batch_size_like_op.h @@ -24,6 +24,14 @@ class FillConstantBatchSizeLikeOpKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& ctx) const override { auto* out = ctx.Output("Out"); + auto* in = ctx.Input("Input"); + if (in->lod().size() && ctx.Attr("input_dim_idx") == 0) { + // set the correct batch size for the LoDTensor. + auto odims = out->dims(); + int output_dim_idx = ctx.Attr("output_dim_idx"); + odims[output_dim_idx] = static_cast(in->lod().back().size()) - 1; + out->mutable_data(odims, ctx.GetPlace()); + } out->mutable_data(ctx.GetPlace()); auto value = ctx.Attr("value"); diff --git a/paddle/fluid/operators/fill_constant_op.cc b/paddle/fluid/operators/fill_constant_op.cc index 07e0a80f8d..130f18dde4 100644 --- a/paddle/fluid/operators/fill_constant_op.cc +++ b/paddle/fluid/operators/fill_constant_op.cc @@ -59,8 +59,7 @@ class FillConstantOp : public framework::OperatorBase { class FillConstantOpMaker : public framework::OpProtoAndCheckerMaker { public: - FillConstantOpMaker(OpProto *proto, OpAttrChecker *op_checker) - : framework::OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddAttr("dtype", "(int, default 5 (FP32)) " "Output data type") diff --git a/paddle/fluid/operators/fill_op.cc b/paddle/fluid/operators/fill_op.cc index ee8a2fc353..925dc19061 100644 --- a/paddle/fluid/operators/fill_op.cc +++ b/paddle/fluid/operators/fill_op.cc @@ -82,8 +82,7 @@ class FillOp : public framework::OperatorBase { class FillOpMaker : public framework::OpProtoAndCheckerMaker { public: - FillOpMaker(OpProto *proto, OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddComment(R"DOC(Fill operator Fill a tensor with `value` and `shape`.
The type of the tensor is specified by diff --git a/paddle/fluid/operators/fill_zeros_like_op.cc b/paddle/fluid/operators/fill_zeros_like_op.cc index 58c814ba64..d67bec36b3 100644 --- a/paddle/fluid/operators/fill_zeros_like_op.cc +++ b/paddle/fluid/operators/fill_zeros_like_op.cc @@ -33,8 +33,7 @@ class FillZerosLikeOp : public framework::OperatorWithKernel { class FillZerosLikeOpMaker : public framework::OpProtoAndCheckerMaker { public: - FillZerosLikeOpMaker(OpProto *proto, OpAttrChecker *op_checker) - : framework::OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("X", "The input of fill-zeros-like op."); AddOutput("Out", "The variable will be filled up with zeros."); AddComment(R"DOC( diff --git a/paddle/fluid/operators/flatten_op.cc b/paddle/fluid/operators/flatten_op.cc new file mode 100644 index 0000000000..fdda01381e --- /dev/null +++ b/paddle/fluid/operators/flatten_op.cc @@ -0,0 +1,169 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include +#include "paddle/fluid/framework/op_registry.h" + +namespace paddle { +namespace operators { + +using Tensor = framework::Tensor; + +class FlattenOpInferShape : public framework::InferShapeBase { + public: + void operator()(framework::InferShapeContext *ctx) const override { + PADDLE_ENFORCE(ctx->HasInput("X"), + "Input (X) of Flatten op should not be null."); + PADDLE_ENFORCE(ctx->HasOutput("Out"), + "Output (Out) of Flatten op should not be null."); + const auto &axis = ctx->Attrs().Get("axis"); + const auto &in_dims = ctx->GetInputDim("X"); + PADDLE_ENFORCE(axis >= 0, "The axis should be greater than or equal to 0."); + PADDLE_ENFORCE( + axis <= in_dims.size(), + "The axis should be less than or equal to input tensor's rank."); + + const auto &out_dims = GetOutputShape(axis, in_dims); + ctx->SetOutputDim("Out", framework::make_ddim(out_dims)); + if (in_dims[0] == out_dims[0]) { + // Only pass LoD when the first dimension of output and Input(X) + // are the same.
+ ctx->ShareLoD("X", "Out"); + } + } + + static std::vector GetOutputShape(const int axis, + const framework::DDim &in_dims) { + int64_t outer = 1, inner = 1; + for (int i = 0; i < in_dims.size(); ++i) { + if (i < axis) { + outer *= in_dims[i]; + } else { + inner *= in_dims[i]; + } + } + std::vector out_shape(2); + out_shape[0] = outer; + out_shape[1] = inner; + return out_shape; + } +}; + +class FlattenOp : public framework::OperatorBase { + public: + using OperatorBase::OperatorBase; + + private: + void RunImpl(const framework::Scope &scope, + const platform::Place &place) const override { + auto &axis = Attr("axis"); + auto in_dims = + scope.FindVar(Input("X"))->Get().dims(); + const auto &out_dims = FlattenOpInferShape::GetOutputShape(axis, in_dims); + + framework::AttributeMap attrs; + attrs["shape"] = out_dims; + attrs["inplace"] = false; + // Invoke Reshape Op + auto reshape_op = framework::OpRegistry::CreateOp( + "reshape", {{"X", {Input("X")}}, {"Shape", {}}}, + {{"Out", {Output("Out")}}}, attrs); + reshape_op->Run(scope, place); + } +}; + +class FlattenOpMaker : public framework::OpProtoAndCheckerMaker { + public: + void Make() override { + AddInput("X", "(Tensor) A tensor of rank >= axis."); + AddOutput("Out", + "A 2D tensor that is a reshaped version of the input tensor. " + "The input dimensions up to axis are flattened to the outer " + "dimension of the output and the remaining input dimensions " + "are flattened into the inner dimension of the output."); + AddAttr("axis", + "(int) " + "Indicate up to which input dimensions (exclusive) should be " + "flattened to the outer dimension of the output. The value " + "for axis must be in the range [0, R], where R is the rank of " + "the input tensor. When axis = 0, the shape of the output " + "tensor is (1, (d_0 X d_1 ... d_n)), where the shape of the " + "input tensor is (d_0, d_1, ... d_n).") + .SetDefault(1); + AddComment(R"DOC( +Flatten Operator + +Flattens the input tensor into a 2D matrix.
+ +Examples: +Case 1: + Given + X.shape = (3, 100, 100, 4) + and + axis = 2 + We get: + Out.shape = (3 * 100, 4 * 100) + +Case 2: + Given + X.shape = (3, 100, 100, 4) + and + axis = 0 + We get: + Out.shape = (1, 3 * 100 * 100 * 4) +)DOC"); + } +}; + +class FlattenGradInferShape : public framework::InferShapeBase { + public: + void operator()(framework::InferShapeContext *context) const override { + context->SetOutputDim(framework::GradVarName("X"), + context->GetInputDim("X")); + context->ShareLoD("X", framework::GradVarName("X")); + } +}; + +class FlattenGradOp : public framework::OperatorBase { + public: + using OperatorBase::OperatorBase; + + private: + void RunImpl(const framework::Scope &scope, + const platform::Place &place) const override { + auto dx_name = Output(framework::GradVarName("X")); + auto dout_name = Input(framework::GradVarName("Out")); + auto in_dims = + scope.FindVar(Input("X"))->Get().dims(); + framework::AttributeMap attrs; + attrs["shape"] = framework::vectorize2int(in_dims); + attrs["inplace"] = false; + + auto reshape_op = framework::OpRegistry::CreateOp( + "reshape", {{"X", {dout_name}}, {"Shape", {}}}, {{"Out", {dx_name}}}, + attrs); + reshape_op->Run(scope, place); + } +}; + +} // namespace operators +} // namespace paddle + +USE_OP(reshape); + +namespace ops = paddle::operators; +REGISTER_OPERATOR(flatten, ops::FlattenOp, ops::FlattenOpMaker, + ops::FlattenOpInferShape, + paddle::framework::DefaultGradOpDescMaker); +REGISTER_OPERATOR(flatten_grad, ops::FlattenGradOp, ops::FlattenGradInferShape); diff --git a/paddle/fluid/operators/ftrl_op.cc b/paddle/fluid/operators/ftrl_op.cc index cbdcce9beb..70ba25c213 100644 --- a/paddle/fluid/operators/ftrl_op.cc +++ b/paddle/fluid/operators/ftrl_op.cc @@ -64,8 +64,7 @@ class FTRLOp : public framework::OperatorWithKernel { class FTRLOpMaker : public framework::OpProtoAndCheckerMaker { public: - FTRLOpMaker(OpProto *proto, OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("Param", "(Tensor, default Tensor) " "Input parameter value that has to be updated."); diff --git a/paddle/fluid/operators/fused_elemwise_activation_op.cc b/paddle/fluid/operators/fused_elemwise_activation_op.cc new file mode 100644 index 0000000000..a6fd0aeb02 --- /dev/null +++ b/paddle/fluid/operators/fused_elemwise_activation_op.cc @@ -0,0 +1,221 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
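FlattenOpInferShape::GetOutputShape above reduces to two products: dims before axis form the outer size, the rest form the inner size. A standalone sketch reproducing the two documented cases:

#include <cassert>
#include <vector>

std::vector<long long> FlattenShape(const std::vector<long long>& in,
                                    int axis) {
  long long outer = 1, inner = 1;
  for (int i = 0; i < static_cast<int>(in.size()); ++i)
    (i < axis ? outer : inner) *= in[i];
  return {outer, inner};
}

int main() {
  // Case 1: (3, 100, 100, 4), axis = 2 -> (300, 400).
  assert((FlattenShape({3, 100, 100, 4}, 2) ==
          std::vector<long long>{300, 400}));
  // Case 2: axis = 0 -> (1, 120000).
  assert((FlattenShape({3, 100, 100, 4}, 0) ==
          std::vector<long long>{1, 120000}));
}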
*/ + +#include +#include + +#include "paddle/fluid/operators/fused_elemwise_activation_op.h" + +namespace paddle { +namespace operators { + +class FusedElemwiseActivationOp : public framework::OperatorWithKernel { + public: + using framework::OperatorWithKernel::OperatorWithKernel; + + void InferShape(framework::InferShapeContext *ctx) const override { + PADDLE_ENFORCE( + ctx->HasInput("X"), + "Input(X) of FusedElemwiseActivationOp op should not be null."); + PADDLE_ENFORCE( + ctx->HasInput("Y"), + "Input(Y) of FusedElemwiseActivationOp op should not be null."); + PADDLE_ENFORCE( + ctx->HasOutput("Out"), + "Output(Out) of FusedElemwiseActivationOp op should not be null."); + + auto x_dim = ctx->GetInputDim("X"); + auto y_dim = ctx->GetInputDim("Y"); + PADDLE_ENFORCE_GE(x_dim.size(), y_dim.size(), + "Rank of first input must >= rank of second input."); + + ctx->SetOutputDim("Out", x_dim); + ctx->ShareLoD("X", /*->*/ "Out"); + } + + protected: + framework::OpKernelType GetExpectedKernelType( + const framework::ExecutionContext &ctx) const override { + PADDLE_ENFORCE_EQ(ctx.Input("X")->type(), + ctx.Input("Y")->type(), + "The element's type of input should be the same."); + auto input_data_type = + framework::ToDataType(ctx.Input("X")->type()); + return framework::OpKernelType(input_data_type, ctx.GetPlace()); + } +}; + +class FusedElemwiseActivationMaker : public framework::OpProtoAndCheckerMaker { + public: + void Make() override { + AddInput("X", "(vector)"); + AddInput("Y", "(vector)"); + AddOutput("Out", "vector"); + AddAttr("axis", + "axis is used by elementwise_op, the default value is -1.") + .SetDefault(-1); + AddAttr("scale", + "scale is used by scale_op, the default value is 0.0.") + .SetDefault(0.0); + AddAttr("recomputation", + "Whether to recompute the Out. " + "fused_elemwise_activation_grad has two methods to get the " + "dx and dy, one " + "is to use the 'Out', and the other is not to use it. " + "The former method will save the time of recomputing the " + "'Out', but it must occupy the memory to store the 'out'. " + "While the latter method can avoid occupying the memory, " + "but it must recompute the 'Out'. The default value is true.") + .SetDefault(true); + AddAttr>("functor_list", + "The functors that should be fused.") + .AddCustomChecker([&](const std::vector &functor_list) { + PADDLE_ENFORCE(ValidCheck(functor_list)); + }); + + AddComment(R"DOC( +FusedElemwiseActivation Operator. + +At present, FusedElemwiseActivation only supports two kinds of compound +operators (elementwise_op and activation_op): + + Z = Binary(X, Unary(Y)) + Z = Unary(Binary(X, Y)) + +The attributes of activation_op can be obtained from fused_elemwise_activation_op's +attributes. functor_list records the functors to be fused, for example +"scale,elementwise_add".
+ +)DOC"); + } + + private: + bool ValidCheck(const std::vector &functors) { + std::unordered_set unary_fun = {"scale", "relu"}; + std::unordered_set binary_fun = {"elementwise_add"}; + + std::string unary_fun_str; + if (binary_fun.count(functors[0])) { + unary_fun_str = functors[1]; + } else if (binary_fun.count(functors[1])) { + unary_fun_str = functors[0]; + } else { + PADDLE_THROW("%s and %s are not included in fused_list.", functors[0], + functors[1]); + } + PADDLE_ENFORCE_EQ(unary_fun.count(unary_fun_str), 1, + "%s is not included in fused_list.", unary_fun_str); + return true; + } +}; + +class FusedElemwiseActivationGradMaker + : public framework::SingleGradOpDescMaker { + public: + using framework::SingleGradOpDescMaker::SingleGradOpDescMaker; + + protected: + std::unique_ptr Apply() const override { + auto *op_desc_ptr = new framework::OpDesc(); + op_desc_ptr->SetType(this->ForwardOpType() + "_grad"); + + for (auto &input_param : this->InputNames()) { + op_desc_ptr->SetInput(input_param, this->Input(input_param)); + op_desc_ptr->SetOutput(framework::GradVarName(input_param), + this->InputGrad(input_param, true)); + } + + for (auto &output_param : this->OutputNames()) { + op_desc_ptr->SetInput(output_param, this->Output(output_param)); + op_desc_ptr->SetInput(framework::GradVarName(output_param), + this->OutputGrad(output_param)); + } + op_desc_ptr->SetAttrMap(this->Attrs()); + + std::vector functor_names = + boost::get>( + op_desc_ptr->GetAttr("functor_list")); + functor_names[0] += "_grad"; + functor_names[1] += "_grad"; + op_desc_ptr->SetAttr("functor_list", functor_names); + return std::unique_ptr(op_desc_ptr); + } +}; + +class FusedElemwiseActivationOpGrad : public framework::OperatorWithKernel { + public: + using framework::OperatorWithKernel::OperatorWithKernel; + + void InferShape(framework::InferShapeContext *ctx) const override { + PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) should not be null"); + PADDLE_ENFORCE(ctx->HasInput("Y"), "Input(Y) should not be null"); + PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Out")), + "Input(Out@GRAD) should not be null"); + + auto x_dims = ctx->GetInputDim("X"); + auto y_dims = ctx->GetInputDim("Y"); + auto out_dims = ctx->GetInputDim(framework::GradVarName("Out")); + + PADDLE_ENFORCE_GE(x_dims.size(), y_dims.size(), + "Rank of first input must >= rank of second input."); + + auto x_grad_name = framework::GradVarName("X"); + auto y_grad_name = framework::GradVarName("Y"); + if (ctx->HasOutput(x_grad_name)) { + ctx->SetOutputDim(x_grad_name, x_dims); + } + if (ctx->HasOutput(y_grad_name)) { + ctx->SetOutputDim(y_grad_name, y_dims); + } + } + + protected: + framework::OpKernelType GetExpectedKernelType( + const framework::ExecutionContext &ctx) const override { + auto input_data_type_index = ctx.Input("X")->type(); + PADDLE_ENFORCE_EQ(input_data_type_index, + ctx.Input("Y")->type(), + "The element's type of input should be the same."); + PADDLE_ENFORCE_EQ( + input_data_type_index, + ctx.Input(framework::GradVarName("Out"))->type(), + "The element's type of input should be the same."); + + auto input_data_type = framework::ToDataType(input_data_type_index); + return framework::OpKernelType(input_data_type, ctx.GetPlace()); + } +}; +} // namespace operators +} // namespace paddle + +namespace ops = paddle::operators; +REGISTER_OPERATOR(fused_elemwise_activation, ops::FusedElemwiseActivationOp, + ops::FusedElemwiseActivationMaker, + ops::FusedElemwiseActivationGradMaker); +REGISTER_OPERATOR(fused_elemwise_activation_grad, + 
ops::FusedElemwiseActivationOpGrad); + +REGISTER_OP_CPU_KERNEL( + fused_elemwise_activation, + ops::FusedElemwiseActivationKernel, + ops::FusedElemwiseActivationKernel); + +REGISTER_OP_CPU_KERNEL( + fused_elemwise_activation_grad, + ops::FusedElemwiseActivationGradKernel, + ops::FusedElemwiseActivationGradKernel); diff --git a/paddle/fluid/operators/fused_elemwise_activation_op.cu b/paddle/fluid/operators/fused_elemwise_activation_op.cu new file mode 100644 index 0000000000..e1d2b16b4b --- /dev/null +++ b/paddle/fluid/operators/fused_elemwise_activation_op.cu @@ -0,0 +1,30 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include "paddle/fluid/operators/fused_elemwise_activation_op.h" + +namespace ops = paddle::operators; +REGISTER_OP_CUDA_KERNEL( + fused_elemwise_activation, + ops::FusedElemwiseActivationKernel, + ops::FusedElemwiseActivationKernel); + +REGISTER_OP_CUDA_KERNEL( + fused_elemwise_activation_grad, + ops::FusedElemwiseActivationGradKernel, + ops::FusedElemwiseActivationGradKernel); diff --git a/paddle/fluid/operators/fused_elemwise_activation_op.h b/paddle/fluid/operators/fused_elemwise_activation_op.h new file mode 100644 index 0000000000..fe0017b824 --- /dev/null +++ b/paddle/fluid/operators/fused_elemwise_activation_op.h @@ -0,0 +1,425 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
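The header below builds the fused computation out of compound functors. A minimal host-side sketch of the two supported patterns, Z = Binary(X, Unary(Y)) and Z = Unary(Binary(X, Y)), using scale and elementwise_add as the unary/binary pair from functor_list (simplified: no HOSTDEVICE plumbing, illustrative functor types):

#include <cassert>

template <typename T>
struct AddFunctor {  // elementwise_add
  T operator()(T x, T y) const { return x + y; }
};
template <typename T>
struct ScaleFunctor {  // scale
  explicit ScaleFunctor(T s) : s_(s) {}
  T operator()(T x) const { return s_ * x; }
  T s_;
};
template <typename T, typename Binary, typename Unary>
struct BinaryCompound {  // Z = Binary(X, Unary(Y))
  Binary b;
  Unary u;
  T operator()(T x, T y) const { return b(x, u(y)); }
};
template <typename T, typename Unary, typename Binary>
struct UnaryCompound {  // Z = Unary(Binary(X, Y))
  Unary u;
  Binary b;
  T operator()(T x, T y) const { return u(b(x, y)); }
};

int main() {
  BinaryCompound<float, AddFunctor<float>, ScaleFunctor<float>> f{
      AddFunctor<float>(), ScaleFunctor<float>(2.0f)};
  UnaryCompound<float, ScaleFunctor<float>, AddFunctor<float>> g{
      ScaleFunctor<float>(2.0f), AddFunctor<float>()};
  assert(f(1.0f, 3.0f) == 7.0f);  // 1 + 2*3
  assert(g(1.0f, 3.0f) == 8.0f);  // 2*(1 + 3)
}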
*/
+
+#pragma once
+
+#include <string>
+#include <vector>
+#include "paddle/fluid/framework/op_desc.h"
+#include "paddle/fluid/framework/op_registry.h"
+#include "paddle/fluid/operators/detail/safe_ref.h"
+#include "paddle/fluid/operators/elementwise_op_function.h"
+#include "paddle/fluid/operators/math/functors.h"
+
+namespace math = paddle::operators::math;
+
+namespace paddle {
+namespace operators {
+
+// CompoundFunctors
+// For example: Z = Binary(X, Unary(Y))
+template <typename T, typename BinaryFun, typename UnaryFun>
+struct BinaryCompoundFunctor {
+  BinaryCompoundFunctor(const BinaryFun &binary_fun, const UnaryFun &unary_fun)
+      : binary_fun_(binary_fun), unary_fun_(unary_fun) {}
+
+  inline HOSTDEVICE T operator()(T x, T y) {
+    return binary_fun_(x, unary_fun_(y));
+  }
+
+ private:
+  BinaryFun binary_fun_;
+  UnaryFun unary_fun_;
+};
+
+// For example: Z = Unary(Binary(X, Y))
+template <typename T, typename UnaryFun, typename BinaryFun>
+struct UnaryCompoundFunctor {
+  UnaryCompoundFunctor(const UnaryFun &unary_fun, const BinaryFun &binary_fun)
+      : unary_fun_(unary_fun), binary_fun_(binary_fun) {}
+
+  inline HOSTDEVICE T operator()(T x, T y) {
+    return unary_fun_(binary_fun_(x, y));
+  }
+
+ private:
+  UnaryFun unary_fun_;
+  BinaryFun binary_fun_;
+};
+
+// FIXME(zcd): DBinaryFun and DUnaryFun have two ways to compute dx: one
+// uses the 'out', and the other does not. The former saves the time of
+// recomputing 'out' but must occupy memory to store it, while the latter
+// avoids occupying this memory but must recompute 'out'.
+
+template <typename T, typename DBinaryFun, typename UnaryFun,
+          bool Recomputation = true>
+struct BinaryCompoundGradDxFunctor {
+  BinaryCompoundGradDxFunctor(const DBinaryFun &d_binary_fun,
+                              const UnaryFun &unary_fun)
+      : d_binary_fun_(d_binary_fun), unary_fun_(unary_fun) {}
+
+  inline HOSTDEVICE T operator()(T x, T y, T out, T dout) {
+    if (Recomputation) {
+      return dout * d_binary_fun_(x, unary_fun_(y));
+    } else {
+      return dout * d_binary_fun_(x, unary_fun_(y), out);
+    }
+  }
+
+ private:
+  DBinaryFun d_binary_fun_;
+  UnaryFun unary_fun_;
+};
+
+template <typename T, typename DBinaryFun, typename UnaryFun,
+          typename DUnaryFun, bool Recomputation = true>
+struct BinaryCompoundGradDyFunctor {
+  BinaryCompoundGradDyFunctor(const DBinaryFun &d_binary_fun,
+                              const UnaryFun &unary_fun,
+                              const DUnaryFun &d_unary_fun)
+      : d_binary_fun_(d_binary_fun),
+        unary_fun_(unary_fun),
+        d_unary_fun_(d_unary_fun) {}
+
+  inline HOSTDEVICE T operator()(T x, T y, T out, T dout) {
+    if (Recomputation) {
+      return dout * d_binary_fun_(unary_fun_(y), x) * d_unary_fun_(y);
+    } else {
+      return dout * d_binary_fun_(unary_fun_(y), x, out) * d_unary_fun_(y);
+    }
+  }
+
+ private:
+  DBinaryFun d_binary_fun_;
+  UnaryFun unary_fun_;
+  DUnaryFun d_unary_fun_;
+};
+
+template <typename T, typename DUnaryFun, typename BinaryFun,
+          typename DBinaryFun, bool Recomputation = true>
+struct UnaryCompoundGradDxFunctor {
+  UnaryCompoundGradDxFunctor(const DUnaryFun &d_unary_fun,
+                             const BinaryFun &binary_fun,
+                             const DBinaryFun &d_binary_fun)
+      : d_unary_fun_(d_unary_fun),
+        binary_fun_(binary_fun),
+        d_binary_fun_(d_binary_fun) {}
+
+  inline HOSTDEVICE T operator()(T x, T y, T out, T dout) {
+    T base;
+    if (Recomputation) {
+      base = dout * d_unary_fun_(binary_fun_(x, y));
+    } else {
+      base = dout * d_unary_fun_(binary_fun_(x, y), out);
+    }
+    return base * d_binary_fun_(x, y);
+  }
+
+ private:
+  DUnaryFun d_unary_fun_;
+  BinaryFun binary_fun_;
+  DBinaryFun d_binary_fun_;
+};
+
+template <typename T, typename DUnaryFun, typename BinaryFun,
+          typename DBinaryFun, bool Recomputation = true>
+struct UnaryCompoundGradDyFunctor {
+  UnaryCompoundGradDyFunctor(const DUnaryFun &d_unary_fun,
+                             const BinaryFun &binary_fun,
+                             const DBinaryFun &d_binary_fun)
+      : d_unary_fun_(d_unary_fun),
+        binary_fun_(binary_fun),
+        d_binary_fun_(d_binary_fun) {}
+
+  inline HOSTDEVICE T operator()(T x, T y, T out, T dout) {
+    T base;
+    if (Recomputation)
{ + base = dout * d_unary_fun_(binary_fun_(x, y)); + } else { + base = dout * d_unary_fun_(binary_fun_(x, y), out); + } + return base * d_binary_fun_(y, x); + } + + private: + DUnaryFun d_unary_fun_; + BinaryFun binary_fun_; + DBinaryFun d_binary_fun_; +}; + +template +static void RunBinaryCompoundFunctor(const framework::ExecutionContext &ctx, + const BinaryFunctor &binary_functor, + const UnaryFunctor &unary_functor, + const framework::Tensor *in_x, + const framework::Tensor *in_y, + framework::Tensor *output) { + int axis = ctx.Attr("axis"); + using BinaryCompoundFunctor = + BinaryCompoundFunctor; + + ElementwiseComputeEx( + ctx, in_x, in_y, axis, + BinaryCompoundFunctor(binary_functor, unary_functor), output); +} + +template +static void RunUnaryCompoundFunctors(const framework::ExecutionContext &ctx, + const UnaryFunctor &unary_functor, + const BinaryFunctor &binary_functor, + const framework::Tensor *in_x, + const framework::Tensor *in_y, + framework::Tensor *output) { + int axis = ctx.Attr("axis"); + + using UnaryCompoundFunctor = + UnaryCompoundFunctor; + + ElementwiseComputeEx( + ctx, in_x, in_y, axis, + UnaryCompoundFunctor(unary_functor, binary_functor), output); +} + +template +static void RunBinaryCompoundGradFunctors( + const framework::ExecutionContext &ctx, + const BinaryGradFunctor &binary_grad_functor, + const UnaryFunctor &unary_functor, + const UnaryGradFunctor &unary_grad_functor, const framework::Tensor *in_x, + const framework::Tensor *in_y, const framework::Tensor *in_out, + const framework::Tensor *in_out_grad, framework::Tensor *x_grad, + framework::Tensor *y_grad) { + int axis = ctx.Attr("axis"); + + using BinaryCompoundDxFunctor = + BinaryCompoundGradDxFunctor; + using BinaryCompoundDyFunctor = + BinaryCompoundGradDyFunctor; + + ElemwiseGradCompute( + ctx, *in_x, *in_y, *in_out, *in_out_grad, axis, x_grad, y_grad, + BinaryCompoundDxFunctor(binary_grad_functor, unary_functor), + BinaryCompoundDyFunctor(binary_grad_functor, unary_functor, + unary_grad_functor)); +} + +template +static void RunUnaryCompoundGradFunctors( + const framework::ExecutionContext &ctx, + const UnaryGradFunctor &unary_grad_functor, + const BinaryFunctor &binary_functor, + const BinaryGradFunctor &binary_grad_functor, const framework::Tensor *in_x, + const framework::Tensor *in_y, const framework::Tensor *in_out, + const framework::Tensor *in_out_grad, framework::Tensor *x_grad, + framework::Tensor *y_grad) { + int axis = ctx.Attr("axis"); + + using UnaryCompoundDxFunctor = + UnaryCompoundGradDxFunctor; + using UnaryCompoundDyFunctor = + UnaryCompoundGradDyFunctor; + + ElemwiseGradCompute( + ctx, *in_x, *in_y, *in_out, *in_out_grad, axis, x_grad, y_grad, + UnaryCompoundDxFunctor(unary_grad_functor, binary_functor, + binary_grad_functor), + UnaryCompoundDyFunctor(unary_grad_functor, binary_functor, + binary_grad_functor)); +} + +template +static void RunFunctors(const framework::ExecutionContext &ctx, + const framework::Tensor *in_x, + const framework::Tensor *in_y, + framework::Tensor *output) { + auto &functors = ctx.Attr>("functor_list"); + auto funcs_str = functors[0] + "," + functors[1]; + // TODO(zcd): The following code can be refined. 
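+  // Illustrative example (hypothetical values): with functor_list =
+  // {"elementwise_add", "scale"} and scale = 2, funcs_str becomes
+  // "elementwise_add,scale" and the fused op computes Out = X + 2 * Y in a
+  // single elementwise pass, never materializing the intermediate 2 * Y.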
+  if (funcs_str == "elementwise_add,scale") {
+    // Z = Binary(X, Unary(Y))
+    T scale = static_cast<T>(ctx.Attr<float>("scale"));
+    RunBinaryCompoundFunctor<DeviceContext, T, math::AddFunctor<T>,
+                             math::ScaleFunctor<T>>(
+        ctx, math::AddFunctor<T>(), math::ScaleFunctor<T>(scale), in_x, in_y,
+        output);
+  } else if (funcs_str == "scale,elementwise_add") {
+    // Z = Unary(Binary(X, Y))
+    T scale = static_cast<T>(ctx.Attr<float>("scale"));
+    RunUnaryCompoundFunctors<DeviceContext, T, math::ScaleFunctor<T>,
+                             math::AddFunctor<T>>(
+        ctx, math::ScaleFunctor<T>(scale), math::AddFunctor<T>(), in_x, in_y,
+        output);
+  } else if (funcs_str == "elementwise_add,relu") {
+    RunBinaryCompoundFunctor<DeviceContext, T, math::AddFunctor<T>,
+                             math::ReluFunctor<T>>(
+        ctx, math::AddFunctor<T>(), math::ReluFunctor<T>(), in_x, in_y,
+        output);
+  } else if (funcs_str == "relu,elementwise_add") {
+    RunUnaryCompoundFunctors<DeviceContext, T, math::ReluFunctor<T>,
+                             math::AddFunctor<T>>(
+        ctx, math::ReluFunctor<T>(), math::AddFunctor<T>(), in_x, in_y,
+        output);
+  } else {
+    PADDLE_THROW("%s has not been implemented.", funcs_str);
+  }
+}
+
+template <typename DeviceContext, typename T>
+static void RunGradFunctors(const framework::ExecutionContext &ctx,
+                            const framework::Tensor *in_x,
+                            const framework::Tensor *in_y,
+                            const framework::Tensor *in_out,
+                            const framework::Tensor *in_out_grad,
+                            framework::Tensor *x_grad,
+                            framework::Tensor *y_grad) {
+  auto &functors = ctx.Attr<std::vector<std::string>>("functor_list");
+  auto funcs_str = functors[0] + "," + functors[1];
+
+  bool recomputation = ctx.Attr<bool>("recomputation");
+
+  // TODO(zcd): The following code can be refined, for example, by using
+  // registration.
+  if (funcs_str == "elementwise_add_grad,scale_grad") {
+    // The backward of Z = Binary(X, Unary(Y))
+    T scale = static_cast<T>(ctx.Attr<float>("scale"));
+    if (recomputation) {
+      RunBinaryCompoundGradFunctors<DeviceContext, T, math::AddGradFunctor<T>,
+                                    math::ScaleFunctor<T>,
+                                    math::ScaleGradFunctor<T>, true>(
+          ctx, math::AddGradFunctor<T>(), math::ScaleFunctor<T>(scale),
+          math::ScaleGradFunctor<T>(scale), in_x, in_y, in_out, in_out_grad,
+          x_grad, y_grad);
+    } else {
+      RunBinaryCompoundGradFunctors<DeviceContext, T, math::AddGradFunctor<T>,
+                                    math::ScaleFunctor<T>,
+                                    math::ScaleGradFunctor<T>, false>(
+          ctx, math::AddGradFunctor<T>(), math::ScaleFunctor<T>(scale),
+          math::ScaleGradFunctor<T>(scale), in_x, in_y, in_out, in_out_grad,
+          x_grad, y_grad);
+    }
+  } else if (funcs_str == "scale_grad,elementwise_add_grad") {
+    // The backward of Z = Unary(Binary(X, Y))
+    T scale = static_cast<T>(ctx.Attr<float>("scale"));
+    if (recomputation) {
+      RunUnaryCompoundGradFunctors<DeviceContext, T,
+                                   math::ScaleGradFunctor<T>,
+                                   math::AddFunctor<T>,
+                                   math::AddGradFunctor<T>, true>(
+          ctx, math::ScaleGradFunctor<T>(scale), math::AddFunctor<T>(),
+          math::AddGradFunctor<T>(), in_x, in_y, in_out, in_out_grad, x_grad,
+          y_grad);
+    } else {
+      RunUnaryCompoundGradFunctors<DeviceContext, T,
+                                   math::ScaleGradFunctor<T>,
+                                   math::AddFunctor<T>,
+                                   math::AddGradFunctor<T>, false>(
+          ctx, math::ScaleGradFunctor<T>(scale), math::AddFunctor<T>(),
+          math::AddGradFunctor<T>(), in_x, in_y, in_out, in_out_grad, x_grad,
+          y_grad);
+    }
+  } else if (funcs_str == "elementwise_add_grad,relu_grad") {
+    if (recomputation) {
+      RunBinaryCompoundGradFunctors<DeviceContext, T, math::AddGradFunctor<T>,
+                                    math::ReluFunctor<T>,
+                                    math::ReluGradFunctor<T>, true>(
+          ctx, math::AddGradFunctor<T>(), math::ReluFunctor<T>(),
+          math::ReluGradFunctor<T>(), in_x, in_y, in_out, in_out_grad, x_grad,
+          y_grad);
+    } else {
+      RunBinaryCompoundGradFunctors<DeviceContext, T, math::AddGradFunctor<T>,
+                                    math::ReluFunctor<T>,
+                                    math::ReluGradFunctor<T>, false>(
+          ctx, math::AddGradFunctor<T>(), math::ReluFunctor<T>(),
+          math::ReluGradFunctor<T>(), in_x, in_y, in_out, in_out_grad, x_grad,
+          y_grad);
+    }
+  } else if (funcs_str == "relu_grad,elementwise_add_grad") {
+    if (recomputation) {
+      RunUnaryCompoundGradFunctors<DeviceContext, T, math::ReluGradFunctor<T>,
+                                   math::AddFunctor<T>,
+                                   math::AddGradFunctor<T>, true>(
+          ctx, math::ReluGradFunctor<T>(), math::AddFunctor<T>(),
+          math::AddGradFunctor<T>(), in_x, in_y, in_out, in_out_grad, x_grad,
+          y_grad);
+    } else {
+
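+      // For intuition (illustrative): in this branch Z = relu(X + Y), so
+      // both dX and dY equal dout * relu'(X + Y); with recomputation
+      // disabled, relu' is read from the saved forward output instead of
+      // being recomputed from X + Y.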
RunUnaryCompoundGradFunctors, + math::AddFunctor, math::AddGradFunctor, + false>(ctx, math::ReluGradFunctor(), + math::AddFunctor(), + math::AddGradFunctor(), in_x, in_y, + in_out, in_out_grad, x_grad, y_grad); + } + } else { + PADDLE_THROW("%s has not been implemented.", funcs_str); + } +} + +template +class FusedElemwiseActivationKernel : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext &ctx) const override { + auto &in_x = detail::Ref(ctx.Input("X"), + "Cannot get input tensor %s, variable name = %s", + "X", ctx.op().Input("X")); + auto &in_y = detail::Ref(ctx.Input("Y"), + "Cannot get input tensor %s, variable name = %s", + "Y", ctx.op().Input("Y")); + auto &output = detail::Ref(ctx.Output("Out"), + "Cannot get input tensor %s, variable name = %s", + "Out", ctx.op().Output("Out")); + + RunFunctors(ctx, &in_x, &in_y, &output); + } +}; + +template +class FusedElemwiseActivationGradKernel : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext &ctx) const override { + auto &in_x = detail::Ref(ctx.Input("X"), + "Cannot get input tensor %s, variable name = %s", + "X", ctx.op().Input("X")); + auto &in_y = detail::Ref(ctx.Input("Y"), + "Cannot get input tensor %s, variable name = %s", + "Y", ctx.op().Input("Y")); + auto &in_out = detail::Ref(ctx.Input("Out"), + "Cannot get input tensor %s, variable name = %s", + "Out", ctx.op().Input("Out")); + auto &in_out_grad = + detail::Ref(ctx.Input(framework::GradVarName("Out")), + "Cannot get input tensor %s, variable name = %s", + framework::GradVarName("Out"), + ctx.op().Input(framework::GradVarName("Out"))); + + framework::Tensor *x_grad = + ctx.Output(framework::GradVarName("X")); + framework::Tensor *y_grad = + ctx.Output(framework::GradVarName("Y")); + + RunGradFunctors(ctx, &in_x, &in_y, &in_out, &in_out_grad, + x_grad, y_grad); + } +}; +} // namespace operators +} // namespace paddle diff --git a/paddle/fluid/operators/gather_op.cc b/paddle/fluid/operators/gather_op.cc index 4c82f5c429..aa3e05b83b 100644 --- a/paddle/fluid/operators/gather_op.cc +++ b/paddle/fluid/operators/gather_op.cc @@ -33,7 +33,6 @@ class GatherOp : public framework::OperatorWithKernel { auto index_dims = ctx->GetInputDim("Index"); PADDLE_ENFORCE(index_dims.size() == 1); int batch_size = ctx->GetInputDim("Index")[0]; - PADDLE_ENFORCE_GE(batch_size, 0, "Batch size must be >0"); framework::DDim output_dims(ctx->GetInputDim("X")); output_dims[0] = batch_size; ctx->SetOutputDim("Out", output_dims); @@ -67,8 +66,7 @@ class GatherGradOp : public framework::OperatorWithKernel { class GatherOpMaker : public framework::OpProtoAndCheckerMaker { public: - GatherOpMaker(OpProto* proto, OpAttrChecker* op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("X", "The source input of gather op"); AddInput("Index", "The index input of gather op"); AddOutput("Out", "The output of gather op"); diff --git a/paddle/fluid/operators/gather_test.cc b/paddle/fluid/operators/gather_test.cc index 9c0561b016..f6b156eb30 100644 --- a/paddle/fluid/operators/gather_test.cc +++ b/paddle/fluid/operators/gather_test.cc @@ -43,7 +43,8 @@ TEST(Gather, GatherData) { auto* cpu_place = new paddle::platform::CPUPlace(); paddle::platform::CPUDeviceContext ctx(*cpu_place); paddle::operators::CPUGather(ctx, *src, *index, output); - + delete cpu_place; + cpu_place = NULL; for (int i = 0; i < 4; ++i) EXPECT_EQ(p_output[i], i + 4); for (int i = 4; i < 8; ++i) EXPECT_EQ(p_output[i], i - 4); diff --git 
a/paddle/fluid/operators/gaussian_random_batch_size_like_op.cc b/paddle/fluid/operators/gaussian_random_batch_size_like_op.cc
index 53c706a83e..4a97428148 100644
--- a/paddle/fluid/operators/gaussian_random_batch_size_like_op.cc
+++ b/paddle/fluid/operators/gaussian_random_batch_size_like_op.cc
@@ -32,16 +32,16 @@ class GaussianRandomBatchSizeLikeOp : public BatchSizeLikeOp {
 };
 
 class GaussianRandomBatchSizeLikeOpMaker : public BatchSizeLikeOpMaker {
- public:
-  GaussianRandomBatchSizeLikeOpMaker(OpProto *proto, OpAttrChecker *op_checker)
-      : BatchSizeLikeOpMaker(proto, op_checker) {
+ protected:
+  void Apply() override {
     AddAttr<float>("mean",
                    "(float, default 0.0) "
-                   "mean of random tensor.")
+                   "The mean (or center) of the gaussian distribution.")
         .SetDefault(.0f);
     AddAttr<float>("std",
                    "(float, default 1.0) "
-                   "std of random tensor.")
+                   "The standard deviation (std, or spread) of the "
+                   "gaussian distribution.")
         .SetDefault(1.0f);
     AddAttr<int>("seed",
                  "(int, default 0) "
@@ -56,9 +56,11 @@ class GaussianRandomBatchSizeLikeOpMaker : public BatchSizeLikeOpMaker {
         .SetDefault(framework::proto::VarType::FP32);
 
     AddComment(R"DOC(
-GaussianRandom Operator.
 
 Used to initialize tensors with gaussian random generator.
+The default mean of the distribution is 0.0, and the default standard
+deviation (std) of the distribution is 1.0. Users can set mean and std
+by input arguments.
 )DOC");
   }
 };
diff --git a/paddle/fluid/operators/gaussian_random_mkldnn_op.cc b/paddle/fluid/operators/gaussian_random_mkldnn_op.cc
new file mode 100644
index 0000000000..76b00b396c
--- /dev/null
+++ b/paddle/fluid/operators/gaussian_random_mkldnn_op.cc
@@ -0,0 +1,55 @@
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+#include <random>
+#include "paddle/fluid/operators/mean_op.h"
+
+namespace paddle {
+namespace operators {
+
+using framework::DataLayout;
+template <typename T>
+class GaussianMKLDNNKernel : public paddle::framework::OpKernel<T> {
+ public:
+  void Compute(const framework::ExecutionContext& context) const override {
+    float mean = context.Attr<float>("mean");
+    float std = context.Attr<float>("std");
+    auto* tensor = context.Output<framework::Tensor>("Out");
+    T* data = tensor->mutable_data<T>(context.GetPlace());
+
+    unsigned int seed = static_cast<unsigned int>(context.Attr<int>("seed"));
+    std::minstd_rand engine;
+    if (seed == 0) {
+      seed = std::random_device()();
+    }
+    engine.seed(seed);
+    std::normal_distribution<T> dist(mean, std);
+    int64_t size = tensor->numel();
+    for (int64_t i = 0; i < size; ++i) {
+      data[i] = dist(engine);
+    }
+
+    // The format of output is set as the mkldnn's format
+    // TODO(@mozga-intel) The format of the matrix is set inside the other
+    // layers.
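+    // Note (illustrative): a seed attribute of 0 falls back to
+    // std::random_device above, so each run yields different values; a
+    // fixed seed, e.g. seed = 42, makes the generated tensor reproducible.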
+ tensor->set_layout(DataLayout::kMKLDNN); + tensor->set_format(mkldnn::memory::format::oihw); + } +}; +} // namespace operators +} // namespace paddle + +namespace ops = paddle::operators; + +REGISTER_OP_KERNEL(gaussian_random, MKLDNN, ::paddle::platform::CPUPlace, + ops::GaussianMKLDNNKernel); diff --git a/paddle/fluid/operators/gaussian_random_op.cc b/paddle/fluid/operators/gaussian_random_op.cc index 4d197637b3..1488aab192 100644 --- a/paddle/fluid/operators/gaussian_random_op.cc +++ b/paddle/fluid/operators/gaussian_random_op.cc @@ -15,6 +15,10 @@ limitations under the License. */ #include #include "paddle/fluid/framework/op_registry.h" +#ifdef PADDLE_WITH_MKLDNN +#include "paddle/fluid/platform/mkldnn_helper.h" +#endif + namespace paddle { namespace operators { @@ -62,16 +66,26 @@ class GaussianRandomOp : public framework::OperatorWithKernel { protected: framework::OpKernelType GetExpectedKernelType( const framework::ExecutionContext& ctx) const override { + framework::LibraryType library{framework::LibraryType::kPlain}; + framework::DataLayout layout{framework::DataLayout::kAnyLayout}; + +#ifdef PADDLE_WITH_MKLDNN + if (library == framework::LibraryType::kPlain && + platform::CanMKLDNNBeUsed(ctx)) { + library = framework::LibraryType::kMKLDNN; + layout = framework::DataLayout::kMKLDNN; + } +#endif + return framework::OpKernelType( static_cast(ctx.Attr("dtype")), - ctx.device_context()); + ctx.device_context(), layout, library); } }; class GaussianRandomOpMaker : public framework::OpProtoAndCheckerMaker { public: - GaussianRandomOpMaker(OpProto* proto, OpAttrChecker* op_checker) - : framework::OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddOutput("Out", "Output matrix of gaussian random op"); AddAttr>("shape", @@ -96,7 +110,9 @@ class GaussianRandomOpMaker : public framework::OpProtoAndCheckerMaker { "(int, default 5(FP32)) " "Output data type.") .SetDefault(framework::proto::VarType::FP32); - + AddAttr("use_mkldnn", + "(bool, default false) Only used in mkldnn kernel") + .SetDefault(false); AddComment(R"DOC( GaussianRandom Operator. diff --git a/paddle/fluid/operators/gen_nccl_id_op.cc b/paddle/fluid/operators/gen_nccl_id_op.cc new file mode 100644 index 0000000000..697c239e59 --- /dev/null +++ b/paddle/fluid/operators/gen_nccl_id_op.cc @@ -0,0 +1,141 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/
+
+#include <nccl.h>
+#include <functional>
+#include <string>
+#include <thread>
+
+#include "paddle/fluid/framework/executor.h"
+#include "paddle/fluid/framework/lod_tensor.h"
+#include "paddle/fluid/framework/op_registry.h"
+#include "paddle/fluid/framework/threadpool.h"
+#include "paddle/fluid/operators/detail/macros.h"
+#include "paddle/fluid/operators/distributed/request_handler_impl.h"
+#include "paddle/fluid/platform/nccl_helper.h"
+
+namespace paddle {
+namespace operators {
+
+class GenNCCLIdOp : public framework::OperatorBase {
+ public:
+  GenNCCLIdOp(const std::string& type, const framework::VariableNameMap& inputs,
+              const framework::VariableNameMap& outputs,
+              const framework::AttributeMap& attrs)
+      : OperatorBase(type, inputs, outputs, attrs) {}
+
+  void RunImpl(const framework::Scope& scope,
+               const platform::Place& dev_place) const override {
+    platform::DeviceContextPool& pool = platform::DeviceContextPool::Instance();
+    // put nccl id in CPUPlace
+    auto& dev_ctx = *pool.Get(platform::CPUPlace());
+    int trainer_id = Attr<int>("trainer_id");
+    framework::Scope& local_scope = scope.NewScope();
+
+    if (trainer_id == 0) {
+      GenerateAndSend(&local_scope, dev_ctx);
+    } else {
+      GetIdByServer(&local_scope, dev_ctx);
+    }
+  }
+
+ private:
+  void GenerateAndSend(framework::Scope* scope,
+                       const platform::DeviceContext& dev_ctx) const {
+    auto var = scope->FindVar(NCCL_ID_VARNAME);
+    PADDLE_ENFORCE_NOT_NULL(var);
+    auto id = var->GetMutable<ncclUniqueId>();
+    PADDLE_ENFORCE(platform::dynload::ncclGetUniqueId(id));
+
+    std::vector<std::string> endpoint_list =
+        Attr<std::vector<std::string>>("endpoint_list");
+    distributed::RPCClient* client =
+        distributed::RPCClient::GetInstance<RPCCLIENT_T>();
+
+    for (auto& ep : endpoint_list) {
+      VLOG(3) << "sending nccl id to " << ep;
+      client->AsyncSendVar(ep, dev_ctx, *scope, NCCL_ID_VARNAME);
+    }
+    client->Wait();
+    for (auto& ep : endpoint_list) {
+      client->AsyncSendBatchBarrier(ep);
+    }
+    client->Wait();
+    VLOG(3) << "sending completed...";
+  }
+
+  void GetIdByServer(framework::Scope* scope,
+                     const platform::DeviceContext& dev_ctx) const {
+    std::string endpoint = Attr<std::string>("endpoint");
+    // NOTE: Cannot use unique_ptr here because the default
+    // deleter will call the gRPC server's base class's dtor and
+    // that will cause a weird crash.
+    distributed::RequestSendHandler rpc_h(true);
+    std::unique_ptr<distributed::RPCServer> rpc_service(
+        new RPCSERVER_T(endpoint, 1));
+
+    rpc_service->RegisterRPC(distributed::kRequestSend, &rpc_h);
+    rpc_h.SetRPCServer(rpc_service.get());
+
+    framework::ProgramDesc empty_program;
+    framework::Executor executor(dev_ctx.GetPlace());
+    rpc_h.SetScope(scope);
+    rpc_h.SetDevCtx(&dev_ctx);
+    rpc_h.SetProgram(&empty_program);
+    rpc_h.SetExecutor(&executor);
+
+    std::thread server_thread(
+        std::bind(&distributed::RPCServer::StartServer, rpc_service.get()));
+
+    rpc_service->SetCond(distributed::kRequestSend);
+    VLOG(3) << "start getting nccl id from trainer 0...";
+    rpc_service->WaitBarrier(distributed::kRequestSend);
+    VLOG(3) << "got nccl id and stop server...";
+    rpc_service->ShutDown();
+    VLOG(3) << "rpc server stopped";
+    server_thread.join();
+  }
+};
+
+class GenNCCLIdOpMaker : public framework::OpProtoAndCheckerMaker {
+ public:
+  void Make() override {
+    AddOutput("NCCLID", "Raw variable containing a NCCL UniqueId instance.");
+    AddComment(R"DOC(
+GenNCCLId operator
+
+For trainer 0: generate a new UniqueId and send it to all the other trainers.
+For trainer 1~n: start a gRPC server to get the UniqueId; once it is
+received, stop the server.
+)DOC");
+    AddAttr<std::string>("endpoint",
+                         "(string), e.g.
127.0.0.1:6175 " + "current listen endpoint"); + AddAttr>( + "endpoint_list", + "['trainer1_ip:port', 'trainer2_ip:port', ...] " + "list of trainer endpoints start from trainer 1") + .SetDefault({}); + AddAttr("trainer_id", + "(int default 0) " + "The index of the trainer in distributed training.") + .SetDefault(0); + } +}; + +} // namespace operators +} // namespace paddle + +namespace ops = paddle::operators; + +REGISTER_OPERATOR(gen_nccl_id, ops::GenNCCLIdOp, ops::GenNCCLIdOpMaker); diff --git a/paddle/fluid/operators/get_places_op.cc b/paddle/fluid/operators/get_places_op.cc index 0d7219ac5c..db6ff78256 100644 --- a/paddle/fluid/operators/get_places_op.cc +++ b/paddle/fluid/operators/get_places_op.cc @@ -78,15 +78,14 @@ class GetPlacesOp : public framework::OperatorBase { class GetPlacesOpProtoMaker : public framework::OpProtoAndCheckerMaker { public: - GetPlacesOpProtoMaker(OpProto *proto, OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddOutput("Out", "vector of Place"); AddAttr("device_count", "device count").SetDefault(0); AddAttr("device_type", "device type") .InEnum({"CUDA", "CPU", "AUTO"}) .SetDefault("AUTO"); AddComment(R"DOC( -Returns a list of places based on flags. The list will be used for parallel +Returns a list of places based on arguments. The list will be used for parallel execution. )DOC"); } diff --git a/paddle/fluid/operators/go_op.cc b/paddle/fluid/operators/go_op.cc index b8e1556c23..48f9d967ad 100644 --- a/paddle/fluid/operators/go_op.cc +++ b/paddle/fluid/operators/go_op.cc @@ -89,8 +89,7 @@ class GoOp : public framework::OperatorBase { class GoOpMaker : public framework::OpProtoAndCheckerMaker { public: - GoOpMaker(OpProto *proto, OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput(kX, "A set of variables, which are required by operators inside the " "block of Go Op.") diff --git a/paddle/fluid/operators/gru_op.cc b/paddle/fluid/operators/gru_op.cc index 0a524c914d..5c74687882 100644 --- a/paddle/fluid/operators/gru_op.cc +++ b/paddle/fluid/operators/gru_op.cc @@ -71,8 +71,7 @@ class GRUOp : public framework::OperatorWithKernel { class GRUOpMaker : public framework::OpProtoAndCheckerMaker { public: - GRUOpMaker(OpProto* proto, OpAttrChecker* op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("Input", "(LoDTensor) The first input is a LodTensor, which supports " "variable-time length input sequence. The underlying tensor in " diff --git a/paddle/fluid/operators/gru_unit_op.cc b/paddle/fluid/operators/gru_unit_op.cc index f8d1d44b54..82a808b01e 100644 --- a/paddle/fluid/operators/gru_unit_op.cc +++ b/paddle/fluid/operators/gru_unit_op.cc @@ -71,8 +71,7 @@ class GRUUnitOp : public framework::OperatorWithKernel { class GRUUnitOpMaker : public framework::OpProtoAndCheckerMaker { public: - GRUUnitOpMaker(OpProto* proto, OpAttrChecker* op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("Input", "(Tensor) Matrix with shape [batch_size, frame_size * 3] for the " "input."); diff --git a/paddle/fluid/operators/hierarchical_sigmoid_op.cc b/paddle/fluid/operators/hierarchical_sigmoid_op.cc new file mode 100644 index 0000000000..dadd054b9a --- /dev/null +++ b/paddle/fluid/operators/hierarchical_sigmoid_op.cc @@ -0,0 +1,167 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. 
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+#include "paddle/fluid/operators/hierarchical_sigmoid_op.h"
+#include <vector>
+
+namespace paddle {
+namespace operators {
+
+/**
+ * Organize the classes into a binary tree. At each node, a sigmoid function
+ * is used to calculate the probability of belonging to the right branch.
+ * This idea is from "F. Morin, Y. Bengio (AISTATS 05):
+ * Hierarchical Probabilistic Neural Network Language Model."
+ *
+ * Here we use a simple way of making the binary tree.
+ * Assuming the number of classes C = 6,
+ * The classes are organized as a binary tree in the following way:
+ *
+ * @code{.py}
+ *     *-*-*- 2
+ *     | | |- 3
+ *     | |
+ *     | |-*- 4
+ *     |   |- 5
+ *     |
+ *     |-*- 0
+ *       |- 1
+ * @endcode
+ *
+ * where * indicates an internal node, and each leaf node represents a class.
+ * - Node 0 ... C-2 are internal nodes.
+ * - Node C-1 ... 2C-2 are leaf nodes.
+ * - Class c is represented by leaf node \f$c+C-1\f$.
+ *
+ * We assign an id for each node:
+ * - the id of the root is 0.
+ * - the left child of a node i is 2*i+1.
+ * - the right child of a node i is 2*i+2.
+ *
+ * It's easy to see that:
+ * - the parent of node i is \f$\left\lfloor(i-1)/2\right\rfloor\f$.
+ * - the j-th level ancestor of node i is
+ *   \f$\left\lfloor(i+1)/2^{j+1}\right\rfloor - 1\f$.
+ * - A node i is a left child of its parent if \f$(i-1)\%2==0\f$.
+ *
+ */
+
+class HierarchicalSigmoidOp : public framework::OperatorWithKernel {
+ public:
+  using framework::OperatorWithKernel::OperatorWithKernel;
+  void InferShape(framework::InferShapeContext* ctx) const override {
+    PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) should not be null.");
+    PADDLE_ENFORCE(ctx->HasInput("Label"), "Input(Label) should not be null.");
+    PADDLE_ENFORCE(ctx->HasInput("W"), "Input(W) should not be null.");
+    PADDLE_ENFORCE(ctx->HasOutput("Out"), "Output(Out) should not be null.");
+    PADDLE_ENFORCE(ctx->HasOutput("PreOut"),
+                   "Output(PreOut) should not be null.");
+    const int64_t batch_size = ctx->GetInputDim("X")[0];
+    std::vector<int64_t> output_shape({batch_size, 1});
+    ctx->SetOutputDim("Out", framework::make_ddim(output_shape));
+  }
+
+ protected:
+  framework::OpKernelType GetExpectedKernelType(
+      const framework::ExecutionContext& ctx) const override {
+    return framework::OpKernelType(
+        framework::ToDataType(ctx.Input<framework::Tensor>("X")->type()),
+        ctx.GetPlace());
+  }
+};
+
+template <typename AttrType>
+class HierarchicalSigmoidOpMaker : public framework::OpProtoAndCheckerMaker {
+ public:
+  void Make() override {
+    AddInput("X",
+             "(Tensor, required) The input tensor with shape [N, D], "
+             "where N is the size of mini-batch, and D is the feature size.");
+    AddInput("W",
+             "(Tensor, required), The parameters of hierarchical "
+             "sigmoid operator, each of them is a 2-D tensor, the shape is"
+             "[num_classes - 1, D].");
+    AddInput("Label",
+             "(Tensor, required), The labels of training data.
It's a" + "tensor with shape [N, 1]."); + AddInput("Bias", + "(Tensor, optional), The bias is a tensor with shape" + "[1, num_classes - 1]."); + AddOutput("Out", + "(Tensor, required) The output of hierarchical sigmoid operator." + "The shape is [N, 1]."); + AddOutput("PreOut", + "(Tensor, required) A intermedia 2-D tensor with shape " + "[batch_size, code_length], where code_length represents the " + "maximum path length from root to leaf nodes.") + .AsIntermediate(); + AddAttr("num_classes", "(int, required), The number of classes") + .SetDefault(2); + AddComment(R"DOC( +The hierarchical sigmoid operator organize the classes into a binary tree. +At each node, a sigmoid function is used to calculate the probability of +belonging to the right branch. This idea is from +"F. Morin, Y. Bengio (AISTATS 05): +Hierarchical Probabilistic Neural Network Language Model." + )DOC"); + } +}; + +class HierarchicalSigmoidGradOp : public framework::OperatorWithKernel { + public: + using framework::OperatorWithKernel::OperatorWithKernel; + void InferShape(framework::InferShapeContext* ctx) const override { + PADDLE_ENFORCE(ctx->HasInput("W"), "Input(W) should not be null."); + PADDLE_ENFORCE(ctx->HasInput("Label"), "Input(Label) should not be null."); + PADDLE_ENFORCE(ctx->HasInput("PreOut"), + "Input(Preout) should not be null."); + PADDLE_ENFORCE(ctx->HasOutput(framework::GradVarName("W")), + "Output(W@Grad should not be null.)"); + PADDLE_ENFORCE(ctx->HasOutput(framework::GradVarName("X"))); + if (ctx->HasOutput(framework::GradVarName("Bias"))) { + ctx->SetOutputDim(framework::GradVarName("Bias"), + ctx->GetInputDim("Bias")); + } + ctx->SetOutputDim(framework::GradVarName("W"), ctx->GetInputDim("W")); + ctx->SetOutputDim(framework::GradVarName("X"), ctx->GetInputDim("X")); + } + + protected: + framework::OpKernelType GetExpectedKernelType( + const framework::ExecutionContext& ctx) const override { + return framework::OpKernelType( + framework::ToDataType(ctx.Input("X")->type()), + ctx.GetPlace()); + } +}; + +} // namespace operators +} // namespace paddle + +namespace ops = paddle::operators; +REGISTER_OPERATOR(hierarchical_sigmoid, ops::HierarchicalSigmoidOp, + ops::HierarchicalSigmoidOpMaker, + paddle::framework::DefaultGradOpDescMaker); +REGISTER_OPERATOR(hierarchical_sigmoid_grad, ops::HierarchicalSigmoidGradOp); +REGISTER_OP_CPU_KERNEL( + hierarchical_sigmoid, + ops::HierarchicalSigmoidOpKernel, + ops::HierarchicalSigmoidOpKernel); +REGISTER_OP_CPU_KERNEL( + hierarchical_sigmoid_grad, + ops::HierarchicalSigmoidGradOpKernel, + ops::HierarchicalSigmoidGradOpKernel); diff --git a/paddle/fluid/operators/hierarchical_sigmoid_op.h b/paddle/fluid/operators/hierarchical_sigmoid_op.h new file mode 100644 index 0000000000..64096a717b --- /dev/null +++ b/paddle/fluid/operators/hierarchical_sigmoid_op.h @@ -0,0 +1,135 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ + +#pragma once +#include +#include +#include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/operators/clip_op.h" +#include "paddle/fluid/operators/math/math_function.h" +#include "paddle/fluid/operators/math/matrix_bit_code.h" +#include "paddle/fluid/platform/transform.h" +namespace paddle { +namespace operators { + +template +using EigenMatrix = framework::EigenMatrix; +using platform::Transform; + +template +class HierarchicalSigmoidOpKernel : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext& ctx) const override { + auto* in = ctx.Input("X"); + auto* w = ctx.Input("W"); + auto* label = ctx.Input("Label"); + auto* bias = ctx.Input("Bias"); + auto* out = ctx.Output("Out"); + auto* pre_out = ctx.Output("PreOut"); + size_t num_classes = static_cast(ctx.Attr("num_classes")); + int64_t code_length = math::FindLastSet(num_classes - 1); + int64_t batch_size = in->dims()[0]; + framework::Tensor sum; + auto& dev_ctx = ctx.template device_context(); + auto* pre_out_data = pre_out->mutable_data( + framework::make_ddim({batch_size, code_length}), ctx.GetPlace()); + auto pre_out_mat = EigenMatrix::From(*pre_out); + // Not all class(leaf) nodes' path lengths equal code_length, thus init as + // 0s can avoid out of path's loss. + math::SetConstant zero; + zero(dev_ctx, pre_out, static_cast(0.0)); + auto& place = *ctx.template device_context().eigen_device(); + math::RowwiseSum row_sum; + math::MatrixBitCodeFunctor bit_code(num_classes, label->data()); + + std::vector sum_dims({batch_size, 1UL}); + sum.mutable_data(framework::make_ddim(sum_dims), ctx.GetPlace()); + auto sum_mat = EigenMatrix::From(sum); + out->mutable_data(ctx.GetPlace()); + auto out_mat = framework::EigenVector::Flatten(*out); + if (bias) { + bit_code.Add(pre_out, *bias); + } + bit_code.Mul(pre_out, *w, *in); + // clip to [-40, 40] + Transform trans; + trans(ctx.template device_context(), pre_out_data, + pre_out_data + pre_out->numel(), pre_out_data, + ClipFunctor(static_cast(-40.0), static_cast(40.0))); + bit_code.Sum(*pre_out, out, static_cast(-1)); + // use softrelu to calculate cross entropy + pre_out_mat.device(place) = (static_cast(1.0) + pre_out_mat.exp()).log(); + row_sum(dev_ctx, *pre_out, &sum); + // TODO(guosheng): Subtract the out of path's loss, since not all + // class(leaf) nodes' path lengths equal code_length. But it won't break the + // gradient check since both have the out of path's loss and will cancel out + // each other. 
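+    // Worked example (illustrative): with num_classes = 4 there are
+    // num_classes - 1 = 3 internal nodes and code_length = FindLastSet(3)
+    // = 2, so each row of pre_out holds at most two path logits; the loss
+    // assembled below is sum_j log(1 + exp(pre_out_j)) over a sample's path
+    // nodes, minus the signed pre_out sum taken earlier by bit_code.Sum.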
+ out_mat.device(place) = sum_mat + out_mat; + } +}; + +template +class HierarchicalSigmoidGradOpKernel : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext& ctx) const override { + auto* in = ctx.Input("X"); + auto* w = ctx.Input("W"); + auto* in_grad = ctx.Output(framework::GradVarName("X")); + auto* w_grad = ctx.Output(framework::GradVarName("W")); + auto* bias_grad = + ctx.Output(framework::GradVarName("Bias")); + auto* label = ctx.Input("Label"); + auto* pre_out = ctx.Input("PreOut"); + auto* out_grad = + ctx.Input(framework::GradVarName("Out")); + framework::Tensor pre_out_grad; + + pre_out_grad.mutable_data(pre_out->dims(), ctx.GetPlace()); + in_grad->mutable_data(ctx.GetPlace()); + w_grad->mutable_data(ctx.GetPlace()); + auto& dev_ctx = ctx.template device_context(); + math::SetConstant zero; + zero(dev_ctx, in_grad, static_cast(0.0)); + zero(dev_ctx, w_grad, static_cast(0.0)); + + size_t num_classes = static_cast(ctx.Attr("num_classes")); + math::MatrixBitCodeFunctor bit_code(num_classes, label->data()); + + auto& place = *ctx.template device_context().eigen_device(); + auto pre_out_mat = EigenMatrix::From(*pre_out); + auto pre_out_grad_mat = EigenMatrix::From(pre_out_grad); + auto out_grad_mat = EigenMatrix::From(*out_grad); + Eigen::array bcast({{1, static_cast(pre_out_grad.dims()[1])}}); + + // softrelu derivative + pre_out_grad_mat.device(place) = + static_cast(1.0) - static_cast(1.0) / pre_out_mat.exp(); + bit_code.Sub(&pre_out_grad); // the gradient of clip(w * x + b) + pre_out_grad_mat.device(place) = + pre_out_grad_mat * out_grad_mat.broadcast(bcast); + // TODO(guosheng): multiply pre_out_grad with subgradient of clipping to + // be consistent with the clipping in forward. + if (bias_grad) { + bias_grad->mutable_data(ctx.GetPlace()); + zero(dev_ctx, bias_grad, static_cast(0.0)); + bit_code.AddGrad(pre_out_grad, bias_grad); + } + bit_code.MulGradWeight(pre_out_grad, w_grad, *in); + bit_code.MulGradError(pre_out_grad, *w, in_grad); + } +}; + +} // namespace operators +} // namespace paddle diff --git a/paddle/fluid/operators/hinge_loss_op.cc b/paddle/fluid/operators/hinge_loss_op.cc index 086b5a97de..69e7fa4490 100644 --- a/paddle/fluid/operators/hinge_loss_op.cc +++ b/paddle/fluid/operators/hinge_loss_op.cc @@ -46,8 +46,7 @@ class HingeLossOp : public framework::OperatorWithKernel { template class HingeLossOpMaker : public framework::OpProtoAndCheckerMaker { public: - HingeLossOpMaker(OpProto* proto, OpAttrChecker* op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("Logits", "The input value (Logits) of Hinge loss op." "Logits is a 2-D tensor with shape [batch_size, 1]."); diff --git a/paddle/fluid/operators/huber_loss_op.cc b/paddle/fluid/operators/huber_loss_op.cc index 74d8e0e2b7..4ecd8634ff 100644 --- a/paddle/fluid/operators/huber_loss_op.cc +++ b/paddle/fluid/operators/huber_loss_op.cc @@ -45,8 +45,7 @@ class HuberLossOp : public framework::OperatorWithKernel { template class HuberLossOpMaker : public framework::OpProtoAndCheckerMaker { public: - HuberLossOpMaker(OpProto* proto, OpAttrChecker* op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("X", "The input value of huber loss op." 
"X is a 2-D tensor with shape [batch_size, 1]."); diff --git a/paddle/fluid/operators/im2sequence_op.cc b/paddle/fluid/operators/im2sequence_op.cc index 8c120eec86..8efd43928a 100644 --- a/paddle/fluid/operators/im2sequence_op.cc +++ b/paddle/fluid/operators/im2sequence_op.cc @@ -13,6 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/operators/im2sequence_op.h" +#include #include namespace paddle { @@ -28,40 +29,34 @@ class Im2SequenceOp : public framework::OperatorWithKernel { "Input(X) of Im2SequenceOp should not be null."); PADDLE_ENFORCE(ctx->HasOutput("Out"), "Output(Out) of Im2SequenceOp op should not be null."); - auto in_dim = ctx->GetInputDim("X"); + PADDLE_ENFORCE_EQ(in_dim.size(), 4, "Input(X) format must be 4D tensor, eg., NCHW."); + int img_channels = in_dim[1]; auto kernels = ctx->Attrs().Get>("kernels"); auto strides = ctx->Attrs().Get>("strides"); auto paddings = ctx->Attrs().Get>("paddings"); - int batch_size = in_dim[0]; - int img_channels = in_dim[1]; - int img_height = in_dim[2]; - int img_width = in_dim[3]; - - int output_height = Im2SeqOutputSize(img_height, kernels[0], paddings[0], - paddings[2], strides[0]); - int output_width = Im2SeqOutputSize(img_width, kernels[1], paddings[1], - paddings[3], strides[1]); - - ctx->SetOutputDim("Out", {batch_size * output_height * output_width, - img_channels * kernels[0] * kernels[1]}); + ctx->SetOutputDim("Out", + {in_dim[0], img_channels * kernels[0] * kernels[1]}); } }; class Im2SequenceOpMaker : public framework::OpProtoAndCheckerMaker { public: - Im2SequenceOpMaker(OpProto* proto, OpAttrChecker* op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("X", "(Tensor) The input tensor has NCHW format." "N: batch size" "C: channels" "H: height" "W: width"); + AddInput("Y", + "(Tensor) The input tensor of image real size(H, W)." + "2-D with shape [batchsize, 2]") + .AsDispensable(); AddOutput("Out", "(LodTensor) The output data of im2sequence op,"); AddAttr>("kernels", "(vector), the " @@ -74,6 +69,13 @@ class Im2SequenceOpMaker : public framework::OpProtoAndCheckerMaker { "(vector default:{0, 0, 0, 0}), the " "paddings(up_pad, left_pad, down_pad, right_pad)") .SetDefault({0, 0, 0, 0}); + AddAttr>("out_stride", + "the attribute is valid only when input(Y)" + "is not NULL.this attribute represents the" + "scaling of the pic through the CNN" + "(vector dedault:{1,1}),the out_stride" + " (out_stride_height, out_stride_width)") + .SetDefault({1, 1}); AddComment(R"DOC( This op uses kernels to scan images and converts these images to sequences. After expanding, The number of time steps are output_height * output_width @@ -124,7 +126,7 @@ output.data = [[ 6. 2. 8. 3. 2. 4. 6. 3.] [ 7. 1. 7. 9. 2. 1. 3. 5.] [ 5. 7. 2. 4. 1. 3. 9. 0.] [ 7. 9. 4. 8. 3. 5. 0. 8.]] -output.dims = {8, 9} +output.dims = {8, 8} output.lod = [[0, 4, 8]] )DOC"); diff --git a/paddle/fluid/operators/im2sequence_op.h b/paddle/fluid/operators/im2sequence_op.h index d792c68f78..4a99428194 100644 --- a/paddle/fluid/operators/im2sequence_op.h +++ b/paddle/fluid/operators/im2sequence_op.h @@ -13,6 +13,7 @@ limitations under the License. 
*/ #pragma once +#include #include #include "paddle/fluid/framework/data_layout.h" #include "paddle/fluid/framework/eigen.h" @@ -39,50 +40,107 @@ class Im2SequenceKernel : public framework::OpKernel { void Compute(const framework::ExecutionContext& ctx) const override { const Tensor* in = ctx.Input("X"); LoDTensor* out = ctx.Output("Out"); - out->mutable_data(ctx.GetPlace()); - // TODO(wanghaoshuang): Add layout checker after 'set_layout' - // being available for python API - // PADDLE_ENFORCE_EQ(in->layout(), framework::DataLayout::kNCHW, - // "Input(X) layout must be NCHW"); auto in_dim = in->dims(); int batch_size = in_dim[0]; int img_channels = in_dim[1]; int img_height = in_dim[2]; int img_width = in_dim[3]; - auto kernels = ctx.Attr>("kernels"); auto strides = ctx.Attr>("strides"); auto paddings = ctx.Attr>("paddings"); - int output_height = Im2SeqOutputSize(img_height, kernels[0], paddings[0], - paddings[2], strides[0]); - int output_width = Im2SeqOutputSize(img_width, kernels[1], paddings[1], - paddings[3], strides[1]); - - const std::vector dilations({1, 1}); - - auto out_dims = out->dims(); - out->Resize({batch_size, out->numel() / batch_size}); - for (int i = 0; i < batch_size; i++) { - const Tensor src = - in->Slice(i, i + 1).Resize({img_channels, img_height, img_width}); - Tensor dst = out->Slice(i, i + 1).Resize( - {output_height, output_width, img_channels, kernels[0], kernels[1]}); - - math::Im2ColFunctor f; - auto& dev_ctx = ctx.template device_context(); - f(dev_ctx, src, dilations, strides, paddings, &dst); - } - out->Resize(out_dims); - - // set lod information - // TODO(wanghaoshuang): Move this to InferShape - framework::LoD lod(1); - lod[0].reserve(batch_size + 1); - for (int i = 0, offset = 0; i < batch_size + 1; ++i) { + if (ctx.HasInput("Y") && batch_size > 1) { + const Tensor* imgrealsize = ctx.Input("Y"); + auto out_stride = ctx.Attr>("out_stride"); + Tensor cpu_shape_tensor; + TensorCopySync(*imgrealsize, platform::CPUPlace(), &cpu_shape_tensor); + std::vector imgreal_h; + std::vector imgreal_w; + std::vector output_height; + std::vector output_width; + int result = 0; + for (int i = 0; i < batch_size; i++) { + int tmp_real_h = static_cast((cpu_shape_tensor.data())[2 * i]); + int tmp_real_w = + static_cast((cpu_shape_tensor.data())[2 * i + 1]); + if (tmp_real_h % out_stride[0] == 0) { + tmp_real_h = tmp_real_h / out_stride[0]; + } else { + tmp_real_h = tmp_real_h / out_stride[0] + 1; + } + if (tmp_real_w % out_stride[1] == 0) { + tmp_real_w = tmp_real_w / out_stride[1]; + } else { + tmp_real_w = tmp_real_w / out_stride[1] + 1; + } + imgreal_h.push_back(tmp_real_h); + imgreal_w.push_back(tmp_real_w); + output_height.push_back(Im2SeqOutputSize( + imgreal_h[i], kernels[0], paddings[0], paddings[2], strides[0])); + output_width.push_back(Im2SeqOutputSize( + imgreal_w[i], kernels[1], paddings[1], paddings[3], strides[1])); + result += output_height[i] * output_width[i]; + } + + out->mutable_data({result, img_channels * kernels[0] * kernels[1]}, + ctx.GetPlace()); + + const std::vector dilations({1, 1}); + int offset_out = 0; + for (int i = 0; i < batch_size; i++) { + const Tensor src = + in->Slice(i, i + 1).Resize({img_channels, img_height, img_width}); + Tensor dst = out->Slice(offset_out, + offset_out + output_height[i] * output_width[i]) + .Resize({output_height[i], output_width[i], + img_channels, kernels[0], kernels[1]}); + offset_out += output_height[i] * output_width[i]; + + math::Im2ColFunctor f; + auto& dev_ctx = ctx.template device_context(); + f(dev_ctx, 
src, dilations, strides, paddings, &dst); + } + framework::LoD lod(1); + lod[0].reserve(batch_size + 1); + int offset = 0; + lod[0].push_back(offset); + for (int i = 0; i < batch_size; ++i) { + offset += output_height[i] * output_width[i]; + lod[0].push_back(offset); + } + out->set_lod(lod); + } else { + int output_height = Im2SeqOutputSize(img_height, kernels[0], paddings[0], + paddings[2], strides[0]); + int output_width = Im2SeqOutputSize(img_width, kernels[1], paddings[1], + paddings[3], strides[1]); + out->mutable_data({batch_size * output_height * output_width, + img_channels * kernels[0] * kernels[1]}, + ctx.GetPlace()); + const std::vector dilations({1, 1}); + auto out_dims = out->dims(); + out->Resize({batch_size, out->numel() / batch_size}); + for (int i = 0; i < batch_size; i++) { + const Tensor src = + in->Slice(i, i + 1).Resize({img_channels, img_height, img_width}); + Tensor dst = + out->Slice(i, i + 1).Resize({output_height, output_width, + img_channels, kernels[0], kernels[1]}); + + math::Im2ColFunctor f; + auto& dev_ctx = ctx.template device_context(); + f(dev_ctx, src, dilations, strides, paddings, &dst); + } + out->Resize(out_dims); + framework::LoD lod(1); + lod[0].reserve(batch_size + 1); + int offset = 0; lod[0].push_back(offset); - offset += output_height * output_width; + for (int i = 0; i < batch_size; ++i) { + offset += output_height * output_width; + lod[0].push_back(offset); + } + out->set_lod(lod); } - out->set_lod(lod); } }; diff --git a/paddle/fluid/operators/increment_op.cc b/paddle/fluid/operators/increment_op.cc index d8c97b27b3..f0ffc97066 100644 --- a/paddle/fluid/operators/increment_op.cc +++ b/paddle/fluid/operators/increment_op.cc @@ -47,8 +47,7 @@ class IncrementOp : public framework::OperatorWithKernel { class IncrementOpMaker : public framework::OpProtoAndCheckerMaker { public: - IncrementOpMaker(OpProto *proto, OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("X", "(Tensor) The input tensor of increment operator"); AddOutput("Out", "(Tensor) The output tensor of increment operator."); AddAttr("step", diff --git a/paddle/fluid/operators/is_empty_op.cc b/paddle/fluid/operators/is_empty_op.cc index 2a7be90dab..29b73951bb 100644 --- a/paddle/fluid/operators/is_empty_op.cc +++ b/paddle/fluid/operators/is_empty_op.cc @@ -12,46 +12,41 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ +#include "paddle/fluid/operators/is_empty_op.h" #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/framework/operator.h" namespace paddle { namespace operators { -constexpr char kInput[] = "X"; -constexpr char kOutput[] = "Out"; - -class IsEmptyOp : public framework::OperatorBase { +class IsEmptyOp : public framework::OperatorWithKernel { public: - IsEmptyOp(const std::string &type, const framework::VariableNameMap &inputs, - const framework::VariableNameMap &outputs, - const framework::AttributeMap &attrs) - : OperatorBase(type, inputs, outputs, attrs) {} + using framework::OperatorWithKernel::OperatorWithKernel; - private: - void RunImpl(const framework::Scope &scope, - const platform::Place &place) const override { - // get input - auto *var = scope.FindVar(Input(kInput)); - PADDLE_ENFORCE_NOT_NULL(var); - auto &tensor = var->Get(); - // get output - auto *out = scope.FindVar(Output(kOutput)); - PADDLE_ENFORCE_NOT_NULL(out); - auto *out_tensor = out->GetMutable(); + protected: + void InferShape(framework::InferShapeContext *ctx) const override { + PADDLE_ENFORCE(ctx->HasInput("X"), + "Input(X) of IsEmptyOp should not be null."); + PADDLE_ENFORCE(ctx->HasOutput("Out"), + "Output(Out) of IsEmptyOp should not be null."); + ctx->SetOutputDim("Out", {1}); + } - out_tensor->Resize({1}); - out_tensor->mutable_data(platform::CPUPlace())[0] = - framework::product(tensor.dims()) == 0; + framework::OpKernelType GetExpectedKernelType( + const framework::ExecutionContext &ctx) const override { + framework::OpKernelType kt = framework::OpKernelType( + framework::ToDataType(ctx.Input("X")->type()), + platform::CPUPlace()); + return kt; } }; -class IsEmptyOpProtoMaker : public framework::OpProtoAndCheckerMaker { +class IsEmptyOpMaker : public framework::OpProtoAndCheckerMaker { public: - IsEmptyOpProtoMaker(OpProto *proto, OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { - AddInput(kInput, "(Tensor) Tensor which is to be checked."); - AddOutput(kOutput, "(Tensor) a boolean Tensor that indicate empty or not."); + void Make() override { + AddInput("X", "(LoDTensor) Tensor which is to be checked."); + AddOutput("Out", + "(LoDTensor) a boolean Tensor that indicate empty or not."); AddComment(R"DOC( IsEmpty Operator which checks whether a tensor is empty. @@ -63,5 +58,12 @@ It will just return product(tensor.ddims()) > 0; } // namespace operators } // namespace paddle -REGISTER_OP_WITHOUT_GRADIENT(is_empty, paddle::operators::IsEmptyOp, - paddle::operators::IsEmptyOpProtoMaker); +namespace ops = paddle::operators; + +REGISTER_OPERATOR(is_empty, ops::IsEmptyOp, ops::IsEmptyOpMaker, + paddle::framework::EmptyGradOpMaker); +REGISTER_OP_CPU_KERNEL( + is_empty, ops::IsEmptyOpKernel, + ops::IsEmptyOpKernel, + ops::IsEmptyOpKernel, + ops::IsEmptyOpKernel); diff --git a/paddle/fluid/operators/is_empty_op.h b/paddle/fluid/operators/is_empty_op.h new file mode 100644 index 0000000000..3e3af22fa8 --- /dev/null +++ b/paddle/fluid/operators/is_empty_op.h @@ -0,0 +1,37 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. */ + +#pragma once +#include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/framework/operator.h" + +namespace paddle { +namespace operators { + +template +class IsEmptyOpKernel : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext& context) const override { + // get input + auto* input_tensor = context.Input("X"); + // get output + auto* output_tensor = context.Output("Out"); + + output_tensor->mutable_data(platform::CPUPlace())[0] = + framework::product(input_tensor->dims()) == 0; + } +}; + +} // namespace operators +} // namespace paddle diff --git a/paddle/fluid/operators/l1_norm_op.cc b/paddle/fluid/operators/l1_norm_op.cc index 0c143b7c8a..bc115090ac 100644 --- a/paddle/fluid/operators/l1_norm_op.cc +++ b/paddle/fluid/operators/l1_norm_op.cc @@ -48,8 +48,7 @@ class L1NormGradOp : public framework::OperatorWithKernel { class L1NormOpMaker : public framework::OpProtoAndCheckerMaker { public: - L1NormOpMaker(OpProto* proto, OpAttrChecker* op_checker) - : framework::OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("X", "(Tensor) The input of l1_norm op."); AddOutput("Out", "(Scalar) The output of l1_norm op."); AddComment(R"DOC( diff --git a/paddle/fluid/operators/label_smooth_op.cc b/paddle/fluid/operators/label_smooth_op.cc index a73c626032..da59bd53bc 100644 --- a/paddle/fluid/operators/label_smooth_op.cc +++ b/paddle/fluid/operators/label_smooth_op.cc @@ -47,8 +47,7 @@ class LabelSmoothOp : public framework::OperatorWithKernel { class LabelSmoothOpMaker : public framework::OpProtoAndCheckerMaker { public: - LabelSmoothOpMaker(OpProto *proto, OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("X", "(LoDTensor) The input labels of LabelSmooth operator. This " "input can be batched labels in one-hot encoding or output from " diff --git a/paddle/fluid/operators/layer_norm_op.cc b/paddle/fluid/operators/layer_norm_op.cc index de1056aef7..14ce1da2e9 100644 --- a/paddle/fluid/operators/layer_norm_op.cc +++ b/paddle/fluid/operators/layer_norm_op.cc @@ -61,38 +61,34 @@ class LayerNormOp : public framework::OperatorWithKernel { class LayerNormOpMaker : public framework::OpProtoAndCheckerMaker { public: - LayerNormOpMaker(OpProto *proto, OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { - AddInput("X", "(LoDTensor) The input tensor."); + void Make() override { + AddInput("X", "The input tensor."); AddInput("Scale", - "(Tensor, optional) Scale is a 1-dimensional tensor of size " + "(optional) Scale is a 1-dimensional tensor of size " "H(`begin_norm_axis` splits the tensor(`X`) to a matrix [N,H])." "It is applied to the output.") .AsDispensable(); AddInput("Bias", - "(Tensor, optional) Bias is a 1-dimensional tensor of size " + "(optional) Bias is a 1-dimensional tensor of size " "H(`begin_norm_axis` splits the tensor(`X`) to a matrix [N,H])." 
"It is applied to the output.") .AsDispensable(); - AddOutput("Y", "(LoDTensor) Result after normalization."); - AddOutput("Mean", "(Tensor) Mean of the current mini batch.") - .AsIntermediate(); - AddOutput("Variance", "(Tensor) Variance of the current mini batch.") + AddOutput("Y", "Result after normalization."); + AddOutput("Mean", "Mean of the current mini batch.").AsIntermediate(); + AddOutput("Variance", "Variance of the current mini batch.") .AsIntermediate(); AddAttr("epsilon", - "(float, default 1e-5) Constant for " - "numerical stability") + "Constant for numerical stability [default 1e-5].") .SetDefault(1e-5) .AddCustomChecker([](const float &epsilon) { PADDLE_ENFORCE(epsilon >= 0.0f && epsilon <= 0.001f, "'epsilon' should be between 0.0 and 0.001."); }); AddAttr("begin_norm_axis", - "(int default:1), the " - "axis of `begin_norm_axis ... Rank(X) - 1` will be " + "the axis of `begin_norm_axis ... Rank(X) - 1` will be " "normalized. `begin_norm_axis` splits the tensor(`X`) to a " - "matrix [N,H].") + "matrix [N,H]. [default 1].") .SetDefault(1) .AddCustomChecker([](const int &begin_norm_axis) { PADDLE_ENFORCE_GT(begin_norm_axis, 0, @@ -100,10 +96,14 @@ class LayerNormOpMaker : public framework::OpProtoAndCheckerMaker { }); AddComment(R"DOC( -Layer Normalization. -Layer Norm has been implemented as discussed in the paper: -https://arxiv.org/abs/1607.06450 -... +Assume feature vectors exist on dimensions +:attr:`begin_norm_axis ... rank(input)` and calculate the moment statistics +along these dimensions for each feature vector :math:`a` with size +:math:`H`, then normalize each feature vector using the corresponding +statistics. After that, apply learnable gain and bias on the normalized +tensor to scale and shift if :attr:`scale` and :attr:`shift` are set. + +Refer to `Layer Normalization `_ )DOC"); } }; diff --git a/paddle/fluid/operators/layer_norm_op.cu b/paddle/fluid/operators/layer_norm_op.cu index 6840e1e08f..0886c41a1b 100644 --- a/paddle/fluid/operators/layer_norm_op.cu +++ b/paddle/fluid/operators/layer_norm_op.cu @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,8 +12,512 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ +#include #include "paddle/fluid/operators/layer_norm_op.h" +namespace paddle { +namespace operators { + +inline static int GetDesiredBlockDim(int block_dim) { + const int kMaxBlockDim = 512; + return block_dim >= kMaxBlockDim + ? kMaxBlockDim + : (1 << (static_cast(std::log2f(block_dim)))); +} + +#define FIXED_BLOCK_DIM_CASE_BASE(log2_block_dim, ...) \ + case (1 << (log2_block_dim)): { \ + constexpr auto kBlockDim = (1 << (log2_block_dim)); \ + __VA_ARGS__; \ + } break + +#define FIXED_BLOCK_DIM_CASE(...) 
\ + FIXED_BLOCK_DIM_CASE_BASE(9, ##__VA_ARGS__); \ + FIXED_BLOCK_DIM_CASE_BASE(8, ##__VA_ARGS__); \ + FIXED_BLOCK_DIM_CASE_BASE(7, ##__VA_ARGS__); \ + FIXED_BLOCK_DIM_CASE_BASE(6, ##__VA_ARGS__); \ + FIXED_BLOCK_DIM_CASE_BASE(5, ##__VA_ARGS__); \ + FIXED_BLOCK_DIM_CASE_BASE(4, ##__VA_ARGS__); \ + FIXED_BLOCK_DIM_CASE_BASE(3, ##__VA_ARGS__); \ + FIXED_BLOCK_DIM_CASE_BASE(2, ##__VA_ARGS__); \ + FIXED_BLOCK_DIM_CASE_BASE(1, ##__VA_ARGS__) + +static __device__ __forceinline__ float real_sqrt(float x) { return sqrtf(x); } +static __device__ __forceinline__ double real_sqrt(double x) { return sqrt(x); } + +template +struct PairForLayerNorm { + __device__ __forceinline__ PairForLayerNorm() {} + __device__ __forceinline__ PairForLayerNorm(const T &first, const T &second) + : first_(first), second_(second) {} + + T first_; + T second_; +}; + +template +struct PairForLayerNormAddFunctor { + __device__ __forceinline__ PairForLayerNorm operator()( + const PairForLayerNorm &p1, const PairForLayerNorm &p2) { + return PairForLayerNorm(p1.first_ + p2.first_, p1.second_ + p2.second_); + } +}; + +template +__global__ void LayerNormForward(const T *x, const T *scale, const T *bias, + T *y, T *mean, T *var, float epsilon, + int feature_size) { + using BlockReduce = cub::BlockReduce, BlockDim>; + __shared__ typename BlockReduce::TempStorage temp_storage; + + int beg_idx = blockIdx.x * feature_size + threadIdx.x; + int end_idx = (blockIdx.x + 1) * feature_size; + + // Step 1: Reduce to calculate mean and var + T mean_val = static_cast(0); + T var_val = static_cast(0); + for (int i = beg_idx; i < end_idx; i += BlockDim) { + T tmp = x[i]; + mean_val += tmp; + var_val += (tmp * tmp); + } + auto pair = BlockReduce(temp_storage) + .Reduce(PairForLayerNorm(mean_val, var_val), + PairForLayerNormAddFunctor()); + if (threadIdx.x == 0) { + auto tmp = pair.first_ / feature_size; + mean[blockIdx.x] = tmp; + var[blockIdx.x] = pair.second_ / feature_size - tmp * tmp; + } + __syncthreads(); + mean_val = mean[blockIdx.x]; + var_val = static_cast(real_sqrt(var[blockIdx.x] + epsilon)); + + // Step 2: Calculate y + if (scale != nullptr) { + if (bias != nullptr) { + for (int i = beg_idx, j = threadIdx.x; i < end_idx; + i += BlockDim, j += BlockDim) { + y[i] = scale[j] * (x[i] - mean_val) / var_val + bias[j]; + } + } else { + for (int i = beg_idx, j = threadIdx.x; i < end_idx; + i += BlockDim, j += BlockDim) { + y[i] = scale[j] * (x[i] - mean_val) / var_val; + } + } + } else { // scale == nullptr + if (bias != nullptr) { + for (int i = beg_idx, j = threadIdx.x; i < end_idx; + i += BlockDim, j += BlockDim) { + y[i] = (x[i] - mean_val) / var_val + bias[j]; + } + } else { + for (int i = beg_idx, j = threadIdx.x; i < end_idx; + i += BlockDim, j += BlockDim) { + y[i] = (x[i] - mean_val) / var_val; + } + } + } +} + +// Make sure that d_scale != nullptr && d_bias != nullptr +// Since d_scale != nullptr, scale would not be nullptr +template +__global__ void LayerNormBackwardGradientAll(const T *x, const T *d_y, + T *d_scale, T *d_bias, T *d_x, + const T *mean, const T *var, + const T *scale, float epsilon, + int batch_size, int feature_size) { + using BlockReduce = cub::BlockReduce, BlockDim>; + __shared__ typename BlockReduce::TempStorage temp_storage; + + int beg_idx = threadIdx.x * feature_size + blockIdx.x; + int end_idx = batch_size * feature_size + blockIdx.x; + int stride = BlockDim * feature_size; + + T d_scale_partial = 0, d_bias_partial = 0; + + for (int i = beg_idx; i < end_idx; i += stride) { + int row_idx = i / feature_size; + 
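A note on the forward kernel above: it fuses both moment computations into a single block-wide reduction by accumulating (sum, sum-of-squares) pairs with PairForLayerNormAddFunctor, then recovers the variance from the identity Var[x] = E[x^2] - (E[x])^2. A minimal host-side C++ sketch of that identity (illustrative values, not part of the patch):

#include <cmath>
#include <cstdio>
#include <vector>

// One pass accumulates sum(x) and sum(x*x); mean and variance follow,
// exactly as the kernel derives
// var[blockIdx.x] = pair.second_ / feature_size - tmp * tmp.
int main() {
  std::vector<double> x = {1.0, 2.0, 3.0, 4.0};
  double sum = 0.0, sq_sum = 0.0;
  for (double v : x) {
    sum += v;
    sq_sum += v * v;
  }
  double mean = sum / x.size();
  double var = sq_sum / x.size() - mean * mean;  // 7.5 - 6.25 = 1.25
  const double epsilon = 1e-5;  // the op's default attribute value
  for (double v : x) printf("%f\n", (v - mean) / std::sqrt(var + epsilon));
  return 0;
}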
auto var_val = static_cast(real_sqrt(var[row_idx] + epsilon)); + d_scale_partial += d_y[i] * (x[i] - mean[row_idx]) / var_val; + d_bias_partial += d_y[i]; + if (HasDx) { + d_x[i] = d_y[i] * scale[blockIdx.x] / var_val; + } + } + + auto pair = BlockReduce(temp_storage) + .Reduce(PairForLayerNorm(d_scale_partial, d_bias_partial), + PairForLayerNormAddFunctor()); + + if (threadIdx.x == 0) { + d_scale[blockIdx.x] = pair.first_; + d_bias[blockIdx.x] = pair.second_; + } +} + +// Make sure that there is only one true expression: d_scale != nullptr +// or d_bias != nullptr +// Notice: scale may be nullptr +template +__global__ void LayerNormBackwardGradientScaleOrBias( + const T *x, const T *d_y, T *d_scale, T *d_bias, T *d_x, const T *mean, + const T *var, const T *scale, float epsilon, int batch_size, + int feature_size) { + using BlockReduce = cub::BlockReduce; + __shared__ typename BlockReduce::TempStorage temp_storage; + int beg_idx = threadIdx.x * feature_size + blockIdx.x; + int end_idx = batch_size * feature_size + blockIdx.x; + int stride = BlockDim * feature_size; + T d_scale_or_d_bias_partial = 0; + + for (int i = beg_idx; i < end_idx; i += stride) { + int row_idx = i / feature_size; + auto var_val = static_cast(real_sqrt(var[row_idx] + epsilon)); + if (HasDScale) { + d_scale_or_d_bias_partial += d_y[i] * (x[i] - mean[row_idx]) / var_val; + } else { // d_bias != nullptr + d_scale_or_d_bias_partial += d_y[i]; + } + + if (HasDx) { + if (scale != nullptr) { + d_x[i] = d_y[i] * scale[blockIdx.x] / var_val; + } else { + d_x[i] = d_y[i] / var_val; + } + } + } + + d_scale_or_d_bias_partial = + BlockReduce(temp_storage).Reduce(d_scale_or_d_bias_partial, cub::Sum()); + + if (threadIdx.x == 0) { + if (HasDScale) { + d_scale[blockIdx.x] = d_scale_or_d_bias_partial; + } else { + d_bias[blockIdx.x] = d_scale_or_d_bias_partial; + } + } +} + +template +__global__ void LayerNormBackwardPostProcessToCalculateDX(const T *x, T *d_x, + const T *mean, + const T *var, + float epsilon, + int feature_size) { + using BlockReduce = cub::BlockReduce, BlockDim>; + __shared__ typename BlockReduce::TempStorage temp_storage; + __shared__ T d_x_reduce_tmp[2]; + + int beg_idx = blockIdx.x * feature_size + threadIdx.x; + int end_idx = (blockIdx.x + 1) * feature_size; + + T block_mean = mean[blockIdx.x]; + T block_var = var[blockIdx.x]; + T d_x_mean_partial = 0, d_x_var_partial = 0; + for (int i = beg_idx; i < end_idx; i += BlockDim) { + d_x_mean_partial += d_x[i]; + d_x_var_partial += d_x[i] * (x[i] - block_mean); + } + + auto pair = + BlockReduce(temp_storage) + .Reduce(PairForLayerNorm(d_x_mean_partial, d_x_var_partial), + PairForLayerNormAddFunctor()); + + if (threadIdx.x == 0) { + d_x_reduce_tmp[0] = pair.first_ / feature_size; + d_x_reduce_tmp[1] = pair.second_ / (feature_size * (block_var + epsilon)); + } + __syncthreads(); + + d_x_mean_partial = d_x_reduce_tmp[0]; + d_x_var_partial = d_x_reduce_tmp[1]; + for (int i = beg_idx; i < end_idx; i += BlockDim) { + d_x[i] -= d_x_mean_partial; + d_x[i] -= (x[i] - block_mean) * d_x_var_partial; + } +} + +// Here, we only calculate d_x +template +__global__ void LayerNormBackwardGradientOnlyDX(const T *x, const T *d_y, + T *d_x, const T *mean, + const T *var, const T *scale, + float epsilon, + int feature_size) { + using BlockReduce = cub::BlockReduce, BlockDim>; + __shared__ typename BlockReduce::TempStorage temp_storage; + __shared__ T d_x_reduce_tmp[2]; + + int beg_idx = blockIdx.x * feature_size + threadIdx.x; + int end_idx = (blockIdx.x + 1) * feature_size; + + T 
block_mean = mean[blockIdx.x], block_var = var[blockIdx.x]; + T d_x_mean_partial = 0, d_x_var_partial = 0; + for (int i = beg_idx; i < end_idx; i += BlockDim) { + auto var_val = static_cast(real_sqrt(block_var + epsilon)); + if (scale != nullptr) { + int col_idx = i % feature_size; + d_x[i] = d_y[i] * scale[col_idx] / var_val; + } else { + d_x[i] = d_y[i] / var_val; + } + d_x_mean_partial += d_x[i]; + d_x_var_partial += d_x[i] * (x[i] - block_mean); + } + + auto pair = + BlockReduce(temp_storage) + .Reduce(PairForLayerNorm(d_x_mean_partial, d_x_var_partial), + PairForLayerNormAddFunctor()); + + if (threadIdx.x == 0) { + d_x_reduce_tmp[0] = pair.first_ / feature_size; + d_x_reduce_tmp[1] = pair.second_ / (feature_size * (block_var + epsilon)); + } + __syncthreads(); + + d_x_mean_partial = d_x_reduce_tmp[0]; + d_x_var_partial = d_x_reduce_tmp[1]; + for (int i = beg_idx; i < end_idx; i += BlockDim) { + d_x[i] -= d_x_mean_partial; + d_x[i] -= (x[i] - block_mean) * d_x_var_partial; + } +} + +template +__global__ void LayerNormBackwardWhenBatchSizeIsOne( + const T *x, const T *d_y, T *d_x, T *d_scale, T *d_bias, const T *mean, + const T *var, const T *scale, float epsilon, int feature_size) { + int idx = threadIdx.x + blockIdx.x * blockDim.x; + if (idx < feature_size) { + auto var_val = static_cast(real_sqrt(var[idx] + epsilon)); + if (d_x != nullptr) { + if (d_scale == nullptr) { + d_x[idx] = d_y[idx] / var_val; + } else { + d_x[idx] = d_y[idx] * scale[idx] / var_val; + } + } + + if (d_scale != nullptr) { + d_scale[idx] = d_y[idx] * (x[idx] - mean[idx]) / var_val; + } + + if (d_bias != nullptr) d_bias[idx] = d_y[idx]; + } +} + +template +static void LayerNormBackward(const T *x, const T *d_y, const T *scale, + const T *mean, const T *var, T *d_x, T *d_scale, + T *d_bias, float epsilon, int batch_size, + int feature_size, cudaStream_t stream) { + const int kMaxBlockDim = 512; + int gradient_flag = ((d_x != nullptr ? 1 : 0) << 2) | + ((d_scale != nullptr ? 1 : 0) << 1) | + ((d_bias != nullptr ? 
1 : 0));
+  if (gradient_flag == 0) return;
+
+  if (batch_size == 1) {
+    LayerNormBackwardWhenBatchSizeIsOne<
+        T><<<(feature_size + kMaxBlockDim - 1) / kMaxBlockDim, kMaxBlockDim, 0,
+             stream>>>(x, d_y, d_x, d_scale, d_bias, mean, var, scale, epsilon,
+                       feature_size);
+
+    if (d_x != nullptr) {
+      switch (GetDesiredBlockDim(feature_size)) {
+        FIXED_BLOCK_DIM_CASE(LayerNormBackwardPostProcessToCalculateDX<
+                             T, kBlockDim><<<1, kBlockDim, 0, stream>>>(
+            x, d_x, mean, var, epsilon, feature_size));
+      }
+    }
+    return;
+  }
+
+  auto block_dim = GetDesiredBlockDim(batch_size);
+  switch (gradient_flag) {
+    case 1:  // d_x == nullptr, d_scale == nullptr, d_bias != nullptr
+      switch (block_dim) {
+        FIXED_BLOCK_DIM_CASE(LayerNormBackwardGradientScaleOrBias<
+                             T, kBlockDim, false,
+                             false><<<feature_size, kBlockDim, 0, stream>>>(
+            x, d_y, d_scale, d_bias, d_x, mean, var, scale, epsilon, batch_size,
+            feature_size));
+      }
+      break;
+    case 2:  // d_x == nullptr, d_scale != nullptr, d_bias == nullptr
+      switch (block_dim) {
+        FIXED_BLOCK_DIM_CASE(LayerNormBackwardGradientScaleOrBias<
+                             T, kBlockDim, false,
+                             true><<<feature_size, kBlockDim, 0, stream>>>(
+            x, d_y, d_scale, d_bias, d_x, mean, var, scale, epsilon, batch_size,
+            feature_size));
+      }
+      break;
+    case 3:  // d_x == nullptr, d_scale != nullptr, d_bias != nullptr
+      switch (block_dim) {
+        FIXED_BLOCK_DIM_CASE(
+            LayerNormBackwardGradientAll<
+                T, kBlockDim, false><<<feature_size, kBlockDim, 0, stream>>>(
+                x, d_y, d_scale, d_bias, d_x, mean, var, scale, epsilon,
+                batch_size, feature_size));
+      }
+      break;
+    case 4:  // d_x != nullptr, d_scale == nullptr, d_bias == nullptr
+      switch (GetDesiredBlockDim(feature_size)) {
+        FIXED_BLOCK_DIM_CASE(
+            LayerNormBackwardGradientOnlyDX<
+                T, kBlockDim><<<batch_size, kBlockDim, 0, stream>>>(
+                x, d_y, d_x, mean, var, scale, epsilon, feature_size));
+      }
+      break;
+    case 5:  // d_x != nullptr, d_scale == nullptr, d_bias != nullptr
+      switch (block_dim) {
+        FIXED_BLOCK_DIM_CASE(LayerNormBackwardGradientScaleOrBias<
+                             T, kBlockDim, true,
+                             false><<<feature_size, kBlockDim, 0, stream>>>(
+            x, d_y, d_scale, d_bias, d_x, mean, var, scale, epsilon, batch_size,
+            feature_size));
+      }
+      switch (GetDesiredBlockDim(feature_size)) {
+        FIXED_BLOCK_DIM_CASE(
+            LayerNormBackwardPostProcessToCalculateDX<
+                T, kBlockDim><<<batch_size, kBlockDim, 0, stream>>>(
+                x, d_x, mean, var, epsilon, feature_size));
+      }
+      break;
+    case 6:  // d_x != nullptr, d_scale != nullptr, d_bias == nullptr
+      switch (block_dim) {
+        FIXED_BLOCK_DIM_CASE(LayerNormBackwardGradientScaleOrBias<
+                             T, kBlockDim, true,
+                             true><<<feature_size, kBlockDim, 0, stream>>>(
+            x, d_y, d_scale, d_bias, d_x, mean, var, scale, epsilon, batch_size,
+            feature_size));
+      }
+      switch (GetDesiredBlockDim(feature_size)) {
+        FIXED_BLOCK_DIM_CASE(
+            LayerNormBackwardPostProcessToCalculateDX<
+                T, kBlockDim><<<batch_size, kBlockDim, 0, stream>>>(
+                x, d_x, mean, var, epsilon, feature_size));
+      }
+      break;
+    case 7:  // d_x != nullptr, d_scale != nullptr, d_bias != nullptr
+      switch (block_dim) {
+        FIXED_BLOCK_DIM_CASE(
+            LayerNormBackwardGradientAll<
+                T, kBlockDim, true><<<feature_size, kBlockDim, 0, stream>>>(
+                x, d_y, d_scale, d_bias, d_x, mean, var, scale, epsilon,
+                batch_size, feature_size));
+      }
+      switch (GetDesiredBlockDim(feature_size)) {
+        FIXED_BLOCK_DIM_CASE(
+            LayerNormBackwardPostProcessToCalculateDX<
+                T, kBlockDim><<<batch_size, kBlockDim, 0, stream>>>(
+                x, d_x, mean, var, epsilon, feature_size));
+      }
+      break;
+    default:
+      break;
+  }
+}
+
+template <typename T>
+class LayerNormKernel<platform::CUDADeviceContext, T>
+    : public framework::OpKernel<T> {
+ public:
+  void Compute(const framework::ExecutionContext &ctx) const override {
+    const float epsilon = ctx.Attr<float>("epsilon");
+    auto *scale = ctx.Input<Tensor>("Scale");
+    auto *bias = ctx.Input<Tensor>("Bias");
+    auto *x = ctx.Input<Tensor>("X");
+
+    auto *y = ctx.Output<Tensor>("Y");
+    auto *mean = ctx.Output<Tensor>("Mean");
+    auto *var = ctx.Output<Tensor>("Variance");
+
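For orientation: LayerNormBackward above packs the three "is this gradient requested" booleans into a 3-bit flag (bit 2: d_x, bit 1: d_scale, bit 0: d_bias) and dispatches one of seven specialized kernels instead of branching inside a single generic one. A self-contained sketch of the encoding (names are illustrative):

#include <cstdio>

// Mirrors the dispatch key computed above:
// bit 2 = d_x requested, bit 1 = d_scale requested, bit 0 = d_bias requested.
static int GradientFlag(bool has_dx, bool has_dscale, bool has_dbias) {
  return (static_cast<int>(has_dx) << 2) |
         (static_cast<int>(has_dscale) << 1) |
         static_cast<int>(has_dbias);
}

int main() {
  printf("%d\n", GradientFlag(true, true, true));     // 7: all gradients needed
  printf("%d\n", GradientFlag(true, false, false));   // 4: only d_x is needed
  printf("%d\n", GradientFlag(false, false, false));  // 0: early return, no work
  return 0;
}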
const auto begin_norm_axis = ctx.Attr("begin_norm_axis"); + + const auto x_dims = x->dims(); + auto *x_data = x->data(); + auto *y_data = y->mutable_data(ctx.GetPlace()); + auto *mean_data = mean->mutable_data(ctx.GetPlace()); + auto *var_data = var->mutable_data(ctx.GetPlace()); + auto *scale_data = (scale == nullptr ? nullptr : scale->data()); + auto *bias_data = (bias == nullptr ? nullptr : bias->data()); + + auto matrix_dim = framework::flatten_to_2d(x_dims, begin_norm_axis); + int batch_size = static_cast(matrix_dim[0]); + int feature_size = static_cast(matrix_dim[1]); + + auto stream = ctx.cuda_device_context().stream(); + + switch (GetDesiredBlockDim(feature_size)) { + FIXED_BLOCK_DIM_CASE( + LayerNormForward<<>>( + x_data, scale_data, bias_data, y_data, mean_data, var_data, + epsilon, feature_size)); + default: + PADDLE_THROW( + "Product from begin_norm_axis to end must be larger than 1"); + break; + } + } +}; + +template +class LayerNormGradKernel + : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext &ctx) const override { + const float epsilon = ctx.Attr("epsilon"); + // d_x, d_scale, d_bias may be nullptr + auto *d_x = ctx.Output(framework::GradVarName("X")); + auto *d_scale = ctx.Output(framework::GradVarName("Scale")); + auto *d_bias = ctx.Output(framework::GradVarName("Bias")); + + auto *x = ctx.Input("X"); + auto *mean = ctx.Input("Mean"); + auto *var = ctx.Input("Variance"); + auto *scale = ctx.Input("Scale"); + auto *d_y = ctx.Input(framework::GradVarName("Y")); + + auto *x_data = x->data(); + auto *d_y_data = d_y->data(); + auto *mean_data = mean->data(); + auto *var_data = var->data(); + auto *scale_data = (scale == nullptr ? nullptr : scale->data()); + auto *d_scale_data = + (d_scale == nullptr ? nullptr + : d_scale->mutable_data(ctx.GetPlace())); + auto *d_bias_data = + (d_bias == nullptr ? nullptr : d_bias->mutable_data(ctx.GetPlace())); + auto *d_x_data = + (d_x == nullptr ? nullptr : d_x->mutable_data(ctx.GetPlace())); + + const auto &x_dims = x->dims(); + const auto begin_norm_axis = ctx.Attr("begin_norm_axis"); + auto matrix_dim = framework::flatten_to_2d(x_dims, begin_norm_axis); + int batch_size = static_cast(matrix_dim[0]); + int feature_size = static_cast(matrix_dim[1]); + + auto stream = ctx.cuda_device_context().stream(); + + LayerNormBackward(x_data, d_y_data, scale_data, mean_data, var_data, + d_x_data, d_scale_data, d_bias_data, epsilon, + batch_size, feature_size, stream); + } +}; + +#undef FIXED_BLOCK_DIM_CASE_BASE +#undef FIXED_BLOCK_DIM_CASE +} // namespace operators +} // namespace paddle + namespace ops = paddle::operators; REGISTER_OP_CUDA_KERNEL( layer_norm, diff --git a/paddle/fluid/operators/linear_chain_crf_op.cc b/paddle/fluid/operators/linear_chain_crf_op.cc index 2f29e377fd..ea1ca7f59d 100644 --- a/paddle/fluid/operators/linear_chain_crf_op.cc +++ b/paddle/fluid/operators/linear_chain_crf_op.cc @@ -19,8 +19,7 @@ namespace operators { class LinearChainCRFOpMaker : public framework::OpProtoAndCheckerMaker { public: - LinearChainCRFOpMaker(OpProto* proto, OpAttrChecker* op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("Emission", "(LoDTensor, default LoDTensor) " "A 2-D LoDTensor with shape [N x D], where N is the size of the " @@ -68,8 +67,6 @@ class LinearChainCRFOpMaker : public framework::OpProtoAndCheckerMaker { "mini-batch. Note: S is equal to the sequence number in a mini-batch. 
" "The output is no longer a LoDTensor."); AddComment(R"DOC( -LinearChainCRF Operator. - Conditional Random Field defines an undirected probabilistic graph with nodes denoting random variables and edges denoting dependencies between these variables. CRF learns the conditional probability $P(Y|X)$, where @@ -87,6 +84,7 @@ CRF. Please refer to http://www.cs.columbia.edu/~mcollins/fb.pdf and http://cseweb.ucsd.edu/~elkan/250Bwinter2012/loglinearCRFs.pdf for details. Equation: + 1. Denote Input(Emission) to this operator as $x$ here. 2. The first D values of Input(Transition) to this operator are for starting weights, denoted as $a$ here. @@ -109,6 +107,7 @@ Finally, the linear chain CRF operator outputs the logarithm of the conditional likelihood of each training sample in a mini-batch. NOTE: + 1. The feature function for a CRF is made up of the emission features and the transition features. The emission feature weights are NOT computed in this operator. They MUST be computed first before this operator is called. diff --git a/paddle/fluid/operators/listen_and_serv_op.cc b/paddle/fluid/operators/listen_and_serv_op.cc index 8acbf82025..b194807696 100644 --- a/paddle/fluid/operators/listen_and_serv_op.cc +++ b/paddle/fluid/operators/listen_and_serv_op.cc @@ -12,22 +12,27 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ +#include // for removing the port file +#include +#include #include -#include #include // NOLINT #include +#include "gflags/gflags.h" + +#include "paddle/fluid/operators/detail/macros.h" + +#include "paddle/fluid/operators/distributed/request_handler_impl.h" #include "paddle/fluid/operators/listen_and_serv_op.h" -#include "paddle/fluid/platform/profiler.h" namespace paddle { namespace operators { -void RunServer(std::shared_ptr service) { - service->RunSyncUpdate(); +void RunServer(std::shared_ptr service) { + service->StartServer(); VLOG(4) << "RunServer thread end"; } - static void split(const std::string &str, char sep, std::vector *pieces) { pieces->clear(); @@ -57,9 +62,10 @@ static void ParallelExecuteBlocks( framework::Async([&executor, &prepared, &program, &scope, idx]() { int run_block = idx; // thread local try { - executor->RunPreparedContext(prepared[run_block].get(), scope, - false, false); - } catch (std::exception &e) { + VLOG(3) << "running server block: " << run_block + << "pointer: " << prepared[run_block].get(); + executor->RunPreparedContext(prepared[run_block].get(), scope); + } catch (const std::exception &e) { LOG(ERROR) << "run sub program error " << e.what(); } })); @@ -67,180 +73,123 @@ static void ParallelExecuteBlocks( for (size_t i = 0; i < fs.size(); ++i) fs[i].wait(); } -std::atomic_int ListenAndServOp::selected_port_{0}; - ListenAndServOp::ListenAndServOp(const std::string &type, const framework::VariableNameMap &inputs, const framework::VariableNameMap &outputs, const framework::AttributeMap &attrs) : OperatorBase(type, inputs, outputs, attrs) {} +ListenAndServOp::~ListenAndServOp() { Stop(); } + void ListenAndServOp::Stop() { - rpc_service_->Push(LISTEN_TERMINATE_MESSAGE); + rpc_service_->ShutDown(); server_thread_->join(); + auto file_path = string::Sprintf("/tmp/paddle.%d.port", ::getpid()); + remove(file_path.c_str()); } -void ListenAndServOp::SavePort(const std::string &file_path) const { +void ListenAndServOp::SavePort() const { // NOTE: default write file to /tmp/paddle.selected_port - selected_port_ = 
rpc_service_->GetSelectedPort(); - - std::ofstream port_file; - port_file.open(file_path); - port_file << selected_port_.load(); - port_file.close(); - VLOG(4) << "selected port written to " << file_path; + rpc_service_->SavePort(); } -void ListenAndServOp::WaitServerReady() { - while (selected_port_.load() == 0) { - } +static int64_t GetTimestamp() { + struct timeval tp; + gettimeofday(&tp, NULL); + return tp.tv_sec * 1000 + tp.tv_usec / 1000; } -void ListenAndServOp::RunSyncLoop(framework::Executor *executor, - framework::ProgramDesc *program, - framework::Scope *recv_scope, - framework::BlockDesc *prefetch_block) const { - auto fan_in = Attr("Fanin"); - +void ListenAndServOp::RunSyncLoop( + framework::Executor *executor, framework::ProgramDesc *program, + framework::Scope *recv_scope, + const std::vector &prefetch_block_id_list, + const int checkpoint_point_block_id) const { + VLOG(2) << "RunSyncLoop"; size_t num_blocks = program->Size(); + auto optimize_blocks = + Attr>(kOptimizeBlocks); PADDLE_ENFORCE_GE(num_blocks, 2, "server program should have at least 2 blocks"); - std::vector block_list; - for (size_t blkid = 1; blkid < num_blocks; ++blkid) { - block_list.push_back(blkid); + // Prepare all the server block + std::vector optimize_blocks_list; + for (size_t i = 1; i < program->Size(); ++i) { + optimize_blocks_list.push_back(i); } - auto optimize_prepared = executor->Prepare(*program, block_list); - // Insert placeholder for block0 which holds current op itself. + auto optimize_prepared = executor->Prepare(*program, optimize_blocks_list); + // Insert placeholder for block0 which holds current op itself, + // NOTE the first block in `optimize_prepared` should never be ran. optimize_prepared.insert( optimize_prepared.begin(), std::shared_ptr(nullptr)); - bool exit_flag = false; - // Record received sparse variables, so that - // we could reset those after execute optimize program - std::vector sparse_vars; - while (!exit_flag) { + rpc_service_->ResetBarrierCounter(); + + while (true) { + rpc_service_->Profiler().OneStep(); // Get from multiple trainers, we don't care about the order in which // the gradients arrives, just add suffix 0~n and merge the gradient. - rpc_service_->SetCond(0); - size_t recv_var_cnt = 0; - int batch_barrier = 0; - while (batch_barrier != fan_in) { - const detail::ReceivedMessage v = rpc_service_->Get(); - auto recv_var_name = v.first; - if (recv_var_name == LISTEN_TERMINATE_MESSAGE) { - LOG(INFO) << "received terminate message and exit"; - exit_flag = true; - break; - } else if (recv_var_name == BATCH_BARRIER_MESSAGE) { - VLOG(3) << "recv batch barrier message"; - batch_barrier++; - continue; - } else { - VLOG(3) << "received grad: " << recv_var_name; - recv_var_cnt++; - auto var = v.second->GetVar(); - if (var == nullptr) { - LOG(ERROR) << "Can not find server side var: " << recv_var_name; - PADDLE_THROW("Can not find server side var"); - } - if (var->IsType()) { - sparse_vars.push_back(var); - } - } - } - if (exit_flag) { - rpc_service_->SetCond(1); - rpc_service_->ShutDown(); + rpc_service_->SetCond(distributed::kRequestSend); + rpc_service_->WaitBarrier(distributed::kRequestSend); + + if (rpc_service_->IsExit()) { + LOG(WARNING) << "get exit!rpc_processor break!"; + rpc_service_->SetCond(distributed::kRequestGet); break; } // NOTE: if is_gpu_place, CUDA kernels are launched by multiple threads // and this will still work. 
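GetTimestamp above builds wall-clock milliseconds from gettimeofday, which the sync loop uses to report how long one round of optimize blocks took. A portable <chrono> equivalent, shown only as a sketch and not part of the patch:

#include <chrono>
#include <cstdint>
#include <cstdio>

// Same value as tp.tv_sec * 1000 + tp.tv_usec / 1000 in GetTimestamp above,
// but expressed with <chrono> so it also builds on non-POSIX platforms.
static int64_t GetTimestampMs() {
  using namespace std::chrono;
  return duration_cast<milliseconds>(system_clock::now().time_since_epoch())
      .count();
}

int main() {
  printf("%lld\n", static_cast<long long>(GetTimestampMs()));
  return 0;
}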
- // The optimize blocks which have the same parent ID would run parallel // TODO(Yancey1989): need to use ParallelExecutor for future - int32_t last_parent_blkid = program->Block(1).Parent(); + int32_t last_parent_blkid = optimize_blocks[0]->Parent(); std::vector parallel_blkids; - parallel_blkids.push_back(1); - double ts = detail::GetTimestamp(); - for (size_t blkid = 2; blkid < num_blocks; ++blkid) { - if (blkid != static_cast(prefetch_block->ID())) { - if (program->Block(blkid).Parent() != last_parent_blkid) { - ParallelExecuteBlocks(parallel_blkids, executor, optimize_prepared, - program, recv_scope); - parallel_blkids.clear(); - last_parent_blkid = program->Block(blkid).Parent(); - } - parallel_blkids.push_back(blkid); + parallel_blkids.push_back(optimize_blocks[0]->ID()); + double ts = GetTimestamp(); + for (size_t i = 1; i < optimize_blocks.size(); ++i) { + // skip the first optimize block because it is already in the + // parallel_blkids. + int blkid = optimize_blocks[i]->ID(); + if (program->Block(blkid).Parent() != last_parent_blkid) { + ParallelExecuteBlocks(parallel_blkids, executor, optimize_prepared, + program, recv_scope); + parallel_blkids.clear(); + last_parent_blkid = program->Block(blkid).Parent(); } + parallel_blkids.push_back(blkid); } ParallelExecuteBlocks(parallel_blkids, executor, optimize_prepared, program, recv_scope); - VLOG(2) << "run all blocks spent " << detail::GetTimestamp() - ts << "(ms)"; - - // Reset the received sparse variables, the sum operator would not - // sum the input sparse variables which rows is empty at the next - // mini-batch. - // TODO(Yancey1989): move the reset action into an operator, we couldn't - // have any hide logic in the operator. - for (auto &var : sparse_vars) { - var->GetMutable()->mutable_rows()->clear(); - } - rpc_service_->SetCond(1); - // FIXME(typhoonzero): use another condition to sync wait clients get. 
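The loop above batches consecutive optimize blocks that share a parent block ID and flushes each batch to ParallelExecuteBlocks, so independent sibling blocks run concurrently. The grouping logic in isolation, with a hypothetical ExecuteBatch standing in for the executor call:

#include <cstdio>
#include <vector>

struct Block { int id; int parent; };

// Hypothetical stand-in for ParallelExecuteBlocks: just report which
// block ids would be launched together.
static void ExecuteBatch(const std::vector<int> &ids) {
  printf("batch:");
  for (int id : ids) printf(" %d", id);
  printf("\n");
}

int main() {
  // Blocks 1 and 2 share parent 0; blocks 3 and 4 share parent 2.
  std::vector<Block> blocks = {{1, 0}, {2, 0}, {3, 2}, {4, 2}};
  int last_parent = blocks[0].parent;
  std::vector<int> batch;
  for (const Block &b : blocks) {
    if (b.parent != last_parent) {  // parent changed: flush the current batch
      ExecuteBatch(batch);
      batch.clear();
      last_parent = b.parent;
    }
    batch.push_back(b.id);
  }
  ExecuteBatch(batch);  // flush the trailing batch
  return 0;
}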
- rpc_service_->WaitClientGet(fan_in); - sparse_vars.clear(); + VLOG(2) << "run all blocks spent " << GetTimestamp() - ts << "(ms)"; + + rpc_service_->SetCond(distributed::kRequestGet); + rpc_service_->WaitBarrier(distributed::kRequestGet); + rpc_service_->ResetBarrierCounter(); + // reset received sparse vars to avoid reuse it in the next mini-batch + dynamic_cast(request_send_handler_.get()) + ->ResetSparseVarRecorder(); } // while(true) } -static void AsyncUpdateThread( - const std::string &var_name, const bool &exit_flag, - const std::shared_ptr &queue, - framework::Executor *executor, - framework::ExecutorPrepareContext *prepared) { - VLOG(3) << "update thread for " << var_name << " started"; - while (!exit_flag) { - const detail::ReceivedMessage v = queue->Pop(); - auto recv_var_name = v.first; - auto var = v.second->GetVar(); - if (var == nullptr) { - LOG(ERROR) << "Can not find server side var: " << recv_var_name; - PADDLE_THROW("Can not find server side var"); - } - auto fs = framework::Async([var_name, &executor, &v, prepared] { - try { - executor->RunPreparedContext(prepared, v.second->GetMutableLocalScope(), - false, false); - } catch (std::exception &e) { - LOG(ERROR) << "run sub program error " << e.what(); - } - }); - fs.wait(); - } -} - void ListenAndServOp::RunAsyncLoop(framework::Executor *executor, - framework::ProgramDesc *program) const { - VLOG(3) << "RunAsyncLoop in"; + framework::ProgramDesc *program, + framework::Scope *recv_scope) const { + VLOG(2) << "RunAsyncLoop"; // grad name to block id std::unordered_map grad_to_block_id; std::unordered_map id_to_grad; - std::unordered_map> - grad_to_queue; auto grad_to_block_id_str = Attr>("grad_to_block_id"); - for (auto &grad_and_id : grad_to_block_id_str) { + for (const auto &grad_and_id : grad_to_block_id_str) { std::vector pieces; split(grad_and_id, ':', &pieces); VLOG(3) << "after split, grad = " << pieces[0] << ", id=" << pieces[1]; PADDLE_ENFORCE_EQ(pieces.size(), 2); PADDLE_ENFORCE_EQ(grad_to_block_id.count(pieces[0]), 0); + int block_id = std::stoi(pieces[1]); grad_to_block_id[pieces[0]] = block_id; - grad_to_queue[pieces[0]] = std::make_shared(); id_to_grad[block_id] = pieces[0]; } size_t num_blocks = program->Size(); @@ -252,6 +201,10 @@ void ListenAndServOp::RunAsyncLoop(framework::Executor *executor, block_list.push_back(blkid); } auto optimize_prepared = executor->Prepare(*program, block_list); + // execute global block if needed + if (block_list[0] == 1 && id_to_grad.count(1) == 0) { + executor->RunPreparedContext(optimize_prepared[0].get(), recv_scope); + } std::unordered_map> grad_to_prepared_ctx; @@ -259,40 +212,38 @@ void ListenAndServOp::RunAsyncLoop(framework::Executor *executor, grad_to_prepared_ctx[id_to_grad[block_list[i]]] = optimize_prepared[i]; } - bool exit_flag = false; - - VLOG(3) << "start async optimize threads"; - std::vector> fs; - for (auto iter = grad_to_queue.begin(); iter != grad_to_queue.end(); iter++) { - std::string grad_name = iter->first; - VLOG(3) << "create async update thread for " << grad_name; - fs.push_back(framework::AsyncIO([grad_name, &exit_flag, &executor, - &grad_to_queue, &grad_to_prepared_ctx]() { - AsyncUpdateThread(grad_name, exit_flag, grad_to_queue[grad_name], - executor, grad_to_prepared_ctx[grad_name].get()); - })); - } + request_send_handler_->SetGradToPreparedCtx(&grad_to_prepared_ctx); + request_get_handler_->SetGradToPreparedCtx(&grad_to_prepared_ctx); + request_prefetch_handler_->SetGradToPreparedCtx(&grad_to_prepared_ctx); - VLOG(3) << "RunAsyncLoop into 
while"; - while (!exit_flag) { - const detail::ReceivedMessage v = rpc_service_->Get(); - auto recv_var_name = v.first; - if (recv_var_name == LISTEN_TERMINATE_MESSAGE) { - LOG(INFO) << "received terminate message and exit"; - exit_flag = true; + while (true) { + if (rpc_service_->IsExit()) { + VLOG(4) << "get exit!rpc_processor break!"; break; - } else { - VLOG(3) << "received grad: " << recv_var_name; - grad_to_queue[recv_var_name]->Push(v); } - if (exit_flag) { - rpc_service_->ShutDown(); - break; - } + sleep(1); } // while(true) } +static void FillRequestCtx( + distributed::RequestHandler *h, framework::Scope *scope, + platform::DeviceContext *dev_ctx, framework::Executor *executor, + framework::ProgramDesc *program, + std::unordered_map> + *prefetch_ctx, + std::shared_ptr checkpoint_ctx, + distributed::RPCServer *rpc_server) { + h->SetScope(scope); + h->SetDevCtx(dev_ctx); + h->SetExecutor(executor); + h->SetProgram(program); + h->SetPrefetchPreparedCtx(prefetch_ctx); + h->SetRPCServer(rpc_server); + h->SetCheckpointNotifyPreparedCtx(checkpoint_ctx); +} + void ListenAndServOp::RunImpl(const framework::Scope &scope, const platform::Place &dev_place) const { // Mark this as PS that it should decide profiling by listening from trainer. @@ -302,56 +253,114 @@ void ListenAndServOp::RunImpl(const framework::Scope &scope, framework::Scope &recv_scope = scope.NewScope(); bool sync_mode = Attr("sync_mode"); + auto fan_in = Attr("Fanin"); PADDLE_ENFORCE(!rpc_service_); std::string endpoint = Attr("endpoint"); - - rpc_service_.reset(new detail::AsyncGRPCServer(endpoint, sync_mode)); - - auto *optimize_block = Attr(kOptimizeBlock); - auto *prefetch_block = Attr(kPrefetchBlock); - auto *program = optimize_block->Program(); + int checkpoint_block_id = Attr(kCheckpointBlockId); + + VLOG(4) << "sync_mode:" << sync_mode << ", fan_in:" << fan_in + << ", end_point:" << endpoint + << ", checkpoint_block_id: " << checkpoint_block_id; + + rpc_service_.reset(new RPCSERVER_T(endpoint, fan_in)); + + request_send_handler_.reset(new distributed::RequestSendHandler(sync_mode)); + request_get_handler_.reset(new distributed::RequestGetHandler(sync_mode)); + request_prefetch_handler_.reset( + new distributed::RequestPrefetchHandler(sync_mode)); + request_checkpoint_handler_.reset(new distributed::RequestCheckpointHandler( + sync_mode, checkpoint_block_id)); + + rpc_service_->RegisterRPC(distributed::kRequestSend, + request_send_handler_.get()); + rpc_service_->RegisterRPC(distributed::kRequestGet, + request_get_handler_.get()); + rpc_service_->RegisterRPC(distributed::kRequestPrefetch, + request_prefetch_handler_.get()); + rpc_service_->RegisterRPC(distributed::kRequestCheckpoint, + request_checkpoint_handler_.get()); + + auto optimize_blocks = + Attr>(kOptimizeBlocks); + PADDLE_ENFORCE(optimize_blocks.size() >= 1, + "optimize blocks should be 1 at least on the pserver side."); + auto *program = optimize_blocks[0]->Program(); framework::Executor executor(dev_place); - // prepare rpc_service - rpc_service_->SetScope(&recv_scope); - rpc_service_->SetDevCtx(&dev_ctx); - rpc_service_->SetProgram(program); - rpc_service_->SetExecutor(&executor); + std::shared_ptr ckpt_pre_context = nullptr; + if (checkpoint_block_id != -1) { + auto ctx = executor.Prepare(*program, checkpoint_block_id); + // see: https://stackoverflow.com/a/14856553 + ckpt_pre_context = std::move(ctx); + } // prepare for prefetch - VLOG(3) << "prefetch block id is " << prefetch_block->ID(); - auto prefetch_prepared = executor.Prepare(*program, 
prefetch_block->ID());
-  rpc_service_->SetPrefetchPreparedCtx(prefetch_prepared.get());
-  prefetch_prepared.release();
+  std::vector<int> prefetch_block_id_list;
+  std::unordered_map<int, std::string> block_id_to_prefetch_var_name;
+
+  auto prefetch_var_name_to_block_id_str =
+      Attr<std::vector<std::string>>(kPrefetchVarNameToBlockId);
+  for (const auto &prefetch_var_name_and_id :
+       prefetch_var_name_to_block_id_str) {
+    std::vector<std::string> pieces;
+    split(prefetch_var_name_and_id, ':', &pieces);
+    VLOG(3) << "after split, prefetch_var = " << pieces[0]
+            << ", id=" << pieces[1];
+    PADDLE_ENFORCE_EQ(pieces.size(), 2);
+
+    int block_id = std::stoi(pieces[1]);
+    prefetch_block_id_list.push_back(block_id);
+    block_id_to_prefetch_var_name[block_id] = pieces[0];
+  }
+
+  auto prefetch_prepared = executor.Prepare(*program, prefetch_block_id_list);
+
+  std::unordered_map<std::string,
+                     std::shared_ptr<framework::ExecutorPrepareContext>>
+      prefetch_var_name_to_prepared_ctx;
+  for (size_t i = 0; i < prefetch_block_id_list.size(); ++i) {
+    auto block_id = prefetch_block_id_list[i];
+    auto prefetch_var_name = block_id_to_prefetch_var_name[block_id];
+    prefetch_var_name_to_prepared_ctx[prefetch_var_name] = prefetch_prepared[i];
+  }
+
+  auto f =
+      std::bind(FillRequestCtx, std::placeholders::_1, &recv_scope, &dev_ctx,
+                &executor, program, &prefetch_var_name_to_prepared_ctx,
+                ckpt_pre_context, rpc_service_.get());
+
+  f(request_send_handler_.get());
+  f(request_get_handler_.get());
+  f(request_prefetch_handler_.get());
+  f(request_checkpoint_handler_.get());

   // start the server listening after all members are initialized.
   server_thread_.reset(new std::thread(RunServer, rpc_service_));
   VLOG(3) << "wait server thread to become ready...";
   rpc_service_->WaitServerReady();

+  // register SIGINT(from ctrl+C) and SIGTERM(from kill) signal handlers
+  signal(SIGINT, SignalHandler::StopAndExit);
+  signal(SIGTERM, SignalHandler::StopAndExit);
+
   // Write to a file of server selected port for python use.
-  std::string file_path = string::Sprintf("/tmp/paddle.%d.selected_port",
-                                          static_cast<int>(::getpid()));
-  SavePort(file_path);
+  SavePort();
   if (sync_mode) {
-    RunSyncLoop(&executor, program, &recv_scope, prefetch_block);
+    RunSyncLoop(&executor, program, &recv_scope, prefetch_block_id_list,
+                checkpoint_block_id);
   } else {
-    RunAsyncLoop(&executor, program);
+    RunAsyncLoop(&executor, program, &recv_scope);
   }
 }

 class ListenAndServOpMaker : public framework::OpProtoAndCheckerMaker {
  public:
-  ListenAndServOpMaker(OpProto *proto, OpAttrChecker *op_checker)
-      : OpProtoAndCheckerMaker(proto, op_checker) {
+  void Make() override {
     AddInput("X", "(Tensor) Variables that server recv.").AsDuplicable();
     AddComment(R"DOC(
 ListenAndServ operator

 This operator will start a RPC server which can receive variables
 from send_op and send back variables to recv_op.
 )DOC");
     AddAttr<std::string>("endpoint",
                          "(string, default 127.0.0.1:6164)"
                          "IP address to listen on.")
@@ -363,15 +372,26 @@ from send_op and send back variables to recv_op.
                          "a map from grad name to its optimize block id")
         .SetDefault({});
     AddAttr<bool>("sync_mode", "if works at sync_mode or not").SetDefault(true);
-    AddAttr<framework::BlockDesc *>(kOptimizeBlock,
-                                    "BlockID to run on server side.");
-    AddAttr<framework::BlockDesc *>(kPrefetchBlock,
-                                    "prefetch block to run on server side.");
+    AddAttr<std::vector<framework::BlockDesc *>>(
+        kOptimizeBlocks, "Optimize blocks to run on server side.")
+        .SetDefault({});
+    AddAttr<std::vector<std::string>>(kPrefetchVarNameToBlockId,
+                                      "prefetch blocks to run on server side.")
+        .SetDefault({});
     AddAttr<int>("Fanin", "How many clients send to this server.")
         .SetDefault(1);
+    AddAttr<int>(kCheckpointBlockId,
+                 "BlockID to run save checkpoint on pserver.")
+        .SetDefault(-1);
   }
 };

+void SignalHandler::StopAndExit(int signal_num) {
+  // Do not use VLOG here: the device used for printing may already be
+  // released. exit will release internal allocated resources.
+  exit(0);
+}
+
 }  // namespace operators
 }  // namespace paddle
diff --git a/paddle/fluid/operators/listen_and_serv_op.h b/paddle/fluid/operators/listen_and_serv_op.h
index f52a55c5c2..978969cc51 100644
--- a/paddle/fluid/operators/listen_and_serv_op.h
+++ b/paddle/fluid/operators/listen_and_serv_op.h
@@ -16,22 +16,25 @@ limitations under the License. */
 #include #include -#include +#include #include +#include
 #include "paddle/fluid/framework/executor.h"
 #include "paddle/fluid/framework/lod_tensor.h"
 #include "paddle/fluid/framework/op_registry.h"
 #include "paddle/fluid/framework/threadpool.h"
-#include "paddle/fluid/operators/detail/grpc_server.h"
+#include "paddle/fluid/operators/distributed/request_handler.h"
+#include "paddle/fluid/operators/distributed/rpc_server.h"

 namespace paddle {
 namespace operators {

-constexpr char kOptimizeBlock[] = "OptimizeBlock";
-constexpr char kPrefetchBlock[] = "PrefetchBlock";
+constexpr char kOptimizeBlocks[] = "optimize_blocks";
+constexpr char kPrefetchVarNameToBlockId[] = "prefetch_var_name_to_block_id";
+constexpr char kCheckpointBlockId[] = "checkpint_block_id";

-void RunServer(std::shared_ptr<detail::AsyncGRPCServer> service);
+void RunServer(std::shared_ptr<distributed::RPCServer> service);

 class ListenAndServOp : public framework::OperatorBase {
  public:
@@ -40,33 +43,45 @@ class ListenAndServOp : public framework::OperatorBase {
                   const framework::VariableNameMap& outputs,
                   const framework::AttributeMap& attrs);

+  virtual ~ListenAndServOp();
+
   void RunSyncLoop(framework::Executor* executor,
                    framework::ProgramDesc* program,
                    framework::Scope* recv_scope,
-                   framework::BlockDesc* prefetch_block) const;
+                   const std::vector<int>& prefetch_block_id_list,
+                   const int checkpoint_point_block_id) const;

   void RunAsyncLoop(framework::Executor* executor,
-                    framework::ProgramDesc* program) const;
-
-  void SavePort(
-      const std::string& file_path = "/tmp/paddle.selected_port") const;
+                    framework::ProgramDesc* program,
+                    framework::Scope* recv_scope) const;

-  void WaitServerReady();
+  void SavePort() const;

-  int GetSelectedPort() { return selected_port_; }
+  int GetSelectedPort() { return rpc_service_->GetSelectedPort(); }

   void Stop() override;

   void RunImpl(const framework::Scope& scope,
                const platform::Place& dev_place) const override;

-  static void ResetPort() { selected_port_ = 0; }
-
  protected:
-  mutable std::shared_ptr<detail::AsyncGRPCServer> rpc_service_;
+  mutable std::shared_ptr<distributed::RPCServer> rpc_service_;
+  mutable std::shared_ptr<distributed::RequestHandler> request_send_handler_;
+  mutable std::shared_ptr<distributed::RequestHandler> request_get_handler_;
+  mutable std::shared_ptr<distributed::RequestHandler>
+      request_prefetch_handler_;
+  mutable std::shared_ptr<distributed::RequestHandler>
+      request_checkpoint_handler_;
+
+  mutable std::shared_ptr<std::thread> server_thread_;
-  // FIXME(wuyi): it's static so that the operator can be cloned.
- static std::atomic_int selected_port_; +}; + +class SignalHandler { + public: + static void StopAndExit(int signal_num); + + private: + DISABLE_COPY_AND_ASSIGN(SignalHandler); }; } // namespace operators diff --git a/paddle/fluid/operators/load_combine_op.cc b/paddle/fluid/operators/load_combine_op.cc index e5353144e9..0522a94195 100644 --- a/paddle/fluid/operators/load_combine_op.cc +++ b/paddle/fluid/operators/load_combine_op.cc @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include - +#include "paddle/fluid/framework/data_type_transform.h" #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/platform/device_context.h" @@ -31,6 +31,7 @@ class LoadCombineOp : public framework::OperatorBase { void RunImpl(const framework::Scope &scope, const platform::Place &place) const override { auto filename = Attr("file_path"); + auto load_as_fp16 = Attr("load_as_fp16"); std::ifstream fin(filename); PADDLE_ENFORCE(static_cast(fin), @@ -59,17 +60,25 @@ class LoadCombineOp : public framework::OperatorBase { // Get data from fin to tensor DeserializeFromStream(fin, tensor, dev_ctx); - if (platform::is_gpu_place(place)) { - // copy CPU to GPU - framework::LoDTensor cpu_tensor; - cpu_tensor.ShareDataWith(*tensor); - cpu_tensor.set_lod(tensor->lod()); - - // reset tensor + auto in_dtype = framework::ToDataType(tensor->type()); + auto out_dtype = + load_as_fp16 ? framework::proto::VarType::FP16 : in_dtype; + + if (in_dtype != out_dtype) { + // convert to float16 tensor + auto in_kernel_type = framework::OpKernelType(in_dtype, place); + auto out_kernel_type = framework::OpKernelType(out_dtype, place); + framework::LoDTensor fp16_tensor; + // copy LoD info to the new tensor + fp16_tensor.set_lod(tensor->lod()); + framework::TransDataType(in_kernel_type, out_kernel_type, *tensor, + &fp16_tensor); + + // reset output tensor out_var->Clear(); tensor = out_var->GetMutable(); - tensor->set_lod(cpu_tensor.lod()); - TensorCopy(cpu_tensor, place, dev_ctx, tensor); + tensor->set_lod(fp16_tensor.lod()); + tensor->ShareDataWith(fp16_tensor); } } } @@ -77,12 +86,18 @@ class LoadCombineOp : public framework::OperatorBase { class LoadCombineOpProtoMaker : public framework::OpProtoAndCheckerMaker { public: - LoadCombineOpProtoMaker(OpProto *proto, OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddOutput( "Out", "(vector) The output LoDTensors that will be read from the input file.") .AsDuplicable(); + AddAttr( + "load_as_fp16", + "(boolean, default false)" + "If true, the tensor will be first loaded and then " + "converted to float16 data type. Otherwise, the tensor will be " + "directly loaded without data type conversion.") + .SetDefault(false); AddAttr("file_path", "(string) " "LoDTensors will be loaded from \"file_path\".") diff --git a/paddle/fluid/operators/load_op.cc b/paddle/fluid/operators/load_op.cc index c6bd2bf3df..27e26cb1b5 100644 --- a/paddle/fluid/operators/load_op.cc +++ b/paddle/fluid/operators/load_op.cc @@ -13,6 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ #include +#include "paddle/fluid/framework/data_type_transform.h" #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/platform/device_context.h" #include "paddle/fluid/platform/profiler.h" @@ -30,9 +31,8 @@ class LoadOp : public framework::OperatorBase { private: void RunImpl(const framework::Scope &scope, const platform::Place &place) const override { - auto *dev_ctx = platform::DeviceContextPool::Instance().Get(place); - platform::RecordEvent record_event(Type(), dev_ctx); - + // FIXME(yuyang18): We save variable to local file now, but we should change + // it to save an output stream. auto filename = Attr("file_path"); std::ifstream fin(filename); PADDLE_ENFORCE(static_cast(fin), "Cannot open file %s for load op", @@ -43,28 +43,75 @@ class LoadOp : public framework::OperatorBase { PADDLE_ENFORCE(out_var != nullptr, "Output variable %s cannot be found", out_var_name); - auto *tensor = out_var->GetMutable(); + if (out_var->IsType()) { + LoadLodTensor(fin, place, out_var); + } else if (out_var->IsType()) { + LoadSelectedRows(fin, place, out_var); + } else { + PADDLE_ENFORCE( + false, + "Load only support LoDTensor and SelectedRows, %s has wrong type", + out_var_name); + } + } + + void LoadLodTensor(std::istream &fin, const platform::Place &place, + framework::Variable *var) const { + // get device context from pool + platform::DeviceContextPool &pool = platform::DeviceContextPool::Instance(); + auto &dev_ctx = *pool.Get(place); + auto *tensor = var->GetMutable(); + DeserializeFromStream(fin, tensor, dev_ctx); + + auto load_as_fp16 = Attr("load_as_fp16"); + auto in_dtype = framework::ToDataType(tensor->type()); + auto out_dtype = load_as_fp16 ? framework::proto::VarType::FP16 : in_dtype; + + if (in_dtype != out_dtype) { + // convert to float16 tensor + auto in_kernel_type = framework::OpKernelType(in_dtype, place); + auto out_kernel_type = framework::OpKernelType(out_dtype, place); + framework::LoDTensor fp16_tensor; + // copy LoD info to the new tensor + fp16_tensor.set_lod(tensor->lod()); + framework::TransDataType(in_kernel_type, out_kernel_type, *tensor, + &fp16_tensor); - DeserializeFromStream(fin, tensor, *dev_ctx); + // reset output tensor + var->Clear(); + tensor = var->GetMutable(); + tensor->set_lod(fp16_tensor.lod()); + tensor->ShareDataWith(fp16_tensor); + } + } + + void LoadSelectedRows(std::istream &fin, const platform::Place &place, + framework::Variable *var) const { + auto *selectedRows = var->GetMutable(); + // get device context from pool + platform::DeviceContextPool &pool = platform::DeviceContextPool::Instance(); + auto &dev_ctx = *pool.Get(place); + framework::DeserializeFromStream(fin, selectedRows, dev_ctx); } }; class LoadOpProtoMaker : public framework::OpProtoAndCheckerMaker { public: - LoadOpProtoMaker(OpProto *proto, OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { - AddOutput("Out", "(Tensor) The tensor need to be loaded"); + void Make() override { + AddOutput("Out", "The LoDTensor / SelectedRows need to be loaded"); + AddAttr( + "load_as_fp16", + "If true, the tensor will be first loaded and then " + "converted to float16 data type. Otherwise, the tensor will be " + "directly loaded without data type conversion. Default is false.") + .SetDefault(false); AddAttr("file_path", - "(string) " - "Variable will be loaded from \"file_path\".") + R"(Variable will be loaded from "file_path")") .AddCustomChecker( [](const std::string &path) { return !path.empty(); }); - AddComment(R"DOC( -Load Operator. 
- -Load operator will load a tensor variable from disk file. - -)DOC"); + AddComment( + "Load operator will load a LoDTensor / SelectedRows variable from disk " + "file."); } }; } // namespace operators diff --git a/paddle/fluid/operators/lod_array_length_op.cc b/paddle/fluid/operators/lod_array_length_op.cc index e621240577..e4551b8ba6 100644 --- a/paddle/fluid/operators/lod_array_length_op.cc +++ b/paddle/fluid/operators/lod_array_length_op.cc @@ -40,8 +40,7 @@ class LoDArrayLengthOp : public framework::OperatorBase { class LoDArrayLengthProtoMaker : public framework::OpProtoAndCheckerMaker { public: - LoDArrayLengthProtoMaker(OpProto *proto, OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("X", "(LoDTensorArray) The input tensor array."); AddOutput("Out", "(Tensor) 1x1 CPU Tensor of length, int64_t"); AddComment(R"DOC( diff --git a/paddle/fluid/operators/lod_rank_table_op.cc b/paddle/fluid/operators/lod_rank_table_op.cc index 590b44e14f..166952fe23 100644 --- a/paddle/fluid/operators/lod_rank_table_op.cc +++ b/paddle/fluid/operators/lod_rank_table_op.cc @@ -38,8 +38,7 @@ class LoDRankTableOp : public framework::OperatorBase { class LoDRankTableOpProtoMaker : public framework::OpProtoAndCheckerMaker { public: - LoDRankTableOpProtoMaker(OpProto *proto, OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("X", "(LoDTensor) input lod tensor, must contain lod information."); AddOutput("Out", "(LoDRankTable) The rank table of specific level."); diff --git a/paddle/fluid/operators/lod_reset_op.cc b/paddle/fluid/operators/lod_reset_op.cc index 92ebfc274b..0d4e84e850 100644 --- a/paddle/fluid/operators/lod_reset_op.cc +++ b/paddle/fluid/operators/lod_reset_op.cc @@ -47,8 +47,7 @@ class LoDResetOp : public framework::OperatorWithKernel { class LoDResetOpMaker : public framework::OpProtoAndCheckerMaker { public: - LoDResetOpMaker(OpProto *proto, OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("X", "(Tensor, LoDTensor) Input variable of LoDResetOp which " "could be a Tensor or LoDTensor, where the data of output " diff --git a/paddle/fluid/operators/lod_tensor_to_array_op.cc b/paddle/fluid/operators/lod_tensor_to_array_op.cc index 543495ce4e..00ba5ce8ee 100644 --- a/paddle/fluid/operators/lod_tensor_to_array_op.cc +++ b/paddle/fluid/operators/lod_tensor_to_array_op.cc @@ -105,8 +105,7 @@ class LoDTensorToArrayOp : public framework::OperatorBase { class LoDTensorToArrayOpProtoMaker : public framework::OpProtoAndCheckerMaker { public: - LoDTensorToArrayOpProtoMaker(OpProto *proto, OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("X", ""); AddInput("RankTable", ""); AddOutput("Out", ""); diff --git a/paddle/fluid/operators/log_loss_op.cc b/paddle/fluid/operators/log_loss_op.cc index a8258a1afd..9d248e0321 100644 --- a/paddle/fluid/operators/log_loss_op.cc +++ b/paddle/fluid/operators/log_loss_op.cc @@ -46,8 +46,7 @@ class LogLossOp : public framework::OperatorWithKernel { template class LogLossOpMaker : public framework::OpProtoAndCheckerMaker { public: - LogLossOpMaker(OpProto* proto, OpAttrChecker* op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("Predicted", "The input value (Predicted) of Log loss op." 
"Predicted is a 2-D tensor with shape [batch_size, 1]."); diff --git a/paddle/fluid/operators/logical_op.cc b/paddle/fluid/operators/logical_op.cc index 41aa00ee8a..26970db8d2 100644 --- a/paddle/fluid/operators/logical_op.cc +++ b/paddle/fluid/operators/logical_op.cc @@ -21,8 +21,7 @@ namespace operators { template class BinaryLogicalOpProtoMaker : public framework::OpProtoAndCheckerMaker { public: - BinaryLogicalOpProtoMaker(OpProto *proto, OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { OpComment comment; AddInput("X", string::Sprintf("(LoDTensor) Left hand operand of %s operator", @@ -45,8 +44,7 @@ Each element of Out is calculated by %s template class UnaryLogicalOpProtoMaker : public framework::OpProtoAndCheckerMaker { public: - UnaryLogicalOpProtoMaker(OpProto *proto, OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { OpComment comment; AddInput("X", string::Sprintf("(LoDTensor) Operand of %s operator", comment.type)); @@ -148,6 +146,6 @@ REGISTER_UNARY_LOGICAL_OP(logical_not, "$$Out = !X$$"); REGISTER_UNARY_LOGICAL_KERNEL(logical_not, CPU, paddle::operators::LogicalNotFunctor); REGISTER_BINARY_LOGICAL_OP(logical_xor, - "$$Out = (X || Y) \\, \\&\\& \\, !(X \\&\\& Y)$$"); + "$$Out = (X || Y) \\&\\& !(X \\&\\& Y)$$"); REGISTER_BINARY_LOGICAL_KERNEL(logical_xor, CPU, paddle::operators::LogicalXorFunctor); diff --git a/paddle/fluid/operators/lookup_sparse_table_op.cc b/paddle/fluid/operators/lookup_sparse_table_op.cc index 66b626ed79..2ce11e712f 100644 --- a/paddle/fluid/operators/lookup_sparse_table_op.cc +++ b/paddle/fluid/operators/lookup_sparse_table_op.cc @@ -105,8 +105,7 @@ class LookupSparseTableOp : public framework::OperatorBase { class LookupSparseTableOpMaker : public framework::OpProtoAndCheckerMaker { public: - LookupSparseTableOpMaker(OpProto *proto, OpAttrChecker *op_checker) - : framework::OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("W", "(SelectedRows) The input represents embedding table, " "which is a learnable parameter."); @@ -128,7 +127,7 @@ class LookupSparseTableOpMaker : public framework::OpProtoAndCheckerMaker { .SetDefault(-1.0f); AddAttr("max", "(float, default 1.0) " - "Maximun value of uniform random") + "Maximum value of uniform random") .SetDefault(1.0f); AddAttr("seed", "(int, default 0) " diff --git a/paddle/fluid/operators/lookup_table_op.cc b/paddle/fluid/operators/lookup_table_op.cc index 5e59bd1b17..d77b095c5d 100644 --- a/paddle/fluid/operators/lookup_table_op.cc +++ b/paddle/fluid/operators/lookup_table_op.cc @@ -32,20 +32,21 @@ class LookupTableOp : public framework::OperatorWithKernel { auto table_dims = ctx->GetInputDim("W"); auto ids_dims = ctx->GetInputDim("Ids"); + int ids_rank = ids_dims.size(); - auto ids_var_type = ctx->GetInputsVarType("Ids").front(); - // The type of Ids(Input) is SelectedRows or LoDTensor, when Ids's type - // is LoDTensor, this tensor contains the ids to be looked up in W - // and it must be a column vector with rank = 2 while the 2nd dimension - // size must be 1, when Ids's type is SelectedRows, the rows of Ids - // contains the ids to be looked up in W; - if (ids_var_type == framework::proto::VarType::LOD_TENSOR) { - PADDLE_ENFORCE_EQ(ids_dims.size(), 2); - PADDLE_ENFORCE_EQ(ids_dims[1], 1); - } + PADDLE_ENFORCE_EQ(table_dims.size(), 2); + PADDLE_ENFORCE_EQ(ids_dims[ids_rank - 1], 1, + "The last dimension of the 'Ids' tensor must be 1."); + + auto output_dims = + 
framework::vectorize(framework::slice_ddim(ids_dims, 0, ids_rank - 1)); + output_dims.push_back(table_dims[1]); + ctx->SetOutputDim("Out", framework::make_ddim(output_dims)); - ctx->SetOutputDim("Out", {ids_dims[0], table_dims[1]}); - ctx->ShareLoD("Ids", /*->*/ "Out"); + if (ctx->GetOutputsVarType("Out")[0] == + framework::proto::VarType::LOD_TENSOR) { + ctx->ShareLoD("Ids", /*->*/ "Out"); + } } protected: @@ -58,22 +59,15 @@ class LookupTableOp : public framework::OperatorWithKernel { class LookupTableOpMaker : public framework::OpProtoAndCheckerMaker { public: - LookupTableOpMaker(OpProto* proto, OpAttrChecker* op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("W", "(Tensor) The input represents embedding tensors, " "which is a learnable parameter."); - AddInput( - "Ids", - "(Tensor or SelectedRows) Ids's type can be Tensor or " - "SelectedRows, when Ids's type is Tensor, this tensor contains " - "the ids to be looked up in W and it must be a column vector with " - "rank = 2 while the 2nd dimension size must be 1; when Ids's type is " - "SelectedRows, the rows of Ids contains the ids to be looked up " - "in W."); - AddOutput("Out", - "(Tensor or SelectedRows) The lookup results, which have the " - "same type as W."); + AddInput("Ids", + "An input with type int32 or int64 " + "contains the ids to be looked up in W. " + "The last dimension size must be 1."); + AddOutput("Out", "The lookup results, which have the same type as W."); AddAttr("is_sparse", "(boolean, default false) " "Sparse update.") @@ -91,15 +85,10 @@ class LookupTableOpMaker : public framework::OpProtoAndCheckerMaker { Lookup Table Operator. This operator is used to perform lookups on the parameter W, -then concatenated into a dense or sparse tensor. - -The type of Ids(Input) is SelectedRows, Tensor or LoDTensor, when Ids's -type is SelectedRows, the rows of Ids contains the ids to be looked up in W; -when Ids's type is Tensor, this tensor contains the ids to be looked up in W -and it must be a column vector with rank = 2 while the 2nd dimension size must be 1, -at this time, Ids can carry the LoD (Level of Details) information, or not, and -the output only shares the LoD information with input Ids. +then concatenated into a dense tensor. +The input Ids can carry the LoD (Level of Details) information, +or not. And the output only shares the LoD information with input Ids. 
)DOC"); } diff --git a/paddle/fluid/operators/lookup_table_op.cu b/paddle/fluid/operators/lookup_table_op.cu index 77722c50d3..74823dab09 100644 --- a/paddle/fluid/operators/lookup_table_op.cu +++ b/paddle/fluid/operators/lookup_table_op.cu @@ -23,7 +23,7 @@ namespace operators { template -__global__ void LookupTable(T* output, const T* table, const int64_t* ids, +__global__ void LookupTable(T *output, const T *table, const int64_t *ids, const int64_t N, const int64_t K, const int64_t D, const int64_t padding_idx) { int idx = threadIdx.x; @@ -33,8 +33,8 @@ __global__ void LookupTable(T* output, const T* table, const int64_t* ids, int64_t id = ids[idy]; PADDLE_ASSERT(id >= 0); PADDLE_ASSERT(id < N); - T* out = output + idy * D; - const T* tab = table + id * D; + T *out = output + idy * D; + const T *tab = table + id * D; for (int i = idx; i < D; i += BlockDimX) { if (PaddingFlag) { if (id == padding_idx) @@ -50,7 +50,7 @@ __global__ void LookupTable(T* output, const T* table, const int64_t* ids, } template -__global__ void LookupTableGrad(T* table, const T* output, const int64_t* ids, +__global__ void LookupTableGrad(T *table, const T *output, const int64_t *ids, const int64_t N, const int64_t K, const int64_t D) { int idx = threadIdx.x; @@ -60,8 +60,8 @@ __global__ void LookupTableGrad(T* table, const T* output, const int64_t* ids, int id = ids[idy]; PADDLE_ASSERT(id >= 0); PADDLE_ASSERT(id < N); - const T* out = output + idy * D; - T* tab = table + id * D; + const T *out = output + idy * D; + T *tab = table + id * D; for (int i = idx; i < D; i += BlockDimX) { paddle::platform::CudaAtomicAdd(&tab[i], out[i]); } @@ -72,36 +72,19 @@ __global__ void LookupTableGrad(T* table, const T* output, const int64_t* ids, template class LookupTableCUDAKernel : public framework::OpKernel { public: - void Compute(const framework::ExecutionContext& context) const override { - auto* table_t = context.Input("W"); + void Compute(const framework::ExecutionContext &context) const override { + auto *table_t = context.Input("W"); + auto *ids_t = context.Input("Ids"); + auto *output_t = context.Output("Out"); int64_t padding_idx = context.Attr("padding_idx"); - auto* ids_var = context.InputVar("Ids"); - Tensor* output_t = context.Output("Out"); - - int64_t* ids; - int64_t K; - - // The type of Ids(Input) is SelectedRows or LoDTensor, when Ids's type - // is LoDTensor, this tensor contains the ids to be looked up in W; - // when Ids's type is SelectedRows, the rows of Ids contains the - // ids to be looked up in W. 
- if (ids_var->IsType()) { - auto* ids_t = context.Input("Ids"); - ids = const_cast(ids_t->data()); - K = ids_t->numel(); - } else if (ids_var->IsType()) { - auto* ids_t = context.Input("Ids"); - ids = const_cast(ids_t->rows().CUDAData(context.GetPlace())); - K = ids_t->rows().size(); - output_t->Resize({K, table_t->dims()[1]}); - } else { - PADDLE_THROW("Unsupported Variable Type of Ids"); - } size_t N = table_t->dims()[0]; size_t D = table_t->dims()[1]; - auto* table = table_t->data(); - auto* output = output_t->mutable_data(context.GetPlace()); + size_t K = ids_t->numel(); + + auto *ids = ids_t->data(); + auto *table = table_t->data(); + auto *output = output_t->mutable_data(context.GetPlace()); dim3 threads(128, 8); dim3 grids(8, 1); @@ -122,41 +105,44 @@ class LookupTableCUDAKernel : public framework::OpKernel { template class LookupTableGradCUDAKernel : public framework::OpKernel { public: - void Compute(const framework::ExecutionContext& context) const override { - auto& dev_ctx = + void Compute(const framework::ExecutionContext &context) const override { + auto &dev_ctx = context.template device_context(); bool is_sparse = context.Attr("is_sparse"); // Since paddings are not trainable and fixed in forward, the gradient of // paddings makes no sense and we don't deal with it in backward. if (is_sparse) { - auto* ids = context.Input("Ids"); - auto* table = context.Input("W"); - auto* d_output = context.Input(framework::GradVarName("Out")); - auto* d_table = context.Output(framework::GradVarName("W")); + auto *ids = context.Input("Ids"); + auto *table = context.Input("W"); + auto *d_output = context.Input(framework::GradVarName("Out")); + auto *d_table = context.Output(framework::GradVarName("W")); - auto* ids_data = ids->data(); - auto ids_dim = ids->dims(); + auto *ids_data = ids->data(); + int64_t ids_num = ids->numel(); auto stream = dev_ctx.stream(); // copy GPU memory to CPU pinned memory framework::Vector new_rows; - new_rows.resize(ids_dim[0]); + new_rows.resize(ids_num); auto gpu_place = boost::get(context.GetPlace()); // TODO(yuyang18): Strange code here. 
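(The SelectedRows gradient assembled below has a simple layout; here is a minimal CPU-side sketch, where SparseGrad is a simplified stand-in for framework::SelectedRows and all names are illustrative.)

#include <cstdint>
#include <vector>

// The W-gradient keeps only the rows that were actually looked up.
struct SparseGrad {
  std::vector<int64_t> rows;  // one entry per id (duplicates are allowed)
  std::vector<float> value;   // shape [rows.size(), D], copied from d(Out)
  int64_t height;             // first dim of W, i.e. the table size N
};

SparseGrad BuildSparseGrad(const std::vector<int64_t> &ids,
                           const std::vector<float> &d_out, int64_t N) {
  SparseGrad g;
  g.rows = ids;     // row i of value is the gradient for W[ids[i]]
  g.value = d_out;  // d(Out) flattened to [ids.size(), D]
  g.height = N;
  return g;
}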
memory::Copy(platform::CPUPlace(), new_rows.CUDAMutableData(context.GetPlace()), gpu_place, - ids_data, ids_dim[0] * sizeof(int64_t), stream); + ids_data, ids_num * sizeof(int64_t), stream); d_table->set_rows(new_rows); - auto* d_table_value = d_table->mutable_value(); - d_table_value->Resize({ids_dim[0], table->dims()[1]}); + auto *d_table_value = d_table->mutable_value(); + d_table_value->Resize({ids_num, table->dims()[1]}); d_table_value->mutable_data(context.GetPlace()); - auto* d_table_data = d_table_value->data(); - auto* d_output_data = d_output->data(); - PADDLE_ENFORCE_EQ(d_table_value->dims(), d_output->dims()); + auto *d_table_data = d_table_value->data(); + auto *d_output_data = d_output->data(); + auto d_output_dims = d_output->dims(); + PADDLE_ENFORCE_EQ( + d_table_value->dims(), + framework::flatten_to_2d(d_output_dims, d_output_dims.size() - 1)); memory::Copy(gpu_place, d_table_data, gpu_place, d_output_data, d_output->numel() * sizeof(T), stream); @@ -168,9 +154,9 @@ class LookupTableGradCUDAKernel : public framework::OpKernel { int N = d_table_t->dims()[0]; int D = d_table_t->dims()[1]; int K = ids_t->numel(); - const int64_t* ids = ids_t->data(); - const T* d_output = d_output_t->data(); - T* d_table = d_table_t->mutable_data(context.GetPlace()); + const int64_t *ids = ids_t->data(); + const T *d_output = d_output_t->data(); + T *d_table = d_table_t->mutable_data(context.GetPlace()); auto t = framework::EigenVector::Flatten(*d_table_t); t.device(*dev_ctx.eigen_device()) = t.constant(static_cast(0)); diff --git a/paddle/fluid/operators/lookup_table_op.h b/paddle/fluid/operators/lookup_table_op.h index d482506bf0..f5c10ced83 100644 --- a/paddle/fluid/operators/lookup_table_op.h +++ b/paddle/fluid/operators/lookup_table_op.h @@ -36,43 +36,13 @@ template class LookupTableKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext &context) const override { + auto *ids_t = context.Input("Ids"); // int tensor + auto *output_t = context.Output("Out"); // float tensor auto *table_var = context.InputVar("W"); - auto *ids_var = context.InputVar("Ids"); - Tensor *output_t = context.Output("Out"); - int64_t padding_idx = context.Attr("padding_idx"); - - DDim table_dim; - if (table_var->IsType()) { - table_dim = context.Input("W")->dims(); - } else if (table_var->IsType()) { - auto *table_t = context.Input("W"); - table_dim = table_t->value().dims(); - } else { - PADDLE_THROW( - "The parameter W of a LookupTable " - "must be either LoDTensor or SelectedRows"); - } - - int64_t *ids; - int64_t ids_numel; - - // The type of Ids(Input) is SelectedRows or LoDTensor, when Ids's type - // is LoDTensor, this tensor contains the ids to be looked up in W; - // when Ids's type is SelectedRows, the rows of Ids contains the - // ids to be looked up in W. 
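(After this simplification Ids is always an integer tensor whose last dimension is 1, so the kernel can treat it as a flat array of row indices; a small sketch of the flattening rule, with illustrative shapes.)

#include <cstdint>
#include <functional>
#include <numeric>
#include <vector>

// [d0, d1, ..., 1] flattens to K = d0 * d1 * ... ids, looked up row by row.
int64_t FlatIdCount(const std::vector<int64_t> &ids_dims) {
  return std::accumulate(ids_dims.begin(), ids_dims.end(), int64_t{1},
                         std::multiplies<int64_t>());
}
// e.g. FlatIdCount({32, 10, 1}) == 320, which is what ids_t->numel()
// computes in the simplified kernel below; Out is then viewed as [320, D].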
- if (ids_var->IsType()) { - auto *ids_t = context.Input("Ids"); - ids = const_cast(ids_t->data()); - ids_numel = ids_t->numel(); - } else if (ids_var->IsType()) { - auto *ids_t = context.Input("Ids"); - ids = const_cast(ids_t->rows().data()); - ids_numel = ids_t->rows().size(); - output_t->Resize({ids_numel, table_dim[1]}); - } else { - PADDLE_THROW("Unsupported Variable Type of Ids"); - } + int64_t padding_idx = context.Attr("padding_idx"); + int64_t *ids = const_cast(ids_t->data()); + int64_t ids_numel = ids_t->numel(); if (table_var->IsType()) { auto *table_t = context.Input("W"); @@ -139,17 +109,17 @@ class LookupTableGradKernel : public framework::OpKernel { auto *d_table = context.Output(framework::GradVarName("W")); auto *ids_data = ids->data(); - auto ids_dim = ids->dims(); + int64_t ids_num = ids->numel(); framework::Vector new_rows; - new_rows.reserve(ids_dim[0]); - for (int64_t i = 0; i < ids_dim[0]; i++) { + new_rows.reserve(ids_num); + for (int64_t i = 0; i < ids_num; i++) { new_rows.push_back(ids_data[i]); } d_table->set_rows(new_rows); auto *d_table_value = d_table->mutable_value(); - d_table_value->Resize({ids_dim[0], table_dim[1]}); + d_table_value->Resize({ids_num, table_dim[1]}); d_table_value->mutable_data(context.GetPlace()); d_table->set_height(table_dim[0]); @@ -157,7 +127,10 @@ class LookupTableGradKernel : public framework::OpKernel { auto *d_output_data = d_output->data(); auto *d_table_data = d_table_value->data(); - PADDLE_ENFORCE_EQ(d_table_value->dims(), d_output->dims()); + auto d_output_dims = d_output->dims(); + PADDLE_ENFORCE_EQ( + d_table_value->dims(), + framework::flatten_to_2d(d_output_dims, d_output_dims.size() - 1)); memcpy(d_table_data, d_output_data, sizeof(T) * d_output->numel()); } else { auto *ids = context.Input("Ids"); @@ -165,10 +138,9 @@ class LookupTableGradKernel : public framework::OpKernel { auto *d_table = context.Output(framework::GradVarName("W")); auto *ids_data = ids->data(); - auto ids_dim = ids->dims(); int N = table_dim[0]; - int D = d_output->dims()[1]; + int D = table_dim[1]; auto *d_output_data = d_output->data(); auto *d_table_data = d_table->mutable_data(context.GetPlace()); diff --git a/paddle/fluid/operators/lrn_op.cc b/paddle/fluid/operators/lrn_op.cc index f5c0e47fda..52b459a6a2 100644 --- a/paddle/fluid/operators/lrn_op.cc +++ b/paddle/fluid/operators/lrn_op.cc @@ -124,16 +124,17 @@ namespace { framework::OpKernelType GetExpectedLRNKernel( const framework::ExecutionContext& ctx) { framework::LibraryType library_{framework::LibraryType::kPlain}; + std::string data_format = ctx.Attr("data_format"); + // TODO(pzelazko-intel): enable MKLDNN layout when it's ready + framework::DataLayout layout_ = framework::StringToDataLayout(data_format); #ifdef PADDLE_WITH_MKLDNN if (library_ == framework::LibraryType::kPlain && platform::CanMKLDNNBeUsed(ctx)) { library_ = framework::LibraryType::kMKLDNN; + layout_ = framework::DataLayout::kMKLDNN; } #endif - std::string data_format = ctx.Attr("data_format"); - // TODO(pzelazko-intel): enable MKLDNN layout when it's ready - framework::DataLayout layout_ = framework::StringToDataLayout(data_format); return framework::OpKernelType( framework::ToDataType(ctx.Input("X")->type()), ctx.GetPlace(), layout_, library_); @@ -169,8 +170,7 @@ class LRNOp : public framework::OperatorWithKernel { template class LRNOpMaker : public framework::OpProtoAndCheckerMaker { public: - LRNOpMaker(OpProto* proto, OpAttrChecker* op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { 
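(This is the same mechanical OpMaker migration applied to every operator in this patch; schematically, with a made-up FooOpMaker for illustration:)

// Before: the proto and checker were wired through a two-argument constructor.
class FooOpMakerOld : public framework::OpProtoAndCheckerMaker {
 public:
  FooOpMakerOld(OpProto* proto, OpAttrChecker* op_checker)
      : OpProtoAndCheckerMaker(proto, op_checker) {
    AddInput("X", "(Tensor) The input.");
  }
};

// After: the base class owns proto/op_checker and calls Make() itself.
class FooOpMakerNew : public framework::OpProtoAndCheckerMaker {
 public:
  void Make() override { AddInput("X", "(Tensor) The input."); }
};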
AddInput("X", "(Tensor) The input of LRN operator. " "It must be a 4D tenor with NCHW format."); diff --git a/paddle/fluid/operators/lstm_op.cc b/paddle/fluid/operators/lstm_op.cc index 084ee1cfe6..3225bf9bb6 100644 --- a/paddle/fluid/operators/lstm_op.cc +++ b/paddle/fluid/operators/lstm_op.cc @@ -103,8 +103,7 @@ class LSTMOp : public framework::OperatorWithKernel { class LSTMOpMaker : public framework::OpProtoAndCheckerMaker { public: - LSTMOpMaker(OpProto* proto, OpAttrChecker* op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("Input", "(LoDTensor) the first input is a LodTensor, which support " "variable-time length input sequence. The underlying tensor in " @@ -185,34 +184,32 @@ Long-Short Term Memory (LSTM) Operator. The defalut implementation is diagonal/peephole connection (https://arxiv.org/pdf/1402.1128.pdf), the formula is as follows: -$$ -i_t = \sigma(W_{ix}x_{t} + W_{ih}h_{t-1} + W_{ic}c_{t-1} + b_i) \\ +$$ i_t = \\sigma(W_{ix}x_{t} + W_{ih}h_{t-1} + W_{ic}c_{t-1} + b_i) $$ -f_t = \sigma(W_{fx}x_{t} + W_{fh}h_{t-1} + W_{fc}c_{t-1} + b_f) \\ +$$ f_t = \\sigma(W_{fx}x_{t} + W_{fh}h_{t-1} + W_{fc}c_{t-1} + b_f) $$ -\tilde{c_t} = act_g(W_{cx}x_t + W_{ch}h_{t-1} + b_c) \\ +$$ \\tilde{c_t} = act_g(W_{cx}x_t + W_{ch}h_{t-1} + b_c) $$ -o_t = \sigma(W_{ox}x_{t} + W_{oh}h_{t-1} + W_{oc}c_t + b_o) \\ +$$ o_t = \\sigma(W_{ox}x_{t} + W_{oh}h_{t-1} + W_{oc}c_t + b_o) $$ -c_t = f_t \odot c_{t-1} + i_t \odot \tilde{c_t} \\ +$$ c_t = f_t \\odot c_{t-1} + i_t \\odot \\tilde{c_t} $$ -h_t = o_t \odot act_h(c_t) -$$ +$$ h_t = o_t \\odot act_h(c_t) $$ -where the W terms denote weight matrices (e.g. $W_{xi}$ is the matrix -of weights from the input gate to the input), $W_{ic}, W_{fc}, W_{oc}$ -are diagonal weight matrices for peephole connections. In our implementation, -we use vectors to reprenset these diagonal weight matrices. The b terms -denote bias vectors ($b_i$ is the input gate bias vector), $\sigma$ -is the non-line activations, such as logistic sigmoid function, and -$i, f, o$ and $c$ are the input gate, forget gate, output gate, -and cell activation vectors, respectively, all of which have the same size as -the cell output activation vector $h$. - -The $\odot$ is the element-wise product of the vectors. $act_g$ and $act_h$ -are the cell input and cell output activation functions and `tanh` is usually -used for them. $\tilde{c_t}$ is also called candidate hidden state, -which is computed based on the current input and the previous hidden state. +- W terms denote weight matrices (e.g. $W_{xi}$ is the matrix + of weights from the input gate to the input), $W_{ic}, W_{fc}, W_{oc}$ + are diagonal weight matrices for peephole connections. In our implementation, + we use vectors to reprenset these diagonal weight matrices. +- The b terms denote bias vectors ($b_i$ is the input gate bias vector). +- $\sigma$ is the non-line activations, such as logistic sigmoid function. +- $i, f, o$ and $c$ are the input gate, forget gate, output gate, + and cell activation vectors, respectively, all of which have the same size as + the cell output activation vector $h$. +- The $\odot$ is the element-wise product of the vectors. +- $act_g$ and $act_h$ are the cell input and cell output activation functions + and `tanh` is usually used for them. +- $\tilde{c_t}$ is also called candidate hidden state, + which is computed based on the current input and the previous hidden state. Set `use_peepholes` False to disable peephole connection. 
The formula is omitted here, please refer to the paper diff --git a/paddle/fluid/operators/lstm_unit_op.cc b/paddle/fluid/operators/lstm_unit_op.cc index e1157ef6c6..0895c58f5f 100644 --- a/paddle/fluid/operators/lstm_unit_op.cc +++ b/paddle/fluid/operators/lstm_unit_op.cc @@ -48,8 +48,7 @@ class LstmUnitOp : public framework::OperatorWithKernel { class LstmUnitOpMaker : public framework::OpProtoAndCheckerMaker { public: - LstmUnitOpMaker(OpProto* proto, OpAttrChecker* op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("X", "Lstm unit only applies non-linear activations, please make sure" "that linear tranformation has already been applied to `X`. " diff --git a/paddle/fluid/operators/lstmp_op.cc b/paddle/fluid/operators/lstmp_op.cc index f9261323f0..e398b51480 100644 --- a/paddle/fluid/operators/lstmp_op.cc +++ b/paddle/fluid/operators/lstmp_op.cc @@ -120,8 +120,7 @@ class LSTMPOp : public framework::OperatorWithKernel { class LSTMPOpMaker : public framework::OpProtoAndCheckerMaker { public: - LSTMPOpMaker(OpProto* proto, OpAttrChecker* op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("Input", "(LoDTensor) the input for sequence data, which supports " "variable-time length input sequence. The underlying tensor in " diff --git a/paddle/fluid/operators/margin_rank_loss_op.cc b/paddle/fluid/operators/margin_rank_loss_op.cc index 0b41a3e1ff..b643ba9d7f 100644 --- a/paddle/fluid/operators/margin_rank_loss_op.cc +++ b/paddle/fluid/operators/margin_rank_loss_op.cc @@ -42,8 +42,7 @@ class MarginRankLossOp : public framework::OperatorWithKernel { template class MarginRankLossOpMaker : public framework::OpProtoAndCheckerMaker { public: - MarginRankLossOpMaker(OpProto *proto, OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("X1", "(2-D tensor with shape [batch_size x 1]) The score for " "one item X1 to be ranked, from pairwise ranking model."); diff --git a/paddle/fluid/operators/math/CMakeLists.txt b/paddle/fluid/operators/math/CMakeLists.txt index 53a478c1ac..d2b772d113 100644 --- a/paddle/fluid/operators/math/CMakeLists.txt +++ b/paddle/fluid/operators/math/CMakeLists.txt @@ -51,16 +51,17 @@ math_library(sequence_padding) math_library(sequence_pooling DEPS math_function) math_library(sequence_scale) math_library(softmax DEPS math_function) +math_library(matrix_bit_code) math_library(unpooling) math_library(vol2col) -cc_test(math_function_test SRCS math_function_test.cc) +cc_test(math_function_test SRCS math_function_test.cc DEPS math_function) cc_test(selected_rows_functor_test SRCS selected_rows_functor_test.cc DEPS selected_rows_functor) cc_test(im2col_test SRCS im2col_test.cc DEPS im2col) cc_test(vol2col_test SRCS vol2col_test.cc DEPS vol2col) cc_test(sequence_padding_test SRCS sequence_padding_test.cc DEPS sequence_padding) if(WITH_GPU) - nv_test(math_function_gpu_test SRCS math_function_test.cu) - nv_test(selected_rows_functor_gpu_test SRCS selected_rows_functor_test.cu DEPS selected_rows_functor) + nv_test(math_function_gpu_test SRCS math_function_test.cu DEPS math_function) + nv_test(selected_rows_functor_gpu_test SRCS selected_rows_functor_test.cu DEPS selected_rows_functor math_function) endif() cc_test(concat_test SRCS concat_test.cc DEPS concat) diff --git a/paddle/fluid/operators/math/blas.cc b/paddle/fluid/operators/math/blas.cc index 3eeb77546b..6a143b3c05 100644 --- a/paddle/fluid/operators/math/blas.cc +++ 
b/paddle/fluid/operators/math/blas.cc
@@ -13,10 +13,40 @@
 // limitations under the License.
 
 #include "paddle/fluid/operators/math/blas.h"
+
+#include <utility>
 
 namespace paddle {
 namespace operators {
 namespace math {
-// Do nothing. Blas is a header only library.
+MatDescriptor CreateMatrixDescriptor(const framework::DDim &tensor_dim,
+                                     int num_flatten_cols, bool trans) {
+  PADDLE_ENFORCE_GT(tensor_dim.size(), 1);
+  MatDescriptor retv;
+  if (num_flatten_cols > 1) {
+    auto flatten_dim = framework::flatten_to_2d(tensor_dim, num_flatten_cols);
+    retv.height_ = flatten_dim[0];
+    retv.width_ = flatten_dim[1];
+  } else {
+    if (tensor_dim.size() == 2) {
+      retv.height_ = tensor_dim[0];
+      retv.width_ = tensor_dim[1];
+    } else {
+      auto dim_vec = framework::vectorize(tensor_dim);
+      retv.batch_size_ = 1;
+      for (size_t i = 0; i < dim_vec.size() - 2; ++i) {
+        retv.batch_size_ *= dim_vec[i];
+      }
+      retv.height_ = dim_vec[dim_vec.size() - 2];
+      retv.width_ = dim_vec[dim_vec.size() - 1];
+      retv.stride_ = retv.height_ * retv.width_;
+    }
+  }
+  if (trans) {
+    std::swap(retv.width_, retv.height_);
+  }
+  retv.trans_ = trans;
+  return retv;
+}
 }  // namespace math
 }  // namespace operators
 }  // namespace paddle
diff --git a/paddle/fluid/operators/math/blas.h b/paddle/fluid/operators/math/blas.h
index 5cd2f855d1..70f88f24f6 100644
--- a/paddle/fluid/operators/math/blas.h
+++ b/paddle/fluid/operators/math/blas.h
@@ -18,34 +18,65 @@
 #include "paddle/fluid/framework/tensor.h"
 
 #ifdef PADDLE_WITH_MKLML
-#include <mkl_cblas.h>
-#include <mkl_lapacke.h>
-#include <mkl_vml_functions.h>
+#include "paddle/fluid/platform/dynload/mklml.h"
 #endif
 
-#ifdef PADDLE_USE_OPENBLAS
-#include <cblas.h>
-#include <lapacke.h>
+#ifdef PADDLE_WITH_LIBXSMM
+#include <libxsmm.h>
 #endif
 
-#ifndef LAPACK_FOUND
-extern "C" {
-#include <lapacke.h>  // NOLINT
-int LAPACKE_sgetrf(int matrix_layout, int m, int n, float* a, int lda,
-                   int* ipiv);
-int LAPACKE_dgetrf(int matrix_layout, int m, int n, double* a, int lda,
-                   int* ipiv);
-int LAPACKE_sgetri(int matrix_layout, int n, float* a, int lda,
-                   const int* ipiv);
-int LAPACKE_dgetri(int matrix_layout, int n, double* a, int lda,
-                   const int* ipiv);
-}
+#ifdef PADDLE_USE_OPENBLAS
+#include <cblas.h>
 #endif
 
 namespace paddle {
 namespace operators {
 namespace math {
 
+/**
+ * Matrix Descriptor of a memory buffer.
+ *
+ * It is used for Blas::MatMul. The MatMul operator can be batched: if Mat A is
+ * [BatchSize, H1, W1] and Mat B is [BatchSize, W1, W2], it will be `BatchSize`
+ * separate GEMMs. The batched GEMM could be faster, depending on the
+ * implementation of the blas library. The batch size could be zero. If any
+ * matrix of `matmul` has a batch size, it will be a batched GEMM, too. e.g.,
+ * if Mat A is [BatchSize, H1, W1] and Mat B is [W1, W2], the result matrix
+ * will be [BatchSize, H1, W2].
+ *
+ * The boolean flag, `trans`, describes whether the memory holds the transpose
+ * of the matrix. If `trans` is true, the last two dims of the matrix are
+ * transposed, i.e. the memory layout is [Width, Height] or
+ * [BatchSize, Width, Height].
+ *
+ * A MatDescriptor is not only the dimension or shape of a matrix; it also
+ * contains the layout and stride of the matrix. It is clearer to have a
+ * dedicated structure than to reuse `DDim`.
+ */
+struct MatDescriptor {
+  int64_t height_;
+  int64_t width_;
+  int64_t stride_{0};
+  int64_t batch_size_{0};
+  bool trans_;
+};
+
+/**
+ * Create a Matrix Descriptor from a tensor dim, num_flatten_cols, and transpose
+ * flag
+ *
+ * @param tensor_dim: The dimension of the tensor. The rank of this dimension
+ * must be larger than 1.
+ *
+ * @param num_flatten_cols: Reshape a tensor to a matrix.
The matrix's first + * dimension(column length) will be the product of tensor's first `num_col_dims` + * dimensions. If num_flatten_cols is zero, the first N-2 dimension will be the + * batch_size of descriptor. + * + * @param trans: True if the matrix is transposed. + */ +extern MatDescriptor CreateMatrixDescriptor(const framework::DDim& tensor_dim, + int num_flatten_cols, bool trans); + template class Blas { public: @@ -81,6 +112,12 @@ class Blas { template void AXPY(int n, T alpha, const T* x, T* y) const; + template + void VADD(int n, const T* x, const T* y, T* z) const; + + template + void VCOPY(int n, const T* x, T* y) const; + template void GEMV(bool trans_a, int M, int N, T alpha, const T* A, const T* B, T beta, T* C) const; @@ -90,6 +127,11 @@ class Blas { int K, T alpha, const T* A, const T* B, T beta, T* C, int batchCount, int64_t strideA, int64_t strideB) const; + template + void MatMul(const framework::Tensor& mat_a, const MatDescriptor& dim_a, + const framework::Tensor& mat_b, const MatDescriptor& dim_b, + T alpha, framework::Tensor* mat_out, T beta) const; + private: const DeviceContext& context_; }; @@ -114,6 +156,16 @@ class BlasT : private Blas { Base()->template AXPY(args...); } + template + void VADD(ARGS... args) const { + Base()->template VADD(args...); + } + + template + void VCOPY(ARGS... args) const { + Base()->template VCOPY(args...); + } + template void GEMV(ARGS... args) const { Base()->template GEMV(args...); diff --git a/paddle/fluid/operators/math/blas_impl.cu.h b/paddle/fluid/operators/math/blas_impl.cu.h index c76fc17d78..d84c88cb3b 100644 --- a/paddle/fluid/operators/math/blas_impl.cu.h +++ b/paddle/fluid/operators/math/blas_impl.cu.h @@ -96,10 +96,22 @@ struct CUBlas { reinterpret_cast<__half *>(C), ldc)); } - template - static void GEMM_BATCH(ARGS... args) { + static void GEMM_BATCH(cublasHandle_t handle, cublasOperation_t transa, + cublasOperation_t transb, int m, int n, int k, + const float16 *alpha, const float16 *A, int lda, + long long int strideA, const float16 *B, // NOLINT + int ldb, long long int strideB, // NOLINT + const float16 *beta, float16 *C, int ldc, + long long int strideC, // NOLINT + int batchCount) { #if CUDA_VERSION >= 8000 - PADDLE_ENFORCE(platform::dynload::cublasHgemmStridedBatched(args...)); + PADDLE_ENFORCE(platform::dynload::cublasHgemmStridedBatched( + handle, transa, transb, m, n, k, + reinterpret_cast(alpha), + reinterpret_cast(A), lda, strideA, + reinterpret_cast(B), ldb, strideB, + reinterpret_cast(beta), reinterpret_cast<__half *>(C), + ldc, strideC, batchCount)); #else PADDLE_THROW("HgemmStridedBatched is not supported on cuda <= 7.5"); #endif diff --git a/paddle/fluid/operators/math/blas_impl.h b/paddle/fluid/operators/math/blas_impl.h index 7360cc0a90..a0802ef90c 100644 --- a/paddle/fluid/operators/math/blas_impl.h +++ b/paddle/fluid/operators/math/blas_impl.h @@ -12,6 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. #pragma once +#include #include #include "paddle/fluid/operators/math/math_function.h" @@ -22,29 +23,110 @@ namespace math { template struct CBlas; +#ifdef PADDLE_WITH_MKLML template <> struct CBlas { template static void GEMM(ARGS... args) { - cblas_sgemm(args...); + platform::dynload::cblas_sgemm(args...); } +#ifdef PADDLE_WITH_LIBXSMM + template + static void SMM_GEMM(ARGS... args) { + libxsmm_sgemm(args...); + } +#endif + template static void AXPY(ARGS... 
args) { - cblas_saxpy(args...); + platform::dynload::cblas_saxpy(args...); + } + + template + static void VCOPY(ARGS... args) { + platform::dynload::cblas_scopy(args...); } template static void GEMV(ARGS... args) { - cblas_sgemv(args...); + platform::dynload::cblas_sgemv(args...); } -#ifdef PADDLE_WITH_MKLML template static void GEMM_BATCH(ARGS... args) { - cblas_sgemm_batch(args...); + platform::dynload::cblas_sgemm_batch(args...); + } + + template + static void VADD(ARGS... args) { + platform::dynload::vsAdd(args...); + } +}; + +template <> +struct CBlas { + template + static void GEMM(ARGS... args) { + platform::dynload::cblas_dgemm(args...); + } + +#ifdef PADDLE_WITH_LIBXSMM + template + static void SMM_GEMM(ARGS... args) { + libxsmm_dgemm(args...); } #endif + + template + static void AXPY(ARGS... args) { + platform::dynload::cblas_daxpy(args...); + } + + template + static void VCOPY(ARGS... args) { + platform::dynload::cblas_dcopy(args...); + } + + template + static void GEMV(ARGS... args) { + platform::dynload::cblas_dgemv(args...); + } + + template + static void GEMM_BATCH(ARGS... args) { + platform::dynload::cblas_dgemm_batch(args...); + } + + template + static void VADD(ARGS... args) { + platform::dynload::vdAdd(args...); + } +}; + +#else + +template <> +struct CBlas { + template + static void GEMM(ARGS... args) { + cblas_sgemm(args...); + } + + template + static void AXPY(ARGS... args) { + cblas_saxpy(args...); + } + + template + static void VCOPY(ARGS... args) { + cblas_scopy(args...); + } + + template + static void GEMV(ARGS... args) { + cblas_sgemv(args...); + } }; template <> @@ -60,21 +142,23 @@ struct CBlas { } template - static void GEMV(ARGS... args) { - cblas_dgemv(args...); + static void VCOPY(ARGS... args) { + cblas_dcopy(args...); } -#ifdef PADDLE_WITH_MKLML template - static void GEMM_BATCH(ARGS... args) { - cblas_dgemm_batch(args...); + static void GEMV(ARGS... args) { + cblas_dgemv(args...); } -#endif }; +#endif template <> struct CBlas { static void GEMM(...) { PADDLE_THROW("float16 GEMM not supported on CPU"); } + static void SMM_GEMM(...) { + PADDLE_THROW("float16 SMM_GEMM not supported on CPU"); + } #ifdef PADDLE_WITH_MKLML static void GEMM_BATCH(...) 
{ PADDLE_THROW("float16 GEMM_BATCH not supported on CPU"); @@ -82,6 +166,64 @@ struct CBlas { #endif }; +template +inline bool UseXSMM(const int &m, const int &n, const int &k, bool transa, + bool transb, const T &alpha, const T &beta) { +#ifdef PADDLE_WITH_LIBXSMM + // Refer to https://github.com/hfp/libxsmm/blob/master/README.md + // But the threshold is custom + constexpr int LIBXSMM_THRESHOLD = 20 * 20 * 20; + if (m * n * k > LIBXSMM_THRESHOLD || transa || transb || + std::abs(alpha - static_cast(1) > + std::numeric_limits::epsilon()) || + std::abs(beta) > std::numeric_limits::epsilon()) { + return false; + } else { + return true; + } +#endif + return false; +} + +template <> +inline bool UseXSMM(const int &m, const int &n, const int &k, + bool transa, bool transb, + const platform::float16 &alpha, + const platform::float16 &beta) { + return false; +} + +template +inline void GEMM_WARP(CBLAS_ORDER order, CBLAS_TRANSPOSE transA, + CBLAS_TRANSPOSE transB, int M, int N, int K, T alpha, + const T *A, int lda, const T *B, int ldb, T beta, T *C, + int ldc) { +#ifdef PADDLE_WITH_LIBXSMM + if (UseXSMM(M, N, K, transA != CblasNoTrans, transB != CblasNoTrans, alpha, + beta)) { + // Note: SMM use ColMajor + const char transa = 'N'; + const char transb = 'N'; + CBlas::SMM_GEMM(&transa, &transb, &N, &M, &K, &alpha, B, &ldb, A, &lda, + &beta, C, &ldc); + return; + } +#endif + +#ifdef PADDLE_MKL_SPLIT_GEMM + constexpr int bs = 2; + if (M % bs == 0 && transA == CblasNoTrans && transB == CblasNoTrans) { + for (int off = 0; off < M; off += bs) { + CBlas::GEMM(CblasRowMajor, CblasNoTrans, CblasNoTrans, bs, N, K, alpha, + A + off * lda, lda, B, ldb, beta, C + off * ldb, ldc); + } + return; + } +#endif + CBlas::GEMM(CblasRowMajor, transA, transB, M, N, K, alpha, A, lda, B, ldb, + beta, C, ldc); +} + template <> template void Blas::GEMM(CBLAS_TRANSPOSE transA, @@ -91,8 +233,8 @@ void Blas::GEMM(CBLAS_TRANSPOSE transA, int lda = (transA == CblasNoTrans) ? K : M; int ldb = (transB == CblasNoTrans) ? N : K; int ldc = N; - CBlas::GEMM(CblasRowMajor, transA, transB, M, N, K, alpha, A, lda, B, ldb, - beta, C, ldc); + GEMM_WARP(CblasRowMajor, transA, transB, M, N, K, alpha, A, lda, B, ldb, + beta, C, ldc); } template <> @@ -101,9 +243,9 @@ void Blas::GEMM(bool transA, bool transB, int M, int N, int K, T alpha, const T *A, int lda, const T *B, int ldb, T beta, T *C, int ldc) const { - CBlas::GEMM(CblasRowMajor, transA == false ? CblasNoTrans : CblasTrans, - transB == false ? CblasNoTrans : CblasTrans, M, N, K, alpha, A, - lda, B, ldb, beta, C, ldc); + GEMM_WARP(CblasRowMajor, transA == false ? CblasNoTrans : CblasTrans, + transB == false ? 
CblasNoTrans : CblasTrans, M, N, K, alpha, A, + lda, B, ldb, beta, C, ldc); } template @@ -139,6 +281,24 @@ void Blas::AXPY(int n, T alpha, const T *x, CBlas::AXPY(n, alpha, x, 1, y, 1); } +template <> +template +void Blas::VCOPY(int n, const T *x, T *y) const { + CBlas::VCOPY(n, x, 1, y, 1); +} + +template <> +template +void Blas::VADD(int n, const T *x, const T *y, + T *z) const { +#ifdef PADDLE_WITH_MKLML + CBlas::VADD(n, x, y, z); +#else + this->template VCOPY(n, y, z); + this->template AXPY(n, 1., x, z); +#endif +} + template <> template void Blas::GEMV(bool trans_a, int M, int N, T alpha, @@ -172,14 +332,39 @@ void Blas::BatchedGEMM( c_array.data(), &ldc, 1 /* group_count */, &batchCount); #else for (int k = 0; k < batchCount; ++k) { - const float *Ak = &A[k * strideA]; - const float *Bk = &B[k * strideB]; - float *Ck = &C[k * M * N]; + auto *Ak = &A[k * strideA]; + auto *Bk = &B[k * strideB]; + auto *Ck = &C[k * M * N]; this->template GEMM(transA, transB, M, N, K, alpha, Ak, Bk, beta, Ck); } #endif } +template +template +void Blas::MatMul(const framework::Tensor &mat_a, + const MatDescriptor &dim_a, + const framework::Tensor &mat_b, + const MatDescriptor &dim_b, T alpha, + framework::Tensor *mat_out, T beta) const { + PADDLE_ENFORCE_EQ(dim_a.width_, dim_b.height_); + CBLAS_TRANSPOSE transA = !dim_a.trans_ ? CblasNoTrans : CblasTrans; + CBLAS_TRANSPOSE transB = !dim_b.trans_ ? CblasNoTrans : CblasTrans; + if (dim_a.batch_size_ == 0 && dim_b.batch_size_ == 0) { + this->template GEMM(transA, transB, dim_a.height_, dim_b.width_, + dim_a.width_, alpha, mat_a.data(), + mat_b.data(), beta, mat_out->data()); + } else { + PADDLE_ENFORCE(dim_a.batch_size_ == dim_b.batch_size_ || + dim_a.batch_size_ == 0 || dim_b.batch_size_ == 0); + this->template BatchedGEMM( + transA, transB, dim_a.height_, dim_b.width_, dim_a.width_, alpha, + mat_a.data(), mat_b.data(), beta, mat_out->data(), + dim_a.batch_size_ == 0 ? 
dim_b.batch_size_ : dim_a.batch_size_, + dim_a.stride_, dim_b.stride_); + } +} + } // namespace math } // namespace operators } // namespace paddle diff --git a/paddle/fluid/operators/math/concat.cc b/paddle/fluid/operators/math/concat.cc index cc69212466..55c8a472ac 100644 --- a/paddle/fluid/operators/math/concat.cc +++ b/paddle/fluid/operators/math/concat.cc @@ -70,21 +70,23 @@ template class ConcatGradFunctor { public: void operator()(const platform::CPUDeviceContext& context, - const framework::Tensor& input, const int axis, - std::vector* outputs) { + const framework::Tensor& input, + const std::vector& ref_inputs, + const int axis, std::vector* outputs) { // TODO(zcd): Add input data validity checking - int num = outputs->size(); + size_t num = outputs->size(); int input_rows = 1; - auto dim_0 = outputs->at(0).dims(); + auto dim_0 = ref_inputs[0]->dims(); for (int i = 0; i < axis; ++i) { input_rows *= dim_0[i]; } + int input_cols = 0; std::vector output_cols(outputs->size()); - for (int i = 0; i < num; ++i) { - int t_cols = outputs->at(i).numel() / input_rows; + for (size_t i = 0; i < num; ++i) { + int t_cols = ref_inputs[i]->numel() / input_rows; input_cols += t_cols; output_cols[i] = t_cols; } @@ -94,11 +96,14 @@ class ConcatGradFunctor { for (int k = 0; k < input_rows; ++k) { const T* src_ptr = input.data() + k * input_cols; int col_idx = 0; - for (int j = 0; j < num; ++j) { + for (size_t j = 0; j < num; ++j) { int col_len = output_cols[j]; - T* dst_ptr = outputs->at(j).data() + k * col_len; - memory::Copy(cpu_place, dst_ptr, cpu_place, src_ptr + col_idx, - sizeof(T) * col_len); + auto* out_tensor = outputs->at(j); + if (out_tensor != nullptr) { + T* dst_ptr = out_tensor->data() + k * col_len; + memory::Copy(cpu_place, dst_ptr, cpu_place, src_ptr + col_idx, + sizeof(T) * col_len); + } col_idx += col_len; } } diff --git a/paddle/fluid/operators/math/concat.cu b/paddle/fluid/operators/math/concat.cu index 4285d38dcd..5863d74fca 100644 --- a/paddle/fluid/operators/math/concat.cu +++ b/paddle/fluid/operators/math/concat.cu @@ -22,43 +22,24 @@ namespace paddle { namespace operators { namespace math { -template -__device__ T upper_bound(const T* first, T count, T val) { - const T* orig = first; - const T* it = nullptr; - T step = 0; - while (count > 0) { - it = first; - step = count / 2; - it += step; - if (!(val < *it)) { - first = ++it; - count -= step + 1; - } else { - count = step; - } - } - return first - orig; -} - template __global__ void KernelConcat(T** inputs, const int* input_cols, int col_size, const int output_rows, const int output_cols, T* output) { int tid_x = blockIdx.x * blockDim.x + threadIdx.x; - int segment = upper_bound(input_cols, col_size, tid_x) - 1; - - int curr_offset = input_cols[segment]; - int curr_segment = segment; + int curr_segment = 0; + int curr_offset = input_cols[0]; for (; tid_x < output_cols; tid_x += blockDim.x * gridDim.x) { - T curr_col_offset; - while ((curr_col_offset = input_cols[curr_segment + 1]) <= tid_x) { + int curr_col_offset = input_cols[curr_segment + 1]; + while (curr_col_offset <= tid_x) { curr_offset = curr_col_offset; ++curr_segment; + curr_col_offset = input_cols[curr_segment + 1]; } int local_col = tid_x - curr_offset; int segment_width = curr_col_offset - curr_offset; + T* input_ptr = inputs[curr_segment]; int tid_y = blockIdx.y * blockDim.y + threadIdx.y; for (; tid_y < output_rows; tid_y += blockDim.y * gridDim.y) @@ -89,23 +70,25 @@ __global__ void KernelConcatGrad(const T* input_data, const int in_row, const int in_col, 
const int* out_cols, int out_cols_size, T** outputs_data) { int tid_x = blockIdx.x * blockDim.x + threadIdx.x; - int segment = upper_bound(out_cols, out_cols_size, tid_x) - 1; - int curr_offset = out_cols[segment]; - int curr_segment = segment; + int curr_segment = 0; + int curr_offset = out_cols[0]; for (; tid_x < in_col; tid_x += blockDim.x * gridDim.x) { - T curr_col_offset; - while ((curr_col_offset = out_cols[curr_segment + 1]) <= tid_x) { + int curr_col_offset = out_cols[curr_segment + 1]; + while (curr_col_offset <= tid_x) { curr_offset = curr_col_offset; ++curr_segment; + curr_col_offset = out_cols[curr_segment + 1]; } int local_col = tid_x - curr_offset; int segment_width = curr_col_offset - curr_offset; T* output_ptr = outputs_data[curr_segment]; - int tid_y = blockIdx.y * blockDim.y + threadIdx.y; - for (; tid_y < in_row; tid_y += blockDim.y * gridDim.y) - output_ptr[tid_y * segment_width + local_col] = - input_data[tid_y * in_col + tid_x]; + if (output_ptr != nullptr) { + int tid_y = blockIdx.y * blockDim.y + threadIdx.y; + for (; tid_y < in_row; tid_y += blockDim.y * gridDim.y) + output_ptr[tid_y * segment_width + local_col] = + input_data[tid_y * in_col + tid_x]; + } } } @@ -118,10 +101,12 @@ __global__ void KernelConcatGrad(const T* input_data, const int in_row, int split = tid_x / fixed_out_col; int in_offset = tid_x - split * fixed_out_col; T* output_ptr = outputs_data[split]; - int tid_y = blockIdx.y * blockDim.y + threadIdx.y; - for (; tid_y < in_row; tid_y += blockDim.y * gridDim.y) - output_ptr[tid_y * fixed_out_col + in_offset] = - input_data[tid_y * in_col + tid_x]; + if (output_ptr != nullptr) { + int tid_y = blockIdx.y * blockDim.y + threadIdx.y; + for (; tid_y < in_row; tid_y += blockDim.y * gridDim.y) + output_ptr[tid_y * fixed_out_col + in_offset] = + input_data[tid_y * in_col + tid_x]; + } } } @@ -203,17 +188,18 @@ template class ConcatGradFunctor { public: void operator()(const platform::CUDADeviceContext& context, - const framework::Tensor& input, const int axis, - std::vector* outputs) { + const framework::Tensor& input, + const std::vector& ref_inputs, + const int axis, std::vector* outputs) { // TODO(zcd): Add input data validity checking int o_num = outputs->size(); int out_row = 1; - auto dim_0 = outputs->at(0).dims(); + auto dim_0 = ref_inputs[0]->dims(); for (int i = 0; i < axis; ++i) { out_row *= dim_0[i]; } - int out_col = outputs->at(0).numel() / out_row; + int out0_col = ref_inputs[0]->numel() / out_row; int in_col = 0, in_row = out_row; bool sameShape = true; @@ -223,13 +209,17 @@ class ConcatGradFunctor { outputs_cols[0] = 0; for (int i = 0; i < o_num; ++i) { - int t_col = outputs->at(i).numel() / out_row; + int t_col = ref_inputs.at(i)->numel() / out_row; if (sameShape) { - if (t_col != out_col) sameShape = false; + if (t_col != out0_col) sameShape = false; } in_col += t_col; outputs_cols[i + 1] = in_col; - outputs_ptr[i] = outputs->at(i).data(); + if (outputs->at(i) != nullptr) { + outputs_ptr[i] = outputs->at(i)->data(); + } else { + outputs_ptr[i] = nullptr; + } } T** dev_out_gpu_data = @@ -255,7 +245,7 @@ class ConcatGradFunctor { if (sameShape) { KernelConcatGrad<<>>( - input.data(), in_row, in_col, out_col, dev_out_gpu_data); + input.data(), in_row, in_col, out0_col, dev_out_gpu_data); } else { const int* dev_outs_col_data = outputs_cols.CUDAData(context.GetPlace()); KernelConcatGrad<<>>( diff --git a/paddle/fluid/operators/math/concat.h b/paddle/fluid/operators/math/concat.h index 041ce8bf8a..9e080f2e8b 100644 --- 
a/paddle/fluid/operators/math/concat.h +++ b/paddle/fluid/operators/math/concat.h @@ -57,7 +57,8 @@ template class ConcatGradFunctor { public: void operator()(const DeviceContext& context, const framework::Tensor& input, - const int axis, std::vector* outputs); + const std::vector& ref_inputs, + const int axis, std::vector* outputs); }; } // namespace math diff --git a/paddle/fluid/operators/math/cross_entropy.cc b/paddle/fluid/operators/math/cross_entropy.cc index fc0fca5ad3..caff35e03a 100644 --- a/paddle/fluid/operators/math/cross_entropy.cc +++ b/paddle/fluid/operators/math/cross_entropy.cc @@ -46,7 +46,10 @@ class CrossEntropyFunctor { const int64_t* label_data = labels->data(); for (int i = 0; i < batch_size; ++i) { - int index = i * class_num + label_data[i]; + int lbl = label_data[i]; + PADDLE_ENFORCE_GE(lbl, 0); + PADDLE_ENFORCE_LT(lbl, class_num); + int index = i * class_num + lbl; loss_data[i] = -math::TolerableValue()(std::log(prob_data[index])); } } diff --git a/paddle/fluid/operators/math/detail/avx_functions.cc b/paddle/fluid/operators/math/detail/avx_functions.cc index b95109d3f7..5641f91452 100644 --- a/paddle/fluid/operators/math/detail/avx_functions.cc +++ b/paddle/fluid/operators/math/detail/avx_functions.cc @@ -17,7 +17,7 @@ limitations under the License. */ #include #include "paddle/fluid/operators/math/detail/activation_functions.h" // TODO(qingqing) refine this dependence -#include "paddle/cuda/src/avx_mathfun.h" +#include "paddle/legacy/cuda/src/avx_mathfun.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/math/functors.h b/paddle/fluid/operators/math/functors.h new file mode 100644 index 0000000000..ad2f49ccbf --- /dev/null +++ b/paddle/fluid/operators/math/functors.h @@ -0,0 +1,71 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#pragma once + +namespace paddle { +namespace operators { +namespace math { + +// AddFunctor +template +struct AddFunctor { + // out = x + y; + inline HOSTDEVICE T operator()(T x, T y) { return x + y; } +}; + +template +struct AddGradFunctor { + inline HOSTDEVICE T operator()(T x, T y) { return 1; } + + inline HOSTDEVICE T operator()(T x, T y, T out) const { return 1; } +}; + +template +struct ScaleFunctor { + explicit ScaleFunctor(const T coeff) : coeff_(coeff) {} + + inline HOSTDEVICE T operator()(T ele) { return ele * coeff_; } + + private: + T coeff_; +}; + +template +struct ScaleGradFunctor { + explicit ScaleGradFunctor(T coeff) : coeff_(coeff) {} + + inline HOSTDEVICE T operator()(T x) { return coeff_; } + + inline HOSTDEVICE T operator()(T x, T out) { return coeff_; } + + private: + T coeff_; +}; + +template +struct ReluFunctor { + inline HOSTDEVICE T operator()(T x) { return x * (x > 0); } +}; + +template +struct ReluGradFunctor { + inline HOSTDEVICE T operator()(T x) { return x > 0 ? 1 : 0; } + + inline HOSTDEVICE T operator()(T x, T out) { return x > 0 ? 
1 : 0; } +}; + +} // namespace math +} // namespace operators +} // namespace paddle diff --git a/paddle/fluid/operators/math/im2col.cc b/paddle/fluid/operators/math/im2col.cc index 336d6febc2..1472edbbf4 100644 --- a/paddle/fluid/operators/math/im2col.cc +++ b/paddle/fluid/operators/math/im2col.cc @@ -14,6 +14,7 @@ limitations under the License. */ #include "paddle/fluid/operators/math/im2col.h" #include +#include "paddle/fluid/operators/math/im2col_cfo_cpu.h" namespace paddle { namespace operators { @@ -35,51 +36,18 @@ class Im2ColFunctordims().size() == 5); - int im_channels = im.dims()[0]; - int im_height = im.dims()[1]; - int im_width = im.dims()[2]; - int filter_height = col->dims()[1]; - int filter_width = col->dims()[2]; - int col_height = col->dims()[3]; - int col_width = col->dims()[4]; - - PADDLE_ENFORCE_EQ((im_height + padding[0] + padding[2] - - ((dilation[0] * (filter_height - 1) + 1))) / - stride[0] + - 1, - col_height, - "Output_height and padding(padding_up, padding_down) are " - "inconsistent."); - PADDLE_ENFORCE_EQ((im_width + padding[1] + padding[3] - - ((dilation[1] * (filter_width - 1) + 1))) / - stride[1] + - 1, - col_width, - "Output_height and padding(padding_up, padding_down) are " - "inconsistent."); - - int channels_col = im_channels * filter_height * filter_width; - - const T* im_data = im.data(); - T* col_data = col->data(); - for (int c = 0; c < channels_col; ++c) { - int w_offset = c % filter_width; - int h_offset = (c / filter_width) % filter_height; - int c_im = c / (filter_width * filter_height); - for (int h = 0; h < col_height; ++h) { - int im_row_idx = h * stride[0] - padding[0] + h_offset * dilation[0]; - for (int w = 0; w < col_width; ++w) { - int im_col_idx = w * stride[1] - padding[1] + w_offset * dilation[1]; - int col_idx = (c * col_height + h) * col_width + w; - int im_idx = (im_row_idx + c_im * im_height) * im_width + im_col_idx; - - col_data[col_idx] = (im_row_idx < 0 || im_row_idx >= im_height || - im_col_idx < 0 || im_col_idx >= im_width) - ? 
static_cast(0) - : im_data[im_idx]; - } + if (stride[0] == 1 && stride[1] == 1 && dilation[0] == 1 && + dilation[1] == 1) { + if (padding[0] == 0 && padding[1] == 0) { + im2col_sh1sw1dh1dw1ph0pw0(im, col); + return; + } else if (padding[0] == 1 && padding[1] == 1) { + im2col_sh1sw1dh1dw1ph1pw1(im, col); + return; } + // TODO(TJ): complete padding >=2 } + im2col_common(im, dilation, stride, padding, col); } }; @@ -178,17 +146,6 @@ class Im2ColFunctordims()[0]; int col_width = col->dims()[1]; - PADDLE_ENFORCE_EQ( - (im_height + padding[0] + padding[2] - filter_height) / stride[0] + 1, - col_height, - "Output_height and padding(padding_up, padding_down) are " - "inconsistent."); - PADDLE_ENFORCE_EQ( - (im_width + padding[1] + padding[3] - filter_width) / stride[1] + 1, - col_width, - "col_width and padding(padding_left, padding_right) are " - "inconsistent."); - const T* im_data = im.data(); T* col_data = col->data(); diff --git a/paddle/fluid/operators/math/im2col.cu b/paddle/fluid/operators/math/im2col.cu index eecb233d22..4897767f4d 100644 --- a/paddle/fluid/operators/math/im2col.cu +++ b/paddle/fluid/operators/math/im2col.cu @@ -77,21 +77,6 @@ class Im2ColFunctordims()[3]; int col_width = col->dims()[4]; - PADDLE_ENFORCE_EQ((im_height + padding[0] + padding[2] - - (dilation[0] * (filter_height - 1) + 1)) / - stride[0] + - 1, - col_height, - "Output_height and padding(padding_up, padding_down) are " - "inconsistent."); - PADDLE_ENFORCE_EQ((im_width + padding[1] + padding[3] - - (dilation[1] * (filter_width - 1) + 1)) / - stride[1] + - 1, - col_width, - "col_width and padding(padding_left, padding_right) are " - "inconsistent."); - int num_outputs = im_channels * col_height * col_width; int blocks = (num_outputs + 1024 - 1) / 1024; int block_x = 512; @@ -274,21 +259,6 @@ class Im2ColFunctordims()[0]; int col_width = col->dims()[1]; - PADDLE_ENFORCE_EQ((im_height + padding[0] + padding[2] - - (dilation[0] * (filter_height - 1) + 1)) / - stride[0] + - 1, - col_height, - "Output_height and padding(padding_up, padding_down) are " - "inconsistent."); - PADDLE_ENFORCE_EQ((im_width + padding[1] + padding[3] - - (dilation[1] * (filter_width - 1) + 1)) / - stride[1] + - 1, - col_width, - "col_width and padding(padding_left, padding_right) are " - "inconsistent."); - int block_dim_x = 0; int block_dim_y = 0; if (filter_height <= 4 && filter_width <= 4) { diff --git a/paddle/fluid/operators/math/im2col_cfo_cpu.h b/paddle/fluid/operators/math/im2col_cfo_cpu.h new file mode 100644 index 0000000000..0d32bc5bd0 --- /dev/null +++ b/paddle/fluid/operators/math/im2col_cfo_cpu.h @@ -0,0 +1,252 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#pragma once + +#include +#include "paddle/fluid/framework/tensor.h" + +namespace paddle { +namespace operators { +namespace math { + +/** + * The most common im2col algorithm. + * Support dilation, stride and padding. 
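+ *
+ * For example (illustrative figures): an input im of shape [C, H, W] with an
+ * fh x fw filter, stride 1 and zero padding produces col of shape
+ * [C, fh, fw, H - fh + 1, W - fw + 1]; e.g. im [1, 4, 4] with a 3 x 3 filter
+ * gives col [1, 3, 3, 2, 2].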
+ */ +template +inline void im2col_common(const framework::Tensor& im, + const std::vector& dilation, + const std::vector& stride, + const std::vector& padding, + framework::Tensor* col) { + int im_channels = im.dims()[0]; + int im_height = im.dims()[1]; + int im_width = im.dims()[2]; + int filter_height = col->dims()[1]; + int filter_width = col->dims()[2]; + int output_height = col->dims()[3]; + int output_width = col->dims()[4]; + int channels_col = im_channels * filter_height * filter_width; + + const T* im_data = im.data(); + T* col_data = col->data(); + for (int c = 0; c < channels_col; ++c) { + int w_offset = c % filter_width; + int h_offset = (c / filter_width) % filter_height; + int c_im = c / (filter_width * filter_height); + for (int h = 0; h < output_height; ++h) { + int im_row_idx = h * stride[0] - padding[0] + h_offset * dilation[0]; + for (int w = 0; w < output_width; ++w) { + int im_col_idx = w * stride[1] - padding[1] + w_offset * dilation[1]; + int col_idx = (c * output_height + h) * output_width + w; + int im_idx = (im_row_idx + c_im * im_height) * im_width + im_col_idx; + col_data[col_idx] = (im_row_idx < 0 || im_row_idx >= im_height || + im_col_idx < 0 || im_col_idx >= im_width) + ? static_cast(0) + : im_data[im_idx]; + } + } + } +} + +/** + * im2col algorithm with strides == 1, dilations == 1, paddings == 0 + */ +template +inline void im2col_sh1sw1dh1dw1ph0pw0(const framework::Tensor& im, + framework::Tensor* col) { + int im_channels = im.dims()[0]; + int im_height = im.dims()[1]; + int im_width = im.dims()[2]; + int filter_height = col->dims()[1]; + int filter_width = col->dims()[2]; + int output_height = col->dims()[3]; + int output_width = col->dims()[4]; + + const T* im_data = im.data(); + T* col_data = col->data(); + int col_matrix_width = output_width * output_height; + int im_size = im_height * im_width; + size_t copy_size = sizeof(T) * output_width; + const T* im_data_oh = im_data; + T* dst_data_oh = col_data; + for (int oh = 0; oh < output_height; ++oh) { + const T* src_data_ic = im_data_oh; + T* dst_data = dst_data_oh; + for (int ic = 0; ic < im_channels; ++ic) { + const T* src_data = src_data_ic; + for (int kh = 0; kh < filter_height; ++kh) { + for (int kw = 0; kw < filter_width; ++kw) { + std::memcpy(dst_data, src_data + kw, copy_size); + dst_data = dst_data + col_matrix_width; + } + src_data = src_data + im_width; + } + src_data_ic = src_data_ic + im_size; + } + im_data_oh = im_data_oh + im_width; + dst_data_oh = dst_data_oh + output_width; + } +} + +/** + * im2col algorithm with strides == 1, dilations == 1, paddings == 1 + * and filter_width == 1 have a special implementation + */ +template +inline void im2col_sh1sw1dh1dw1ph1pw1(const framework::Tensor& im, + framework::Tensor* col) { + int im_channels = im.dims()[0]; + int im_height = im.dims()[1]; + int im_width = im.dims()[2]; + int filter_height = col->dims()[1]; + int filter_width = col->dims()[2]; + int output_height = col->dims()[3]; + int output_width = col->dims()[4]; + + constexpr int plh = 1; + constexpr int prh = 1; + constexpr int plw = 1; + constexpr int prw = 1; + + const T* im_data = im.data(); + T* col_data = col->data(); + int im_size = im_height * im_width; + int col_matrix_width = output_width * output_height; + int col_block_fh = filter_width * col_matrix_width; // fw*oh*ow + int col_block_ic = filter_height * col_block_fh; // fh*fw*oh*ow + + // fill height padding + { + size_t copy_size = sizeof(T) * output_width; + T* col_start_l = col_data; + T* col_start_r = col_data + 
(filter_height - 1) * col_block_fh + + col_matrix_width - output_width; + for (int ic = 0; ic < im_channels; ++ic) { + T* dst_data_l = col_start_l; + T* dst_data_r = col_start_r; + for (int kw = 0; kw < filter_width; ++kw) { + std::memset(dst_data_l, 0, copy_size); + std::memset(dst_data_r, 0, copy_size); + dst_data_l = dst_data_l + col_matrix_width; + dst_data_r = dst_data_r + col_matrix_width; + } + col_start_l = col_start_l + col_block_ic; + col_start_r = col_start_r + col_block_ic; + } + } + + auto pad = static_cast(0); + if (filter_width == 1) { + // fill width padding + T* dst_data_ic = col_data; + for (int ic = 0; ic < im_channels; ++ic) { + T* dst_data_kh = dst_data_ic; + for (int kh = 0; kh < filter_height; ++kh) { + T* dst_data = dst_data_kh; + for (int oh = 0; oh < output_height; ++oh) { + *dst_data = pad; + dst_data = dst_data + output_width - 1; + *dst_data = pad; + ++dst_data; + } + dst_data_kh = dst_data_kh + col_block_fh; + } + dst_data_ic = dst_data_ic + col_block_ic; + } + // fill core + size_t copy_size = sizeof(T) * (output_width - plw - prw); + for (int oh = 0; oh < output_height; ++oh) { + const T* im_data_start = + im_data + (oh - plh > 0 ? oh - plh : 0) * im_width; + T* dst_data = col_data + oh * output_width; + for (int ic = 0; ic < im_channels; ++ic) { + const T* src_data = im_data_start + ic * im_size; + for (int kh = 0; kh < filter_height; ++kh) { + if ((oh < plh && kh < plh) || (oh > (output_height - prh - 1) && + kh > (filter_height - prh - 1))) { + dst_data = dst_data + col_matrix_width; + continue; + } + std::memcpy(dst_data + plw, src_data, copy_size); + dst_data = dst_data + col_matrix_width; + src_data = src_data + im_width; + } + } + } + return; + } + + // filter_width != 1 + // fill width padding + T* dst_data_ic = col_data; + for (int ic = 0; ic < im_channels; ++ic) { + T* dst_data_kh = dst_data_ic; + for (int kh = 0; kh < filter_height; ++kh) { + for (T* dst_data : + {dst_data_kh, dst_data_kh + (filter_width - prw) * col_matrix_width + + output_width - 1}) { + // TODO(TJ): from plh, saving repeated assignment + for (int oh = 0; oh < output_height; ++oh) { + *dst_data = pad; + dst_data = dst_data + output_width; + } + } + dst_data_kh = dst_data_kh + col_block_fh; + } + dst_data_ic = dst_data_ic + col_block_ic; + } + + // TODO(TJ): use array like: size_t copy_size[kw]={sizeof(T) * + // (output_width-1)} + // length of copy_size is equal kw. + for (int oh = 0; oh < output_height; ++oh) { + const T* im_data_start = im_data + (oh - plh > 0 ? 
oh - plh : 0) * im_width; + T* dst_data = col_data + oh * output_width; + for (int ic = 0; ic < im_channels; ++ic) { + const T* src_data = im_data_start + ic * im_size; + for (int kh = 0; kh < filter_height; ++kh) { + if ((oh < plh && kh < plh) || (oh > (output_height - prh - 1) && + kh > (filter_height - prh - 1))) { + dst_data = dst_data + filter_width * col_matrix_width; + continue; + } + // TODO(TJ): reuse plw-kw outside this for + // try to unify + for (int kw = 0; kw < plw; ++kw) { + std::memcpy(dst_data + (plw - kw), src_data, + sizeof(T) * (output_width - (plw - kw))); + dst_data = dst_data + col_matrix_width; + } + for (int kw = plw; kw < filter_width - prw; ++kw) { + std::memcpy(dst_data, src_data + (kw - plw), + sizeof(T) * output_width); + dst_data = dst_data + col_matrix_width; + } + int i = 1; + for (int kw = filter_width - prw; kw < filter_width; ++kw, ++i) { + std::memcpy(dst_data, src_data + (kw - plw), + sizeof(T) * (output_width - i)); + dst_data = dst_data + col_matrix_width; + } + src_data = src_data + im_width; + } + } + } +} + +} // namespace math +} // namespace operators +} // namespace paddle diff --git a/paddle/fluid/operators/math/im2col_test.cc b/paddle/fluid/operators/math/im2col_test.cc index 8e3f0f2868..ae2c90b33a 100644 --- a/paddle/fluid/operators/math/im2col_test.cc +++ b/paddle/fluid/operators/math/im2col_test.cc @@ -14,7 +14,9 @@ limitations under the License. */ #include "paddle/fluid/operators/math/im2col.h" #include +#include #include +#include "paddle/fluid/operators/math/im2col_cfo_cpu.h" template void testIm2col() { @@ -167,3 +169,104 @@ TEST(math, im2col) { paddle::platform::CUDAPlace>(); #endif } + +#define PREPARE_IM2COL_CPU \ + paddle::platform::CPUPlace place; \ + paddle::platform::CPUDeviceContext context(place); \ + paddle::framework::Tensor input; \ + paddle::framework::Tensor out; \ + paddle::framework::Tensor ref; \ + std::vector padding({ph, pw}); \ + std::vector stride({1, 1}); \ + std::vector dilation({1, 1}); \ + float* input_ptr = input.mutable_data({ic, ih, iw}, place); \ + for (int i = 0; i < input.numel(); ++i) { \ + input_ptr[i] = static_cast(i + 1); \ + } \ + int output_height = (ih - fh + padding[0] * 2) / stride[0] + 1; \ + int output_width = (iw - fw + padding[1] * 2) / stride[1] + 1; \ + out.mutable_data({ic, fh, fw, output_height, output_width}, place); \ + ref.mutable_data({ic, fh, fw, output_height, output_width}, place); \ + paddle::operators::math::Im2ColFunctor< \ + paddle::operators::math::ColFormat::kCFO, \ + paddle::platform::CPUDeviceContext, float> \ + im2col + +void testIm2colCPU(int ic, int ih, int iw, int fh, int fw, int ph, int pw) { + PREPARE_IM2COL_CPU; + + im2col(context, input, dilation, stride, padding, &out); + paddle::operators::math::im2col_common(input, dilation, stride, + padding, &ref); + + float* ref_data = ref.data(); + float* out_data = out.data(); + for (int i = 0; i < out.numel(); ++i) { + EXPECT_EQ(out_data[i], ref_data[i]); + } +} + +void benchIm2col(int ic, int ih, int iw, int fh, int fw, int ph, int pw) { + PREPARE_IM2COL_CPU; + constexpr int repeat = 100; + auto GetCurrentMs = []() -> double { + struct timeval time; + gettimeofday(&time, NULL); + return 1e+3 * time.tv_sec + 1e-3 * time.tv_usec; + }; + auto t1 = GetCurrentMs(); + for (int i = 0; i < repeat; ++i) { + im2col(context, input, dilation, stride, padding, &out); + } + auto t2 = GetCurrentMs(); + + for (int i = 0; i < repeat; ++i) { + paddle::operators::math::im2col_common(input, dilation, stride, + padding, &ref); + } + auto t3 = 
GetCurrentMs(); + + LOG(INFO) << "before: " << (t3 - t2) / repeat + << ",after: " << (t2 - t1) / repeat + << ",boost: " << ((t3 - t2) / (t2 - t1) - 1) * 100 << "%"; +} + +TEST(math, im2col_cputest) { + // padding_h == padding_w + for (int p = 0; p < 4; ++p) { + // width == height + testIm2colCPU(/*ic*/ 2, /*ih*/ 5, /*iw*/ 5, /*fh*/ 4, /*fw*/ 4, /*ph*/ p, + /*pw*/ p); + testIm2colCPU(/*ic*/ 2, /*ih*/ 4, /*iw*/ 4, /*fh*/ 3, /*fw*/ 3, /*ph*/ p, + /*pw*/ p); + testIm2colCPU(/*ic*/ 2, /*ih*/ 4, /*iw*/ 4, /*fh*/ 2, /*fw*/ 2, /*ph*/ p, + /*pw*/ p); + + // height != width + testIm2colCPU(/*ic*/ 2, /*ih*/ 5, /*iw*/ 4, /*fh*/ 2, /*fw*/ 3, /*ph*/ p, + /*pw*/ p); + testIm2colCPU(/*ic*/ 2, /*ih*/ 5, /*iw*/ 4, /*fh*/ 1, /*fw*/ 3, /*ph*/ p, + /*pw*/ p); + testIm2colCPU(/*ic*/ 2, /*ih*/ 4, /*iw*/ 5, /*fh*/ 3, /*fw*/ 1, /*ph*/ p, + /*pw*/ p); + + // filter == 1 + testIm2colCPU(/*ic*/ 3, /*ih*/ 4, /*iw*/ 4, /*fh*/ 1, /*fw*/ 1, /*ph*/ p, + /*pw*/ p); + testIm2colCPU(/*ic*/ 3, /*ih*/ 3, /*iw*/ 4, /*fh*/ 1, /*fw*/ 1, /*ph*/ p, + /*pw*/ p); + } + + // padding_h != padding_w + testIm2colCPU(/*ic*/ 2, /*ih*/ 4, /*iw*/ 4, /*fh*/ 2, /*fw*/ 3, /*ph*/ 1, + /*pw*/ 2); + + // benchmark + for (int p : {0, 1}) { + for (int k : {1, 3, 5}) { + LOG(INFO) << "padding == " << p << ", filter == " << k; + benchIm2col(/*ic*/ 3, /*ih*/ 224, /*iw*/ 224, /*fh*/ k, /*fw*/ k, + /*ph*/ p, /*pw*/ p); + } + } +} diff --git a/paddle/fluid/operators/math/math_function.cc b/paddle/fluid/operators/math/math_function.cc index d62ea387cc..c3387be6da 100644 --- a/paddle/fluid/operators/math/math_function.cc +++ b/paddle/fluid/operators/math/math_function.cc @@ -30,6 +30,7 @@ template struct SetConstant; template struct SetConstant; template struct SetConstant; template struct SetConstant; +template struct SetConstant; #define DEFINE_CPU_TRANS(RANK) \ template struct Transpose; template struct Transpose; \ template struct Transpose; \ template struct Transpose; \ - template struct Transpose; + template struct Transpose; \ + template struct Transpose; \ + template struct Transpose; DEFINE_CPU_TRANS(1); DEFINE_CPU_TRANS(2); diff --git a/paddle/fluid/operators/math/math_function.cu b/paddle/fluid/operators/math/math_function.cu index b5bf84e517..d5af718723 100644 --- a/paddle/fluid/operators/math/math_function.cu +++ b/paddle/fluid/operators/math/math_function.cu @@ -33,9 +33,10 @@ template struct SetConstant; template struct SetConstant; template struct SetConstant; -#define DEFINE_GPU_TRANS(RANK) \ - template struct Transpose; \ - template struct Transpose; +#define DEFINE_GPU_TRANS(RANK) \ + template struct Transpose; \ + template struct Transpose; \ + template struct Transpose; DEFINE_GPU_TRANS(1); DEFINE_GPU_TRANS(2); diff --git a/paddle/fluid/operators/math/math_function.h b/paddle/fluid/operators/math/math_function.h index d4b0e17ed4..7ec78d9ef8 100644 --- a/paddle/fluid/operators/math/math_function.h +++ b/paddle/fluid/operators/math/math_function.h @@ -14,28 +14,11 @@ limitations under the License. 
*/ #pragma once #ifdef PADDLE_WITH_MKLML -#include -#include -#include +#include "paddle/fluid/platform/dynload/mklml.h" #endif #ifdef PADDLE_USE_OPENBLAS #include -#include -#endif - -#ifndef LAPACK_FOUND -extern "C" { -#include // NOLINT -int LAPACKE_sgetrf(int matrix_layout, int m, int n, float* a, int lda, - int* ipiv); -int LAPACKE_dgetrf(int matrix_layout, int m, int n, double* a, int lda, - int* ipiv); -int LAPACKE_sgetri(int matrix_layout, int n, float* a, int lda, - const int* ipiv); -int LAPACKE_dgetri(int matrix_layout, int n, double* a, int lda, - const int* ipiv); -} #endif #include diff --git a/paddle/fluid/operators/math/math_function_impl.h b/paddle/fluid/operators/math/math_function_impl.h index b9bd49d77d..895a7019aa 100644 --- a/paddle/fluid/operators/math/math_function_impl.h +++ b/paddle/fluid/operators/math/math_function_impl.h @@ -155,7 +155,7 @@ class RowwiseSum { PADDLE_ENFORCE_EQ(in_dims.size(), 2U); auto height = in_dims[0]; auto size = in_dims[1]; - PADDLE_ENFORCE_EQ(out->numel(), size); + PADDLE_ENFORCE_EQ(out->numel(), height); T* out_buf = out->mutable_data(out->place()); const T* in_buf = input.data(); diff --git a/paddle/fluid/operators/math/math_function_test.cc b/paddle/fluid/operators/math/math_function_test.cc index 3719a264e9..2343e0ee96 100644 --- a/paddle/fluid/operators/math/math_function_test.cc +++ b/paddle/fluid/operators/math/math_function_test.cc @@ -54,8 +54,64 @@ TEST(math_function, gemm_notrans_cblas) { EXPECT_EQ(input3_ptr[6], 86); EXPECT_EQ(input3_ptr[7], 99); } +#ifdef PADDLE_WITH_LIBXSMM +template +void MklSmmCompare(int m, int n, int k) { + paddle::framework::Tensor mat_a; + paddle::framework::Tensor mat_b; + paddle::framework::Tensor mat_c_smm; + paddle::framework::Tensor mat_c_mkl; + auto* cpu_place = new paddle::platform::CPUPlace(); + + T* A = mat_a.mutable_data({m, k}, *cpu_place); + T* B = mat_b.mutable_data({k, n}, *cpu_place); + T* CSMM = mat_c_smm.mutable_data({m, n}, *cpu_place); + T* CMKL = mat_c_mkl.mutable_data({m, n}, *cpu_place); + T alpha = static_cast(1); + T beta = static_cast(0); + for (int i = 0; i < mat_a.numel(); ++i) { + A[i] = static_cast(i); + } + for (int i = 0; i < mat_b.numel(); ++i) { + B[i] = static_cast(i); + } + // lda,ldb,ldc follow RowMajor + int lda = k; + int ldb = n; + int ldc = n; + + auto smm = [&, m, n, k, lda, ldb, ldc, alpha, beta]() { + const char transa = 'N'; + const char transb = 'N'; + paddle::operators::math::CBlas::SMM_GEMM(&transa, &transb, &n, &m, &k, + &alpha, B, &ldb, A, &lda, &beta, + CSMM, &ldc); + }; -TEST(math_function, gemm_trans_clbas) { + auto mkl = [&, m, n, k, lda, ldb, ldc, alpha, beta]() { + paddle::operators::math::CBlas::GEMM(CblasRowMajor, CblasNoTrans, + CblasNoTrans, m, n, k, alpha, A, + lda, B, ldb, beta, CMKL, ldc); + }; + + smm(); + mkl(); + ASSERT_EQ(mat_c_mkl.numel(), mat_c_smm.numel()); + for (int i = 0; i < mat_c_mkl.numel(); ++i) { + EXPECT_FLOAT_EQ(CSMM[i], CMKL[i]); + } +} +TEST(math_function, gemm_mkl_vs_smm) { + MklSmmCompare(1, 2, 3); + MklSmmCompare(1, 2, 3); + MklSmmCompare(3, 2, 1); + MklSmmCompare(3, 2, 1); + MklSmmCompare(3, 8, 5); + MklSmmCompare(3, 8, 5); +} +#endif + +TEST(math_function, gemm_trans_cblas) { paddle::framework::Tensor input1; paddle::framework::Tensor input2; paddle::framework::Tensor input3; @@ -77,6 +133,8 @@ TEST(math_function, gemm_trans_clbas) { paddle::platform::CPUDeviceContext context(*cpu_place); GetBlas(context).GEMM(false, true, m, n, k, 1, input1_ptr, 3, input2_ptr + 3, 3, 1, input3_ptr + 1, 4); + delete cpu_place; + 
cpu_place = NULL; EXPECT_EQ(input3_ptr[0], 0); EXPECT_EQ(input3_ptr[1], 24); @@ -170,3 +228,57 @@ TEST(math_funciton, set_constant) { } delete ctx; } + +template +void GemmWarpTest(int m, int n, int k, T alpha, T beta) { + paddle::framework::Tensor mat_a; + paddle::framework::Tensor mat_b; + paddle::framework::Tensor mat_c_ref; + paddle::framework::Tensor mat_c_mkl; + auto* cpu_place = new paddle::platform::CPUPlace(); + + T* A = mat_a.mutable_data({m, k}, *cpu_place); + T* B = mat_b.mutable_data({k, n}, *cpu_place); + T* CREF = mat_c_ref.mutable_data({m, n}, *cpu_place); + T* CMKL = mat_c_mkl.mutable_data({m, n}, *cpu_place); + + ASSERT_EQ(mat_c_mkl.numel(), mat_c_ref.numel()); + for (int i = 0; i < mat_a.numel(); ++i) { + A[i] = static_cast(i); + } + for (int i = 0; i < mat_b.numel(); ++i) { + B[i] = static_cast(i + 1); + } + for (int i = 0; i < mat_c_ref.numel(); ++i) { + CREF[i] = static_cast(i + 2); + CMKL[i] = CREF[i]; + } + + // this would call gemm_warp + paddle::platform::CPUDeviceContext context(*cpu_place); + GetBlas(context).GEMM(CblasNoTrans, CblasNoTrans, m, n, k, alpha, A, B, + beta, CREF); + + // lda,ldb,ldc follow RowMajor + int lda = k; + int ldb = n; + int ldc = n; + paddle::operators::math::CBlas::GEMM(CblasRowMajor, CblasNoTrans, + CblasNoTrans, m, n, k, alpha, A, lda, + B, ldb, beta, CMKL, ldc); + + for (int i = 0; i < mat_c_mkl.numel(); ++i) { + EXPECT_FLOAT_EQ(CREF[i], CMKL[i]); + } +} + +TEST(math_function, gemm_warp) { + GemmWarpTest(3, 2, 5, 1.f, 0.f); + GemmWarpTest(3, 2, 5, 2.f, 1.f); + GemmWarpTest(8, 5, 6, 1.f, 0.f); + GemmWarpTest(8, 5, 6, 2.f, 1.f); + GemmWarpTest(3, 2, 5, 1.0, 0.0); + GemmWarpTest(3, 2, 5, 2.0, 1.0); + GemmWarpTest(8, 5, 6, 1.0, 0.0); + GemmWarpTest(8, 5, 6, 2.0, 1.0); +} diff --git a/paddle/fluid/operators/math/matmul.h b/paddle/fluid/operators/math/matmul.h deleted file mode 100644 index 87fd38a324..0000000000 --- a/paddle/fluid/operators/math/matmul.h +++ /dev/null @@ -1,149 +0,0 @@ -/* Copyright (c) 2017 PaddlePaddle Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. */ - -#pragma once -#include -#include -#include "paddle/fluid/operators/math/blas.h" - -namespace paddle { -namespace operators { -namespace math { - -// Implements the logic of numpy matmul: -// https://docs.scipy.org/doc/numpy-1.13.0/reference/generated/numpy.matmul.html -// -// but allowing also for a, b to be transposed -// -// Both a & b can be 1- to 3-dimensional. Higher rank tensors are not supported -// yet. 
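The rank handling in the deleted MatMulFunctor below is the subtlest part of this change, so here is a small standalone sketch of the (M, K) rule it applies to the left-hand operand (plain C++; RowsCols is an illustrative helper, not an API in this patch):

#include <cstdint>
#include <utility>
#include <vector>

// (M, K) rule for operand a in the numpy-style matmul described below:
// rank 1 is treated as a row vector (or a column vector when transposed),
// rank 2 uses the matrix dims directly, rank >= 3 uses the last two dims.
std::pair<int64_t, int64_t> RowsCols(const std::vector<int64_t>& dim_a,
                                     bool trans_a) {
  if (dim_a.size() == 1) {
    return trans_a ? std::make_pair(dim_a[0], int64_t{1})
                   : std::make_pair(int64_t{1}, dim_a[0]);
  }
  size_t s = dim_a.size() - 2;
  int64_t m = trans_a ? dim_a[s + 1] : dim_a[s];
  int64_t k = trans_a ? dim_a[s] : dim_a[s + 1];
  return {m, k};
}
// e.g. RowsCols({4, 2, 3}, false) == {2, 3}: a batch of four 2x3 matrices.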
-template -class MatMulFunctor { - public: - void operator()(const DeviceContext& context, const framework::Tensor& a, - bool trans_a, const framework::Tensor& b, bool trans_b, - T alpha, framework::Tensor* out, T beta) { - auto dim_a = a.dims(); - auto dim_b = b.dims(); - - PADDLE_ENFORCE(a.place() == b.place() && b.place() == out->place(), - "Tensors must all be in the same place."); - PADDLE_ENFORCE_GE(dim_a.size(), 1, - "Input tensor a must be at least 1-dimensional."); - PADDLE_ENFORCE_GE(dim_b.size(), 1, - "Input tensor b must be at least 1-dimensional."); - - std::vector out_dim; - int64_t batch_count = 1; - if (dim_a.size() > 3) { - PADDLE_ENFORCE(dim_b.size() == dim_a.size(), - "The dimensions of X and Y must be the same, and both of " - "them should be %d-dimensional.", - dim_b.size()); - // The first rank-2 dimensions are accumulated on the batch_count, and the - // last two dimensions are used for matrix multiplication. - for (int j = 0; j < dim_a.size() - 2; ++j) { - PADDLE_ENFORCE_EQ(dim_b[j], dim_a[j], - "The %d-th dimension of X and Y must be the same.", - j); - out_dim.push_back(dim_a[j]); - batch_count *= dim_a[j]; - } - } - - int M = 0, N = 0, kA = 0, kB = 0, batchCountA = 0, batchCountB = 0, - strideA = 0, strideB = 0; - - switch (dim_a.size()) { - case 1: - // similar to np.matmul: - // prepend dimension 1 (no transpose) or append dimension 1 (transpose) - M = trans_a ? dim_a[0] : 1; - kA = trans_a ? 1 : dim_a[0]; - break; - case 2: - M = trans_a ? dim_a[1] : dim_a[0]; - kA = trans_a ? dim_a[0] : dim_a[1]; - break; - case 3: - batchCountA = dim_a[0]; - M = trans_a ? dim_a[2] : dim_a[1]; - kA = trans_a ? dim_a[1] : dim_a[2]; - strideA = M * kA; - break; - default: - batchCountA = batch_count; - size_t mat_s = dim_a.size() - 2; - M = trans_a ? dim_a[mat_s + 1] : dim_a[mat_s]; - kA = trans_a ? dim_a[mat_s] : dim_a[mat_s + 1]; - strideA = M * kA; - } - - switch (dim_b.size()) { - case 1: - // similar to np.matmul: - // append dimension 1 (no transpose) or prepend dimension 1 (transpose) - kB = trans_b ? 1 : dim_b[0]; - N = trans_b ? dim_b[0] : 1; - break; - case 2: - kB = trans_b ? dim_b[1] : dim_b[0]; - N = trans_b ? dim_b[0] : dim_b[1]; - break; - case 3: - batchCountB = dim_b[0]; - kB = trans_b ? dim_b[2] : dim_b[1]; - N = trans_b ? dim_b[1] : dim_b[2]; - strideB = kB * N; - break; - default: - batchCountB = batch_count; - size_t mat_s = dim_b.size() - 2; - kB = trans_b ? dim_b[mat_s + 1] : dim_b[mat_s]; - N = trans_b ? dim_b[mat_s] : dim_b[mat_s + 1]; - strideB = kB * N; - } - - PADDLE_ENFORCE_EQ( - kA, kB, - "First matrix's width must be equal with second matrix's height."); - if (batchCountA && batchCountB) { - PADDLE_ENFORCE_EQ( - batchCountA, batchCountB, - "When input tensors a and b are both batched, they must have the " - "same batch dimension."); - } - int batchCount = std::max(batchCountA, batchCountB); - - CBLAS_TRANSPOSE transA = (trans_a == false) ? CblasNoTrans : CblasTrans; - CBLAS_TRANSPOSE transB = (trans_b == false) ? 
CblasNoTrans : CblasTrans; - - auto blas = GetBlas(context); - - if (!batchCount) { - // regular matrix multiplication - blas.GEMM(transA, transB, M, N, kA, alpha, a.data(), b.data(), beta, - out->data()); - } else { - // batched matrix multiplication - blas.BatchedGEMM(transA, transB, M, N, kA, alpha, a.data(), - b.data(), beta, out->data(), batchCount, strideA, - strideB); - } - } -}; - -} // namespace math -} // namespace operators -} // namespace paddle diff --git a/paddle/fluid/operators/math/matrix_bit_code.cc b/paddle/fluid/operators/math/matrix_bit_code.cc new file mode 100644 index 0000000000..1e56e29739 --- /dev/null +++ b/paddle/fluid/operators/math/matrix_bit_code.cc @@ -0,0 +1,176 @@ +/* Copyright (c) 2017 PaddlePaddle Authors. All Rights Reserve. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include "paddle/fluid/operators/math/matrix_bit_code.h" +#include +namespace paddle { +namespace operators { +namespace math { + +template +void MatrixBitCodeFunctor::Add(framework::Tensor* tmat, + const framework::Tensor& vec) { + SimpleCodeTable code_table(num_classes_); + size_t batch_size = tmat->dims()[0]; + size_t width = tmat->dims()[1]; + for (size_t i = 0; i < batch_size; ++i) { + auto code = code_table(static_cast(ids_[i])); + int code_length = code.get_length(); + for (int j = 0; j < code_length; ++j) { + size_t index = code.calc_index(j); + tmat->data()[i * width + j] += vec.data()[index]; + } + } +} + +template +void MatrixBitCodeFunctor::AddGrad(const framework::Tensor& tmat, + framework::Tensor* vec) { + SimpleCodeTable code_table(num_classes_); + size_t batch_size = tmat.dims()[0]; + size_t width = tmat.dims()[1]; + for (size_t i = 0; i < batch_size; ++i) { + auto code = code_table(static_cast(ids_[i])); + int code_length = code.get_length(); + for (int j = 0; j < code_length; ++j) { + size_t index = code.calc_index(j); + vec->data()[index] += tmat.data()[i * width + j]; + } + } +} + +template +void MatrixBitCodeFunctor::Sum(const framework::Tensor& tmat, + framework::Tensor* sum, T scale_sum) { + SimpleCodeTable code_table(num_classes_); + size_t num_samples = tmat.dims()[0]; + size_t o_width = tmat.dims()[1]; + for (size_t i = 0; i < num_samples; ++i) { + T sm = static_cast(0.0); + auto code = code_table(static_cast(ids_[i])); + int code_length = code.get_length(); + for (int j = 0; j < code_length; ++j) { + if (code.calc_bit(j)) { + // calc_bit starts from right most bit, while data in tmat[i] is in the + // reverse order. 
+ sm += tmat.data()[i * o_width + j]; + } + } + sum->data()[i] = scale_sum * sm; + } +} + +template +void MatrixBitCodeFunctor::Mul(framework::Tensor* tmat, + const framework::Tensor& weight, + const framework::Tensor& input) { + SimpleCodeTable code_table(num_classes_); + size_t num_samples = tmat->dims()[0]; + size_t tmat_width = tmat->dims()[1]; + size_t input_width = input.dims()[1]; + size_t weight_width = weight.dims()[1]; + auto tmat_value = tmat->data(); + auto weight_value = weight.data(); + auto input_value = input.data(); + for (size_t i = 0; i < num_samples; ++i) { + auto code = code_table(static_cast(ids_[i])); + int code_length = code.get_length(); + for (int j = 0; j < code_length; ++j) { + size_t index = code.calc_index(j); + T sum = static_cast(0.0); + for (size_t k = 0; k < input_width; ++k) { + sum += weight_value[weight_width * index + k] * + input_value[input_width * i + k]; + } + tmat_value[i * tmat_width + j] += sum; + } + } +} + +template +void MatrixBitCodeFunctor::MulGradWeight(const framework::Tensor& tmat, + framework::Tensor* weight, + const framework::Tensor& input) { + SimpleCodeTable code_table(num_classes_); + size_t num_samples = tmat.dims()[0]; + size_t input_width = input.dims()[1]; + size_t tmat_width = tmat.dims()[1]; + size_t weight_width = weight->dims()[1]; + auto tmat_value = tmat.data(); + auto weight_value = weight->data(); + auto input_value = input.data(); + for (size_t i = 0; i < num_samples; ++i) { + auto code = code_table(static_cast(ids_[i])); + int code_length = code.get_length(); + for (int j = 0; j < code_length; ++j) { + size_t index = code.calc_index(j); + + for (size_t k = 0; k < input_width; ++k) { + weight_value[weight_width * index + k] += + tmat_value[i * tmat_width + j] * input_value[input_width * i + k]; + } + } + } +} + +template +void MatrixBitCodeFunctor::MulGradError(const framework::Tensor& tmat, + const framework::Tensor& weight, + framework::Tensor* input) { + SimpleCodeTable code_table(num_classes_); + size_t num_samples = tmat.dims()[0]; + size_t tmat_width = tmat.dims()[1]; + size_t input_width = input->dims()[1]; + size_t weight_width = weight.dims()[1]; + auto tmat_value = tmat.data(); + auto weight_value = weight.data(); + auto input_value = input->data(); + + for (size_t i = 0; i < num_samples; ++i) { + auto code = code_table(static_cast(ids_[i])); + int code_length = code.get_length(); + for (int j = 0; j < code_length; ++j) { + size_t index = code.calc_index(j); + + for (size_t k = 0; k < input_width; ++k) { + input_value[input_width * i + k] += + tmat_value[i * tmat_width + j] * + weight_value[weight_width * index + k]; + } + } + } +} + +template +void MatrixBitCodeFunctor::Sub(framework::Tensor* tmat) { + SimpleCodeTable code_table(num_classes_); + size_t num_samples = tmat->dims()[0]; + size_t o_width = tmat->dims()[1]; + for (size_t i = 0; i < num_samples; ++i) { + auto code = code_table(static_cast(ids_[i])); + int code_length = code.get_length(); + for (int j = 0; j < code_length; ++j) { + if (code.calc_bit(j)) { + tmat->data()[i * o_width + j] -= 1; + } + } + } +} + +template class MatrixBitCodeFunctor; +template class MatrixBitCodeFunctor; + +} // namespace math +} // namespace operators +} // namespace paddle diff --git a/paddle/fluid/operators/math/matrix_bit_code.h b/paddle/fluid/operators/math/matrix_bit_code.h new file mode 100644 index 0000000000..5454d58f37 --- /dev/null +++ b/paddle/fluid/operators/math/matrix_bit_code.h @@ -0,0 +1,143 @@ +/* Copyright (c) 2017 PaddlePaddle Authors. 
All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#pragma once +#include "paddle/fluid/framework/eigen.h" +#include "paddle/fluid/framework/tensor.h" +#include "paddle/fluid/platform/device_context.h" + +namespace paddle { +namespace operators { +namespace math { +/** + * SimpleCodeTable class should support 3 functions: + * + * size_t size() + * return the number of ids + * + * int get_max_code_length() + * return the maximal code length + * + * SimpleCode operator()(size_t i) + * return the i-th code. The Code class is described below. + * + * SimpleCode class should support 3 functions: + * + * int get_length() + * return the length of the code + * + * size_t calc_index(int bit) + * bit ranges from 0 to get_length() - 1 + * return the index for the (1+bit) level parent + * + * bool calc_bit(int bit) + * return true if the bit level parent is the right child of (1+bit) level + * parent + * + */ + +/** + * return the 1-based index of the highest bit set + * + * for x > 0: + * \f[ + * FindLastSet(x) = 1 + \floor*{\log_{2}x} + * \f] + */ +inline constexpr size_t FindLastSet(size_t x) { + return std::is_same<size_t, unsigned int>::value + ? (x ? 8 * sizeof(x) - __builtin_clz(x) : 0) + : (std::is_same<size_t, unsigned long>::value // NOLINT + ? (x ? 8 * sizeof(x) - __builtin_clzl(x) : 0) + : (x ? 8 * sizeof(x) - __builtin_clzll(x) : 0)); +} + +struct SimpleCode { + SimpleCode(size_t code, size_t num_classes) : c_(code + num_classes) {} + /** + * Here the id of the root should be 1 rather than 0, thus the encoding of + * class c is `c + num_classes` and all siblings can get the same weight + * indices using prefixes. + * The weight index is the prefix of the encoding, so leave out the + * rightmost bit in calc_index. + * The binary classification path is the suffix of the encoding, so leave + * out the leftmost bit in calc_bit.
+ */ + inline size_t calc_index(int bit) const { return (c_ >> (bit + 1)) - 1; } + inline bool calc_bit(int bit) const { return c_ & (1 << bit); } + inline int get_length() const { return FindLastSet(c_) - 1; } + + private: + size_t c_; +}; + +struct SimpleCodeTable { + explicit SimpleCodeTable(size_t num_classes) : num_classes_(num_classes) {} + SimpleCode operator()(size_t code) const { + return SimpleCode(code, num_classes_); + } + size_t size() const { return num_classes_; } + int get_max_code_length() const { return FindLastSet(num_classes_ - 1); } + + private: + size_t num_classes_; +}; + +template <typename T> +class MatrixBitCodeFunctor { + public: + explicit MatrixBitCodeFunctor(size_t num_classes, const int64_t* ids) + : num_classes_(num_classes), ids_(ids) {} + /* For j < code_length + tmat(i, j) += vec(0, index(i, j)) + */ + void Add(framework::Tensor* tmat, const framework::Tensor& vec); + + /* For j < code_length + vec(0, index(i, j)) += tmat(i, j) + */ + void AddGrad(const framework::Tensor& tmat, framework::Tensor* vec); + + /* For j < code_length + sum(i, 0) = \sum_j bit(i, j) * tmat(i, j) + */ + void Sum(const framework::Tensor& tmat, framework::Tensor* sum, T scale_sum); + + /* For j < code_length + tmat(i, j) -= bit(i, j) + */ + void Sub(framework::Tensor* tmat); + /* For j < code_length + input.row(i) += tmat(i, j) * weight.row(index(i, j)) + */ + void Mul(framework::Tensor* tmat, const framework::Tensor& weight, + const framework::Tensor& input); + + /* For index(i, j) >= 0: + weight.row(index(i, j)) += tmat(i, j) * input.row(i) + */ + void MulGradWeight(const framework::Tensor& tmat, framework::Tensor* weight, + const framework::Tensor& input); + /* For j < code_length + input.row(i) += tmat(i, j) * weight.row(index(i, j)) + */ + void MulGradError(const framework::Tensor& tmat, + const framework::Tensor& weight, framework::Tensor* input); + + size_t num_classes_; + const int64_t* ids_; +}; +} // namespace math +} // namespace operators +} // namespace paddle diff --git a/paddle/fluid/operators/math/sequence2batch.h b/paddle/fluid/operators/math/sequence2batch.h index 0abda999a5..07372235a7 100644 --- a/paddle/fluid/operators/math/sequence2batch.h +++ b/paddle/fluid/operators/math/sequence2batch.h @@ -64,18 +64,22 @@ class LoDTensor2BatchFunctor { bool is_reverse = false) const { if (!is_cal_batch_lod) { auto lods = batch->lod(); - PADDLE_ENFORCE_GT(lods.size(), 2UL); - PADDLE_ENFORCE_EQ(lods[1].size(), - static_cast<size_t>(lod_tensor.dims()[0])); + PADDLE_ENFORCE_GT(lods.size(), 2UL, + "The LoD of LoDTensor should include at least 2-level " + "sequence information."); + PADDLE_ENFORCE_EQ( + lods[1].size(), static_cast<size_t>(lod_tensor.dims()[0]), + "The LoD information should be consistent with the dims."); CopyMatrixRowsFunctor<DeviceContext, T> to_batch; to_batch(context, lod_tensor, lods[1], batch, true); return; } auto lods = lod_tensor.lod(); - auto lod = lods[0]; PADDLE_ENFORCE_EQ(lods.size(), 1UL, "Only support one level sequence now."); + const auto& lod = lods[0]; + + std::vector<SeqInfo> seq_info; for (size_t seq_id = 0; seq_id < lod.size() - 1; ++seq_id) { int length = lod[seq_id + 1] - lod[seq_id]; @@ -157,9 +161,12 @@ class Batch2LoDTensorFunctor { const framework::LoDTensor& batch, framework::LoDTensor* lod_tensor) const { auto in_lod = batch.lod(); - PADDLE_ENFORCE_GT(in_lod.size(), 2UL); - PADDLE_ENFORCE_EQ(in_lod[1].size(), - static_cast<size_t>(lod_tensor->dims()[0])); + PADDLE_ENFORCE_GT(in_lod.size(), 2UL, + "The LoD of LoDTensor should include at least 2-level " + "sequence information."); +
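Returning to the encoding convention of matrix_bit_code.h above: it is compact but subtle, so the following standalone sketch walks one concrete path (plain C++; the class count and id are illustrative assumptions):

#include <cstdio>

// Mirror of SimpleCode above: class c in a tree over num_classes leaves is
// encoded as c + num_classes, so the root has id 1 and siblings share
// weight-index prefixes.
int main() {
  const size_t num_classes = 6, id = 3;
  size_t c = id + num_classes;  // 9 == 0b1001
  // get_length() == FindLastSet(9) - 1 == 4 - 1 == 3 decisions on the path.
  for (int bit = 0; bit < 3; ++bit) {
    size_t index = (c >> (bit + 1)) - 1;  // calc_index: parent weight row
    bool right = c & (1 << bit);          // calc_bit: went to right child?
    std::printf("level %d: weight index %zu, bit %d\n", bit, index, right);
  }
  // Prints: level 0: weight index 3, bit 1
  //         level 1: weight index 1, bit 0
  //         level 2: weight index 0, bit 0
}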
PADDLE_ENFORCE_EQ( + in_lod[1].size(), static_cast(lod_tensor->dims()[0]), + "The LoD information should be consistent with the dims."); CopyMatrixRowsFunctor to_seq; to_seq(context, batch, in_lod[1], lod_tensor, false); } diff --git a/paddle/fluid/operators/math/softmax.cu b/paddle/fluid/operators/math/softmax.cu index a579182ec1..3effe77625 100644 --- a/paddle/fluid/operators/math/softmax.cu +++ b/paddle/fluid/operators/math/softmax.cu @@ -52,7 +52,7 @@ void SoftmaxCUDNNFunctor::operator()( xDesc.descriptor(layout, cudnn_tensor_dims); cudnnTensorDescriptor_t cudnn_y_desc = xDesc.descriptor(layout, cudnn_tensor_dims); - PADDLE_ENFORCE(platform::dynload::cudnnSoftmaxForward( + CUDNN_ENFORCE(platform::dynload::cudnnSoftmaxForward( context.cudnn_handle(), CUDNN_SOFTMAX_ACCURATE, CUDNN_SOFTMAX_MODE_INSTANCE, CudnnDataType::kOne(), cudnn_x_desc, X->data(), CudnnDataType::kZero(), cudnn_y_desc, @@ -83,7 +83,7 @@ void SoftmaxGradCUDNNFunctor::operator()( dxDesc.descriptor(layout, cudnn_tensor_dims); cudnnTensorDescriptor_t cudnn_ygrad_desc = dyDesc.descriptor(layout, cudnn_tensor_dims); - PADDLE_ENFORCE(platform::dynload::cudnnSoftmaxBackward( + CUDNN_ENFORCE(platform::dynload::cudnnSoftmaxBackward( context.cudnn_handle(), CUDNN_SOFTMAX_ACCURATE, CUDNN_SOFTMAX_MODE_INSTANCE, CudnnDataType::kOne(), cudnn_y_desc, Y->data(), cudnn_ygrad_desc, YGrad->data(), diff --git a/paddle/fluid/operators/matmul_op.cc b/paddle/fluid/operators/matmul_op.cc index e5d33fbc36..7182149164 100644 --- a/paddle/fluid/operators/matmul_op.cc +++ b/paddle/fluid/operators/matmul_op.cc @@ -12,21 +12,264 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/fluid/operators/matmul_op.h" #include +#include #include +#include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/operators/detail/safe_ref.h" +#include "paddle/fluid/operators/math/blas.h" namespace paddle { namespace operators { +/** + * Get row matrix shape from a vector shape. If the rank of x_dim > 1, the + * original x_dim is returned. + */ +static framework::DDim RowMatrixFromVector(const framework::DDim &x_dim) { + if (x_dim.size() > 1) { + return x_dim; + } + return framework::make_ddim({1, x_dim[0]}); +} + +/** + * Get column matrix shape from a vector shape. If the ran of y_dim > 1, the + * original y_dim is returned. + */ +static framework::DDim ColumnMatrixFromVector(const framework::DDim &y_dim) { + if (y_dim.size() > 1) { + return y_dim; + } + return framework::make_ddim({y_dim[0], 1}); +} + +template +class MatMulKernel : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext &context) const override { + auto &x = + detail::Ref(context.Input("X"), "Cannot find X"); + auto &y = + detail::Ref(context.Input("Y"), "Cannot find Y"); + auto *out = context.Output("Out"); + out->mutable_data(context.GetPlace()); + + auto blas = math::GetBlas(context); + auto mat_dim_a = math::CreateMatrixDescriptor( + RowMatrixFromVector(x.dims()), 0, context.Attr("transpose_X")); + auto mat_dim_b = math::CreateMatrixDescriptor( + ColumnMatrixFromVector(y.dims()), 0, context.Attr("transpose_Y")); + blas.MatMul(x, mat_dim_a, y, mat_dim_b, T(1), out, T(0)); + } +}; + +// Reshape a rank-3 tensor from P x M x N to (P * M) x N. +// Identity op if the tensor is not of rank 3. 
+static framework::Tensor FoldInitDims(const framework::Tensor &input) { + auto output = input; + auto in_dims = input.dims(); + if (in_dims.size() == 3) { + output.Resize({in_dims[0] * in_dims[1], in_dims[2]}); + } + return output; +} + +// Reshape a rank-3 tensor from P x M x N to M x (P * N). +// (Warning: This requires transposing data and writes into new memory.) +// Identity op if the tensor is not of rank 3. +template +static framework::Tensor FoldHeadAndLastDims(const DeviceContext &context, + const framework::Tensor &input) { + auto in_dims = input.dims(); + if (in_dims.size() != 3) { + return input; + } + framework::Tensor output; + output.Resize({in_dims[1], in_dims[0], in_dims[2]}); + output.mutable_data(context.GetPlace()); + std::vector axis = {1, 0, 2}; + math::Transpose trans; + trans(context, input, &output, axis); + output.Resize({in_dims[1], in_dims[0] * in_dims[2]}); + + return output; +} + +/** + * Reshape a tensor to 3-D or 2-D tensor by matrix descriptor. + * + * The shape would be [BatchSize, H, W] or [H, W]. + * If transposed, `H,W` will be swapped. + */ +static void ReshapeTensorIntoMatrixSequence( + framework::Tensor *x, const math::MatDescriptor &descriptor) { + int64_t h, w; + h = descriptor.height_; + w = descriptor.width_; + if (descriptor.trans_) { + std::swap(w, h); + } + if (descriptor.batch_size_) { + x->Resize({descriptor.batch_size_, h, w}); + } else { + x->Resize({h, w}); + } +} + +/** + * Reshape the x,y,out tensor to 3-D or 2-D tensor by matrix descriptor + * Out = matmul(x, y) + * + * This method will first calculate X,Y matrix sequence, and then calculate + * the out shape. + * + * Assume X = [BatchSize, H1, W1], Y = [BatchSize, H2, W2] + * The out = [BatchSize, H1, W2] + * + * If there is no batch size in `X` and `Y`, the out will be [H1, W2] + * If any of `X` and `Y` has batch size BatchSize, the out will have the + * BatchSize. + */ +static void ReshapeXYOutIntoMatrixSequence(framework::Tensor *x, + framework::Tensor *y, + framework::Tensor *out, bool trans_x, + bool trans_y) { + auto x_dim = RowMatrixFromVector(x->dims()); + auto y_dim = ColumnMatrixFromVector(y->dims()); + auto mat_dim_x = math::CreateMatrixDescriptor(x_dim, 0, trans_x); + auto mat_dim_y = math::CreateMatrixDescriptor(y_dim, 0, trans_y); + if (mat_dim_x.batch_size_ == 0 && mat_dim_y.batch_size_ == 0) { + out->Resize({mat_dim_x.height_, mat_dim_y.width_}); + } else { + out->Resize({std::max(mat_dim_x.batch_size_, mat_dim_y.batch_size_), + mat_dim_x.height_, mat_dim_y.width_}); + } + + ReshapeTensorIntoMatrixSequence(x, mat_dim_x); + ReshapeTensorIntoMatrixSequence(y, mat_dim_y); +} + +// Using dimensional constraints on matrix multiplication, it is +// straight-forward to check the following table for when X and Y +// are both matrices. +// +// transpose_X | False | True | False | True +// transpose_Y | False | False | True | True +// -----------+----------+----------+----------+----------- +// dX = | dOut Y^T | Y dOut^T | dOut Y | Y^T dOut^T +// dY = | X^T dOut | X dOut | dOut^T X | dOut^T X^T +// +// When X is a vector of size K, we treat it instead as a matrix of shape +// (1, K). Similarly, when Y is a vector of size K, we treat it instead as +// a matrix of shape (K, 1). +// +// When X and Y are both 3-dimensional tensors, then the first dimension +// the batch dimension can be ignored and the exact same formulas apply +// as for two matrices. 
+// +// Finally, when, e.g., X is a 3-dimensional tensor but Y is a matrix, we end +// up with formulas like +// +// dY_{ij} = \sum_{p, m} X_{pmi} dOut_{pmj} +// +// To handle this sort of scenario, we reshape X : P x M x K, dOut: P x M x N +// to X: (P * M) x K, dOut: (P * M) x N. +template +class MatMulGradKernel : public framework::OpKernel { + public: + void MatMul(const framework::ExecutionContext &context, + const framework::Tensor &a, bool trans_a, + const framework::Tensor &b, bool trans_b, + framework::Tensor *out) const { + out->mutable_data(context.GetPlace()); + auto blas = math::GetBlas(context); + auto mat_dim_a = math::CreateMatrixDescriptor(a.dims(), 0, trans_a); + auto mat_dim_b = math::CreateMatrixDescriptor(b.dims(), 0, trans_b); + blas.MatMul(a, mat_dim_a, b, mat_dim_b, T(1), out, T(0)); + } + + void CalcInputGrad(const framework::ExecutionContext &context, + const framework::Tensor &a, bool trans_a, + bool is_fold_init_dims_a, const framework::Tensor &b, + bool trans_b, bool is_fold_init_dims_b, + framework::Tensor *out) const { + if (out == nullptr) return; + bool need_combine = (a.dims().size() == 3 || b.dims().size() == 3) && + out->dims().size() == 2; + if (!need_combine) { + MatMul(context, a, trans_a, b, trans_b, out); + } else { + auto &ctx = context.template device_context(); + MatMul(context, is_fold_init_dims_a + ? FoldInitDims(a) + : FoldHeadAndLastDims(ctx, a), + trans_a, is_fold_init_dims_b + ? FoldInitDims(b) + : FoldHeadAndLastDims(ctx, b), + trans_b, out); + } + } + + void Compute(const framework::ExecutionContext &context) const override { + auto x = *context.Input("X"); + auto y = *context.Input("Y"); + auto dout = + *context.Input(framework::GradVarName("Out")); + auto *dx = context.Output(framework::GradVarName("X")); + auto *dy = context.Output(framework::GradVarName("Y")); + bool transpose_x = context.Attr("transpose_X"); + bool transpose_y = context.Attr("transpose_Y"); + + ReshapeXYOutIntoMatrixSequence(&x, &y, &dout, transpose_x, transpose_y); + framework::DDim dx_dims; + if (dx) { + dx_dims = dx->dims(); + if (dx_dims != x.dims()) { + dx->Resize(x.dims()); + } + } + + framework::DDim dy_dims; + if (dy) { + dy_dims = dy->dims(); + if (dy_dims != y.dims()) { + dy->Resize(y.dims()); + } + } -using framework::Tensor; + if (transpose_x && transpose_y) { + CalcInputGrad(context, y, true, true, dout, true, false, dx); + CalcInputGrad(context, dout, true, true, x, true, false, dy); + } else if (transpose_x) { + CalcInputGrad(context, y, false, false, dout, true, false, dx); + CalcInputGrad(context, x, false, false, dout, false, true, dy); + } else if (transpose_y) { + CalcInputGrad(context, dout, false, false, y, false, true, dx); + CalcInputGrad(context, dout, true, true, x, false, true, dy); + } else { + CalcInputGrad(context, dout, false, false, y, true, false, dx); + CalcInputGrad(context, x, true, true, dout, false, true, dy); + } + + if (dx) { + if (dx_dims != x.dims()) { + dx->Resize(dx_dims); + } + } + if (dy) { + if (dy_dims != y.dims()) { + dy->Resize(dy_dims); + } + } + } +}; class MatMulOp : public framework::OperatorWithKernel { public: using framework::OperatorWithKernel::OperatorWithKernel; protected: - void InferShape(framework::InferShapeContext* context) const override { + void InferShape(framework::InferShapeContext *context) const override { PADDLE_ENFORCE(context->HasInput("X"), "Input(X) of MatMulOp should not be null."); PADDLE_ENFORCE(context->HasInput("Y"), @@ -36,121 +279,41 @@ class MatMulOp : public 
framework::OperatorWithKernel { auto dim_x = context->GetInputDim("X"); auto dim_y = context->GetInputDim("Y"); - bool transpose_x = context->Attrs().Get("transpose_X"); - bool transpose_y = context->Attrs().Get("transpose_Y"); - - PADDLE_ENFORCE_GE(dim_x.size(), 1, - "Input tensor X must be at least 1-dimensional."); - PADDLE_ENFORCE_GE(dim_y.size(), 1, - "Input tensor Y must be at least 1-dimensional."); - - std::vector out_dim; - int64_t batch_count = 1; - if (dim_x.size() > 3) { - PADDLE_ENFORCE_EQ( - dim_y.size(), dim_x.size(), - "The dimensions of X and Y must be the same, and both of " - "them should be %d-dimensional.", - dim_x.size()); - - // The first rank-2 dimensions are accumulated on the batch_count, and the - // last two dimensions are used for matrix multiplication. - for (int j = 0; j < dim_x.size() - 2; ++j) { - PADDLE_ENFORCE_EQ(dim_y[j], dim_x[j], - "The %d-th dimension of X and Y must be the same.", - j); - out_dim.push_back(dim_x[j]); - batch_count *= dim_x[j]; - } - } - int M = 0, N = 0, KX = 0, KY = 0, batchCountX = 0, batchCountY = 0; - bool remove_initial_dim = false, remove_final_dim = false; - - switch (dim_x.size()) { - case 1: - if (transpose_x) { - M = dim_x[0]; - KX = 1; - } else { - M = 1; - KX = dim_x[0]; - remove_initial_dim = true; - } - break; - case 2: - M = transpose_x ? dim_x[1] : dim_x[0]; - KX = transpose_x ? dim_x[0] : dim_x[1]; - break; - case 3: - batchCountX = dim_x[0]; - M = transpose_x ? dim_x[2] : dim_x[1]; - KX = transpose_x ? dim_x[1] : dim_x[2]; - break; - default: - batchCountX = batch_count; - size_t mat_s = dim_x.size() - 2; - M = transpose_x ? dim_x[mat_s + 1] : dim_x[mat_s]; - KX = transpose_x ? dim_x[mat_s] : dim_x[mat_s + 1]; - break; - } + auto mat_dim_x = + math::CreateMatrixDescriptor(RowMatrixFromVector(dim_x), 0, + context->Attrs().Get("transpose_X")); + auto mat_dim_y = + math::CreateMatrixDescriptor(ColumnMatrixFromVector(dim_y), 0, + context->Attrs().Get("transpose_Y")); - switch (dim_y.size()) { - case 1: - if (transpose_y) { - N = dim_y[0]; - KY = 1; - } else { - N = 1; - KY = dim_y[0]; - remove_final_dim = true; - } - break; - case 2: - KY = transpose_y ? dim_y[1] : dim_y[0]; - N = transpose_y ? dim_y[0] : dim_y[1]; - break; - case 3: - batchCountY = dim_y[0]; - KY = transpose_y ? dim_y[2] : dim_y[1]; - N = transpose_y ? dim_y[1] : dim_y[2]; - break; - default: - batchCountY = batch_count; - size_t mat_s = dim_y.size() - 2; - KY = transpose_y ? dim_y[mat_s + 1] : dim_y[mat_s]; - N = transpose_y ? 
dim_y[mat_s] : dim_y[mat_s + 1]; + PADDLE_ENFORCE_EQ(mat_dim_x.width_, mat_dim_y.height_); + PADDLE_ENFORCE(mat_dim_x.batch_size_ == mat_dim_y.batch_size_ || + mat_dim_x.batch_size_ == 0 || mat_dim_y.batch_size_ == 0); + std::vector dim_out; + if (mat_dim_x.batch_size_ != 0) { + dim_out = framework::vectorize(dim_x); + dim_out[dim_out.size() - 2] = mat_dim_x.height_; + dim_out[dim_out.size() - 1] = mat_dim_y.width_; + } else if (mat_dim_y.batch_size_ != 0) { + dim_out = framework::vectorize(dim_y); + dim_out[dim_out.size() - 2] = mat_dim_x.height_; + dim_out[dim_out.size() - 1] = mat_dim_y.width_; + } else { + dim_out = {mat_dim_x.height_, mat_dim_y.width_}; } - PADDLE_ENFORCE_EQ( - KX, KY, - "First matrix's width must be equal with second matrix's height."); - if (batchCountX && batchCountY) { - PADDLE_ENFORCE_EQ( - batchCountX, batchCountY, - "When Input(X) and Input(Y) are both three dimensional, they " - "must have the same batch dimension."); + if (dim_x.size() == 1 && dim_out[dim_out.size() - 2] == 1) { + std::swap(dim_out[dim_out.size() - 2], dim_out[dim_out.size() - 1]); + dim_out.resize(dim_out.size() - 1); } - int batchCount = std::max(batchCountX, batchCountY); - std::vector dim_out; - if (batchCount) { - if (dim_x.size() > 3) { - dim_out.insert(dim_out.begin(), out_dim.begin(), out_dim.end()); - } else { - dim_out.push_back(batchCount); - } + if (dim_y.size() == 1 && dim_out[dim_out.size() - 1] == 1) { + dim_out.resize(dim_out.size() - 1); } - if (!remove_initial_dim) { - dim_out.push_back(M); - } - if (!remove_final_dim) { - dim_out.push_back(N); - } - if (dim_out.size() == 0) { - // We don't support 0-dimensional Tensors (scalars), so instead - // treat the output as a Tensor of shape (1, ) in this case. - dim_out.push_back(1); + + if (dim_out.empty()) { + dim_out = {1}; } context->SetOutputDim("Out", framework::make_ddim(dim_out)); context->ShareLoD("X", /*->*/ "Out"); @@ -159,8 +322,7 @@ class MatMulOp : public framework::OperatorWithKernel { class MatMulOpMaker : public framework::OpProtoAndCheckerMaker { public: - MatMulOpMaker(OpProto* proto, OpAttrChecker* op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("X", "The first input of MatMul op"); AddInput("Y", "The second input of MatMul op"); AddOutput("Out", "The output of MatMul op"); @@ -213,7 +375,7 @@ class MatMulOpGrad : public framework::OperatorWithKernel { using framework::OperatorWithKernel::OperatorWithKernel; protected: - void InferShape(framework::InferShapeContext* context) const override { + void InferShape(framework::InferShapeContext *context) const override { PADDLE_ENFORCE(context->HasInput("X"), "Input(X) should not be null"); PADDLE_ENFORCE(context->HasInput("Y"), "Input(Y) should not be null"); PADDLE_ENFORCE(context->HasInput(framework::GradVarName("Out")), @@ -233,15 +395,52 @@ class MatMulOpGrad : public framework::OperatorWithKernel { } }; +class MatMulOpGradMaker : public framework::SingleGradOpDescMaker { + public: + using framework::SingleGradOpDescMaker::SingleGradOpDescMaker; + + protected: + std::unique_ptr Apply() const override { + auto *retv = new framework::OpDesc(); + retv->SetType("matmul_grad"); + retv->SetInput("X", Input("X")); + retv->SetInput("Y", Input("Y")); + retv->SetInput(framework::GradVarName("Out"), OutputGrad("Out")); + retv->SetOutput(framework::GradVarName("X"), InputGrad("X")); + retv->SetOutput(framework::GradVarName("Y"), InputGrad("Y")); + retv->SetAttrMap(Attrs()); + return std::unique_ptr(retv); + } +}; } // namespace 
operators } // namespace paddle namespace ops = paddle::operators; REGISTER_OPERATOR(matmul, ops::MatMulOp, ops::MatMulOpMaker, - paddle::framework::DefaultGradOpDescMaker); + ops::MatMulOpGradMaker); REGISTER_OPERATOR(matmul_grad, ops::MatMulOpGrad); REGISTER_OP_CPU_KERNEL( - matmul, ops::MatMulKernel); + matmul, ops::MatMulKernel, + ops::MatMulKernel, + ops::MatMulKernel); REGISTER_OP_CPU_KERNEL( matmul_grad, - ops::MatMulGradKernel); + ops::MatMulGradKernel, + ops::MatMulGradKernel, + ops::MatMulGradKernel); + +#ifdef PADDLE_WITH_CUDA +REGISTER_OP_CUDA_KERNEL( + matmul, ops::MatMulKernel, + ops::MatMulKernel, + ops::MatMulKernel); +REGISTER_OP_CUDA_KERNEL( + matmul_grad, + ops::MatMulGradKernel, + ops::MatMulGradKernel, + ops::MatMulGradKernel); +#endif diff --git a/paddle/fluid/operators/matmul_op.h b/paddle/fluid/operators/matmul_op.h deleted file mode 100644 index f2e9cfdcdb..0000000000 --- a/paddle/fluid/operators/matmul_op.h +++ /dev/null @@ -1,244 +0,0 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. */ - -#pragma once -#include -#include -#include -#include "paddle/fluid/framework/op_registry.h" -#include "paddle/fluid/operators/math/math_function.h" -#include "paddle/fluid/operators/math/matmul.h" - -namespace paddle { -namespace operators { -namespace matmul_detail { - -using Tensor = framework::Tensor; -using DDim = framework::DDim; -using framework::make_ddim; -using framework::vectorize; - -template -class MatMulKernel : public framework::OpKernel { - public: - void Compute(const framework::ExecutionContext& context) const override { - const Tensor& x = *context.Input("X"); - const Tensor& y = *context.Input("Y"); - Tensor* out = context.Output("Out"); - out->mutable_data(context.GetPlace()); - bool transpose_x = context.Attr("transpose_X"); - bool transpose_y = context.Attr("transpose_Y"); - - math::MatMulFunctor()( - context.template device_context(), x, transpose_x, y, - transpose_y, T(1), out, T(0)); - } -}; - -template -inline Tensor Reshape(const Tensor& input, const DDim& dims) { - Tensor output; - output.ShareDataWith(input); - output.Resize(dims); - return output; -} - -// Reshape a rank-3 tensor from P x M x N to (P * M) x N. -// Identity op if the tensor is not of rank 3. -template -Tensor CombineBatchAndM(const Tensor& input) { - Tensor output; - output.ShareDataWith(input); - auto in_dims = input.dims(); - if (in_dims.size() == 3) { - std::vector out_dims = {in_dims[0] * in_dims[1], in_dims[2]}; - output.Resize(make_ddim(out_dims)); - } - return output; -} - -// Reshape a rank-3 tensor from P x M x N to M x (P * N). -// (Warning: This requires transposing data and writes into new memory.) -// Identity op if the tensor is not of rank 3. 
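The two batch-folding helpers in this change (FoldInitDims/CombineBatchAndM versus FoldHeadAndLastDims/CombineBatchAndN) differ in cost: folding P x M x N to (P * M) x N is a pure reshape of row-major memory, while folding to M x (P * N) must physically move data, as the warning below notes. A minimal index-level sketch (plain C++, illustrative only):

#include <cstddef>

// Folding a row-major P x M x N block to (P*M) x N reuses the same memory:
// element (p, m, n) already sits at ((p * M + m) * N + n).
// Folding to M x (P*N) needs a real transpose into new storage, because
// element (m, p * N + n) must come from (p, m, n).
void FoldHeadAndLast(const float* in, float* out, size_t P, size_t M,
                     size_t N) {
  for (size_t p = 0; p < P; ++p)
    for (size_t m = 0; m < M; ++m)
      for (size_t n = 0; n < N; ++n)
        out[m * (P * N) + p * N + n] = in[(p * M + m) * N + n];
}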
-template -Tensor CombineBatchAndN(const DeviceContext& context, const Tensor& input) { - Tensor output; - auto in_dims = input.dims(); - if (in_dims.size() == 3) { - output.Resize({in_dims[1], in_dims[0], in_dims[2]}); - output.mutable_data(context.GetPlace()); - std::vector axis = {1, 0, 2}; - math::Transpose trans; - trans(context, input, &output, axis); - std::vector out_dims = {in_dims[1], in_dims[0] * in_dims[2]}; - output.Resize({in_dims[1], in_dims[0] * in_dims[2]}); - } else { - output.ShareDataWith(input); - } - return output; -} - -// Using dimensional constraints on matrix multiplication, it is -// straight-forward to check the following table for when X and Y -// are both matrices. -// -// transpose_X | False | True | False | True -// transpose_Y | False | False | True | True -// -----------+----------+----------+----------+----------- -// dX = | dOut Y^T | Y dOut^T | dOut Y | Y^T dOut^T -// dY = | X^T dOut | X dOut | dOut^T X | dOut^T X^T -// -// When X is a vector of size K, we treat it instead as a matrix of shape -// (1, K). Similarly, when Y is a vector of size K, we treat it instead as -// a matrix of shape (K, 1). -// -// When X and Y are both 3-dimensional tensors, then the first dimension -// the batch dimension can be ignored and the exact same formulas apply -// as for two matrices. -// -// Finally, when, e.g., X is a 3-dimensional tensor but Y is a matrix, we end -// up with formulas like -// -// dY_{ij} = \sum_{p, m} X_{pmi} dOut_{pmj} -// -// To handle this sort of scenario, we reshape X : P x M x K, dOut: P x M x N -// to X: (P * M) x K, dOut: (P * M) x N. -template -class MatMulGradKernel : public framework::OpKernel { - public: - void Compute(const framework::ExecutionContext& context) const override { - const Tensor& x = *context.Input("X"); - const Tensor& y = *context.Input("Y"); - const Tensor& dout = *context.Input(framework::GradVarName("Out")); - Tensor* dx = context.Output(framework::GradVarName("X")); - Tensor* dy = context.Output(framework::GradVarName("Y")); - bool transpose_x = context.Attr("transpose_X"); - bool transpose_y = context.Attr("transpose_Y"); - - std::vector x_dims = vectorize(x.dims()); - std::vector y_dims = vectorize(y.dims()); - - // If X is a vector, reshape it to a matrix. - if (x_dims.size() == 1) { - x_dims.insert(x_dims.begin(), 1); - } - - // If Y is a vector, reshape it to a matrix. - if (y_dims.size() == 1) { - y_dims.push_back(1); - } - - int batch_count = 0; - // The first rank-2 dimensions are accumulated on the batch_count, and the - // last two dimensions are used for matrix multiplication. - if (x_dims.size() > 3) { - batch_count = accumulate(x_dims.begin(), x_dims.end() - 2, 1, - std::multiplies()); - } - // Fix the dOut dimensions. - int M = 0, N = 0, batchCountX = 0, batchCountY = 0; - - switch (x_dims.size()) { - case 2: - M = transpose_x ? x_dims[1] : x_dims[0]; - break; - case 3: - batchCountX = x_dims[0]; - M = transpose_x ? x_dims[2] : x_dims[1]; - break; - default: - batchCountX = batch_count; - size_t mat_s = x_dims.size() - 2; - M = transpose_x ? x_dims[mat_s + 1] : x_dims[mat_s]; - } - - switch (y_dims.size()) { - case 2: - N = transpose_y ? y_dims[0] : y_dims[1]; - break; - case 3: - batchCountY = y_dims[0]; - N = transpose_y ? y_dims[1] : y_dims[2]; - break; - default: - batchCountY = batch_count; - size_t mat_s = y_dims.size() - 2; - N = transpose_y ? 
y_dims[mat_s] : y_dims[mat_s + 1]; - } - if (batchCountX && batchCountY) { - PADDLE_ENFORCE_EQ( - batchCountX, batchCountY, - "When Input(X) and Input(Y) are both three dimensional, they " - "must have the same batch dimension."); - } - int batchCount = std::max(batchCountX, batchCountY); - std::vector dout_dims = {M, N}; - if (batchCount) { - if (x_dims.size() > 3) { - dout_dims.insert(dout_dims.begin(), x_dims.begin(), x_dims.end() - 2); - } else { - dout_dims.insert(dout_dims.begin(), batchCount); - } - } - Tensor X = Reshape(x, make_ddim(x_dims)); - Tensor Y = Reshape(y, make_ddim(y_dims)); - Tensor dOut = Reshape(dout, make_ddim(dout_dims)); - - auto& dev_ctx = context.template device_context(); - if (dx) { - dx->mutable_data(context.GetPlace()); - const Tensor& dOut_for_dX = - (x_dims.size() == 2 && y_dims.size() == 3) - ? CombineBatchAndN(dev_ctx, dOut) - : dOut; - if (x_dims.size() == 2 && y_dims.size() == 3) { - Y = transpose_y ? CombineBatchAndM(Y) - : CombineBatchAndN(dev_ctx, Y); - } - if (transpose_x) { - math::MatMulFunctor()( - dev_ctx, Y, transpose_y, dOut_for_dX, transpose_x, T(1), dx, T(0)); - } else { - math::MatMulFunctor()( - dev_ctx, dOut_for_dX, transpose_x, Y, !transpose_y, T(1), dx, T(0)); - } - } - - if (dy) { - dy->mutable_data(context.GetPlace()); - const Tensor& dOut_for_dY = (y_dims.size() == 2 && x_dims.size() == 3) - ? CombineBatchAndM(dOut) - : dOut; - if (y_dims.size() == 2 && x_dims.size() == 3) { - X = transpose_x ? CombineBatchAndN(dev_ctx, X) - : CombineBatchAndM(X); - dOut = CombineBatchAndM(dOut); - } - if (transpose_y) { - math::MatMulFunctor()( - dev_ctx, dOut_for_dY, transpose_y, X, transpose_x, T(1), dy, T(0)); - } else { - math::MatMulFunctor()( - dev_ctx, X, !transpose_x, dOut_for_dY, transpose_y, T(1), dy, T(0)); - } - } - } -}; -} // namespace matmul_detail - -using matmul_detail::MatMulKernel; -using matmul_detail::MatMulGradKernel; - -} // namespace operators -} // namespace paddle diff --git a/paddle/fluid/operators/max_sequence_len_op.cc b/paddle/fluid/operators/max_sequence_len_op.cc index 4cd7c89b48..b1e69f375d 100644 --- a/paddle/fluid/operators/max_sequence_len_op.cc +++ b/paddle/fluid/operators/max_sequence_len_op.cc @@ -41,12 +41,16 @@ class MaxSeqenceLenOp : public framework::OperatorBase { class MaxSeqenceLenOpProtoMaker : public framework::OpProtoAndCheckerMaker { public: - MaxSeqenceLenOpProtoMaker(OpProto *proto, OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { - AddInput("RankTable", "The lod_rank_table."); - AddOutput("Out", "The max sequence length."); - AddComment( - R"DOC(Calculate the max sequence length through lod_rank_table.)DOC"); + void Make() override { + AddInput("RankTable", "Input variable which is a LoDRankTable object"); + AddOutput("Out", "The max sequence length"); + AddComment(R"DOC( + Given a LoDRankTable object, this layer returns the max length of + a batch of sequences. 
In fact, a LoDRankTable object contains a list of + tuples(<sequence index, sequence length>) and the list is already sorted by + sequence length in descending order, so the operator just returns the + sequence length of the first tuple element. +)DOC"); } }; diff --git a/paddle/fluid/operators/maxout_op.cc b/paddle/fluid/operators/maxout_op.cc index e2bcba5a5e..058115cb62 100644 --- a/paddle/fluid/operators/maxout_op.cc +++ b/paddle/fluid/operators/maxout_op.cc @@ -22,8 +22,7 @@ using framework::Tensor; class MaxOutOpMaker : public framework::OpProtoAndCheckerMaker { public: - MaxOutOpMaker(OpProto* proto, OpAttrChecker* op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput( "X", "(Tensor) The input tensor of maxout operator. " diff --git a/paddle/fluid/operators/mean_iou_op.cc b/paddle/fluid/operators/mean_iou_op.cc new file mode 100644 index 0000000000..a60f245f53 --- /dev/null +++ b/paddle/fluid/operators/mean_iou_op.cc @@ -0,0 +1,110 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include "paddle/fluid/operators/mean_iou_op.h" + +namespace paddle { +namespace operators { + +class MeanIoUOp : public framework::OperatorWithKernel { + public: + using framework::OperatorWithKernel::OperatorWithKernel; + + void InferShape(framework::InferShapeContext* ctx) const override { + PADDLE_ENFORCE(ctx->HasInput("Predictions"), + "Input (Predictions) of MeanIoU op should not be null."); + PADDLE_ENFORCE(ctx->HasInput("Labels"), + "Input (Labels) of MeanIoU op should not be null."); + PADDLE_ENFORCE(ctx->HasOutput("OutMeanIou"), + "Output (OutMeanIou) of MeanIoU op should not be null."); + PADDLE_ENFORCE(ctx->HasOutput("OutWrong"), + "Output (OutWrong) of MeanIoU op should not be null."); + PADDLE_ENFORCE(ctx->HasOutput("OutCorrect"), + "Output (OutCorrect) of MeanIoU op should not be null."); + + int64_t num_classes = + static_cast<int64_t>(ctx->Attrs().Get<int>("num_classes")); + + ctx->SetOutputDim("OutMeanIou", {1}); + ctx->SetOutputDim("OutWrong", {num_classes}); + ctx->SetOutputDim("OutCorrect", {num_classes}); + } + + protected: + framework::OpKernelType GetExpectedKernelType( + const framework::ExecutionContext& ctx) const override { + return framework::OpKernelType( + framework::ToDataType(ctx.Input<Tensor>("Predictions")->type()), + ctx.GetPlace()); + } +}; + +class MeanIoUOpMaker : public framework::OpProtoAndCheckerMaker { + public: + void Make() override { + AddInput("Predictions", + "(Tensor), A Tensor of prediction results for semantic labels" + " with type int32 or int64. The rank should be greater than 1."); + AddInput( + "Labels", + "(Tensor), A Tensor of ground truth labels with type int32 or int64. " + "Its shape should be the same as Input(Predictions)."); + AddInput("InWrongs", + "(vector<Tensor>), A list of Tensor with shape " + "[num_classes]. They are used to collect wrong number among " + "batches.
Empty list is also valid here.") + .AsDuplicable() + .AsDispensable(); + AddInput( + "InCorrects", + "(vector), A list of Tensor with shape " + "[num_classes]. They are used to collect correct number among batches. " + "Empty list is also valid here.") + .AsDuplicable() + .AsDispensable(); + AddInput("InMeanIou", + "(vector), A list of Tensor that Output(mean_iou) should " + "be added to. Empty list is also valid here.") + .AsDuplicable() + .AsDispensable(); + AddOutput("OutMeanIou", + "(vector), A Tensor representing the" + " mean intersection-over-union with shape [1]."); + AddOutput("OutWrong", "(Tensor), A Tensor with shape [num_classes]. "); + AddOutput("OutCorrect", "(Tensor), A Tensor with shape [num_classes]. "); + AddAttr("num_classes", "(int), The possible number of labels."); + + AddComment(R"DOC( +mean-IOU Operator. +Mean Intersection-Over-Union is a common evaluation metric for +semantic image segmentation, which first computes the IOU for each +semantic class and then computes the average over classes. +IOU is defined as follows: + IOU = true_positive / (true_positive + false_positive + false_negative). +It is based on pixel level area while "IOU Similarity Operator" +is based on area of rectangle. + +)DOC"); + } +}; + +} // namespace operators +} // namespace paddle + +namespace ops = paddle::operators; +REGISTER_OPERATOR(mean_iou, ops::MeanIoUOp, ops::MeanIoUOpMaker, + paddle::framework::EmptyGradOpMaker); +REGISTER_OP_CPU_KERNEL(mean_iou, ops::MeanIoUKernel, + ops::MeanIoUKernel, + ops::MeanIoUKernel); diff --git a/paddle/fluid/operators/mean_iou_op.cu b/paddle/fluid/operators/mean_iou_op.cu new file mode 100644 index 0000000000..83bb4dde46 --- /dev/null +++ b/paddle/fluid/operators/mean_iou_op.cu @@ -0,0 +1,164 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
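The IOU definition in the op comment above reduces, per class, to correct / (wrong + correct) with the counters maintained by the CPU and CUDA kernels in this patch; a standalone sketch of that reduction follows (plain C++; MeanIoU is an illustrative helper, not part of this patch):

#include <cstddef>

// Mean IoU from per-class counters as accumulated by the kernels:
// correct[c] = true positives of class c, wrong[c] = false positives +
// false negatives, so IoU(c) = correct[c] / (wrong[c] + correct[c]).
float MeanIoU(const int* wrong, const int* correct, int num_classes) {
  float sum = 0.f;
  int valid = 0;
  for (int c = 0; c < num_classes; ++c) {
    int denom = wrong[c] + correct[c];
    if (denom > 0) {
      sum += static_cast<float>(correct[c]) / denom;
      ++valid;
    }
  }
  return valid > 0 ? sum / valid : 0.f;  // classes never seen are skipped
}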
*/ + +#include "paddle/fluid/operators/math/math_function.h" +#include "paddle/fluid/operators/mean_iou_op.h" +#include "paddle/fluid/platform/cuda_primitives.h" +#include "paddle/fluid/platform/gpu_info.h" + +namespace paddle { +namespace operators { + +using platform::PADDLE_CUDA_NUM_THREADS; + +#define CUDA_1D_KERNEL_LOOP(i, n) \ + for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); \ + i += blockDim.x * gridDim.x) + +template <typename T> +__global__ void CountCUDAKernel(const int num_classes, const int count, + const T* predictions, const T* labels, + int* wrong, int* correct) { + extern __shared__ int block_cache[]; + int* wrong_c = block_cache; + int* correct_c = block_cache + num_classes; + // initialize the shared-memory cache + for (int i = threadIdx.x; i < num_classes * 2; i += blockDim.x) { + block_cache[i] = 0; + } + __syncthreads(); + + T pred; + T label; + CUDA_1D_KERNEL_LOOP(i, count) { + pred = predictions[i]; + label = labels[i]; + if (pred == label) { + atomicAdd(correct_c + pred, 1); + } else { + atomicAdd(wrong_c + pred, 1); + atomicAdd(wrong_c + label, 1); + } + } + + __syncthreads(); + + for (int i = threadIdx.x; i < num_classes; i += blockDim.x) { + atomicAdd(wrong + i, wrong_c[i]); + atomicAdd(correct + i, correct_c[i]); + } +} + +__global__ void ComputeIoUCUDAKernel(const int num_classes, int* wrong, + int* correct, float* ious, float* iou) { + __shared__ int valid_count_c; + if (threadIdx.x == 0) { + valid_count_c = 0; + } + __syncthreads(); + CUDA_1D_KERNEL_LOOP(i, num_classes) { + int wrong_n = wrong[i]; + int correct_n = correct[i]; + int denominator = wrong_n + correct_n; + if (denominator > 0) { + atomicAdd(&valid_count_c, 1); + ious[i] = static_cast<float>(correct_n) / denominator; + } else { + ious[i] = 0; + } + } + __syncthreads(); + if (threadIdx.x == 0) { + float iou_sum = 0; + for (int i = 0; i < num_classes; ++i) { + iou_sum += ious[i]; + } + iou[0] += iou_sum / valid_count_c; + } +} + +template <typename T> +class MeanIoUCUDAOpKernel : public framework::OpKernel<T> { + public: + void Compute(const framework::ExecutionContext& ctx) const override { + auto& place = *ctx.template device_context<platform::CUDADeviceContext>() + .eigen_device(); + // get input and output tensor + auto* predictions = ctx.Input<Tensor>("Predictions"); + auto* labels = ctx.Input<Tensor>("Labels"); + auto* out_mean_iou = ctx.Output<Tensor>("OutMeanIou"); + auto* out_wrong = ctx.Output<Tensor>("OutWrong"); + auto* out_correct = ctx.Output<Tensor>("OutCorrect"); + int num_classes = static_cast<int>(ctx.Attr<int>("num_classes")); + + // Get data ptr + const T* predictions_data = predictions->data<T>(); + const T* labels_data = labels->data<T>(); + int* out_wrong_data = out_wrong->mutable_data<int>(ctx.GetPlace()); + int* out_correct_data = out_correct->mutable_data<int>(ctx.GetPlace()); + float* out_mean_iou_data = + out_mean_iou->mutable_data<float>(ctx.GetPlace()); + + // Get Eigen tensor + auto out_mean_iou_t = EigenTensor<float, 1>::From(*out_mean_iou); + auto out_wrong_t = EigenTensor<int, 1>::From(*out_wrong); + auto out_correct_t = EigenTensor<int, 1>::From(*out_correct); + + // Temporary tensor + Tensor ious; + float* ious_data = ious.mutable_data<float>( + {static_cast<int64_t>(num_classes)}, ctx.GetPlace()); + auto ious_t = EigenTensor<float, 1>::From(ious); + + // Init out_wrong, out_correct and out_mean_iou + out_wrong_t.device(place) = out_wrong_t.constant(0); + out_correct_t.device(place) = out_correct_t.constant(0); + out_mean_iou_t.device(place) = out_mean_iou_t.constant(0.0f); + + // collect previous wrong, correct and mean_iou + auto in_mean_ious = ctx.MultiInput<Tensor>("InMeanIou"); + for (int i = 0; i < in_mean_ious.size(); ++i) { + out_mean_iou_t.device(place) +=
EigenTensor::From(*in_mean_ious[i]); + } + auto in_wrongs = ctx.MultiInput("InWrongs"); + for (int i = 0; i < in_wrongs.size(); ++i) { + out_wrong_t.device(place) += EigenTensor::From(*in_wrongs[i]); + } + auto in_corrects = ctx.MultiInput("InCorrects"); + for (int i = 0; i < in_corrects.size(); ++i) { + out_correct_t.device(place) += EigenTensor::From(*in_corrects[i]); + } + // compute + auto stream = ctx.cuda_device_context().stream(); + int block = PADDLE_CUDA_NUM_THREADS; + int grid = (predictions->numel() + block - 1) / block; + int cache_size = (num_classes * 2 + 1) * sizeof(int); + CountCUDAKernel<<>>( + num_classes, predictions->numel(), predictions_data, labels_data, + out_wrong_data, out_correct_data); + ctx.device_context().Wait(); + ComputeIoUCUDAKernel<<<1, block, 0, stream>>>(num_classes, out_wrong_data, + out_correct_data, ious_data, + out_mean_iou_data); + } +}; + +} // namespace operators +} // namespace paddle + +namespace ops = paddle::operators; +REGISTER_OP_CUDA_KERNEL(mean_iou, ops::MeanIoUCUDAOpKernel, + ops::MeanIoUCUDAOpKernel, + ops::MeanIoUCUDAOpKernel); diff --git a/paddle/fluid/operators/mean_iou_op.h b/paddle/fluid/operators/mean_iou_op.h new file mode 100644 index 0000000000..9fa00e60e0 --- /dev/null +++ b/paddle/fluid/operators/mean_iou_op.h @@ -0,0 +1,117 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
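// The CUDA_1D_KERNEL_LOOP macro used above is the standard grid-stride loop:
// a fixed-size grid covers an input of any length because each thread
// advances by the total number of launched threads. Expanded by hand, an
// equivalent (illustrative) kernel looks like this:
//
//   template <typename T>
//   __global__ void ScaleKernel(const T* x, T* y, int n, T alpha) {
//     for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n;
//          i += blockDim.x * gridDim.x) {
//       y[i] = alpha * x[i];  // each element is visited exactly once
//     }
//   }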
*/ + +#pragma once +#include +#include "paddle/fluid/framework/op_registry.h" + +namespace paddle { +namespace operators { +using Tensor = framework::Tensor; + +template +using EigenTensor = framework::EigenTensor; + +template +class MeanIoUKernel : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext& ctx) const override { + auto& place = *ctx.template device_context() + .eigen_device(); + // get input and output tensor + auto* predictions = ctx.Input("Predictions"); + auto* labels = ctx.Input("Labels"); + auto* out_mean_iou = ctx.Output("OutMeanIou"); + auto* out_wrong = ctx.Output("OutWrong"); + auto* out_correct = ctx.Output("OutCorrect"); + int num_classes = static_cast(ctx.Attr("num_classes")); + + // get data ptr + const T* predictions_data = predictions->data(); + const T* labels_data = labels->data(); + float* out_mean_iou_data = + out_mean_iou->mutable_data(ctx.GetPlace()); + int* out_wrong_data = out_wrong->mutable_data(ctx.GetPlace()); + int* out_correct_data = out_correct->mutable_data(ctx.GetPlace()); + + // get eigen tensor + auto out_mean_iou_t = EigenTensor::From(*out_mean_iou); + auto out_wrong_t = EigenTensor::From(*out_wrong); + auto out_correct_t = EigenTensor::From(*out_correct); + + // Tmp tensor + Tensor denominator; + Tensor valid_count; + Tensor iou_sum; + + // get data ptr of tmp tensor + int* denominator_data = denominator.mutable_data( + {static_cast(num_classes)}, ctx.GetPlace()); + int* valid_count_data = valid_count.mutable_data({1}, ctx.GetPlace()); + float* iou_sum_data = iou_sum.mutable_data({1}, ctx.GetPlace()); + + // get eigen tensor of tmp tensor + auto denominator_t = EigenTensor::From(denominator); + auto valid_count_t = EigenTensor::From(valid_count); + auto iou_sum_t = EigenTensor::From(iou_sum); + + // init out_wrong, out_correct and out_mean_iou + out_wrong_t = out_wrong_t.constant(0); + out_correct_t = out_correct_t.constant(0); + out_mean_iou_t = out_mean_iou_t.constant(0); + + // collect pre wrong, correct and mean_iou + auto in_mean_ious = ctx.MultiInput("InMeanIou"); + for (size_t i = 0; i < in_mean_ious.size(); ++i) { + out_mean_iou_t.device(place) += + EigenTensor::From(*in_mean_ious[i]); + } + auto in_wrongs = ctx.MultiInput("InWrongs"); + for (size_t i = 0; i < in_wrongs.size(); ++i) { + out_wrong_t.device(place) += EigenTensor::From(*in_wrongs[i]); + } + auto in_corrects = ctx.MultiInput("InCorrects"); + for (size_t i = 0; i < in_corrects.size(); ++i) { + out_correct_t.device(place) += EigenTensor::From(*in_corrects[i]); + } + + // compute + for (int64_t i = 0; i < predictions->numel(); ++i) { + if (predictions_data[i] == labels_data[i]) { + out_correct_data[predictions_data[i]] += 1; + } else { + out_wrong_data[labels_data[i]] += 1; + out_wrong_data[predictions_data[i]] += 1; + } + } + + denominator_t = out_wrong_t + out_correct_t; + valid_count_t = + (denominator_t > denominator_t.constant(0.0f)).cast().sum(); + + for (int i = 0; i < num_classes; ++i) { + if (denominator_data[i] == 0) { + denominator_data[i] = 1; + } + } + + iou_sum_t = + (out_correct_t.cast() / denominator_t.cast()).sum(); + out_mean_iou_data[0] += (iou_sum_data[0] / valid_count_data[0]); + } +}; + +} // namespace operators +} // namespace paddle diff --git a/paddle/fluid/operators/mean_op.cc b/paddle/fluid/operators/mean_op.cc index a134796bfc..9e0bebd17c 100644 --- a/paddle/fluid/operators/mean_op.cc +++ b/paddle/fluid/operators/mean_op.cc @@ -32,14 +32,11 @@ class MeanOp : public framework::OperatorWithKernel { class MeanOpMaker 
: public framework::OpProtoAndCheckerMaker { public: - MeanOpMaker(OpProto* proto, OpAttrChecker* op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { - AddInput("X", "The input of mean op"); - AddOutput("Out", "The output of mean op"); + void Make() override { + AddInput("X", "(Tensor) The input of mean op"); + AddOutput("Out", "(Tensor) The output of mean op").Reuse("X"); AddComment(R"DOC( -Mean Operator. - -Out is a scalar which is the mean of all elements in X. +Mean Operator calculates the mean of all elements in X. )DOC"); } diff --git a/paddle/fluid/operators/merge_ids_op.cc b/paddle/fluid/operators/merge_ids_op.cc new file mode 100644 index 0000000000..c6ec4ab047 --- /dev/null +++ b/paddle/fluid/operators/merge_ids_op.cc @@ -0,0 +1,128 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include "paddle/fluid/operators/merge_ids_op.h" + +namespace paddle { +namespace operators { + +class MergeIdsOpMaker : public framework::OpProtoAndCheckerMaker { + public: + void Make() override { + AddInput("Ids", "(LoDTensor) The input ids with shape {batch_num, 1}."); + AddInput( + "X", + "(LoDTensors) Multiple input tensors with shape {batch_num, N}, where " + "N is the size of the embedding table.") + .AsDuplicable(); + AddOutput("Out", "(LoDTensor) The merged outputs of the input tensors."); + + AddComment(R"DOC( +Merge multiple LoDTensors into one according to the shard number of Ids. + + +split_ids_op -> prefetch_op -> merge_ids_op + + +merge_ids_op should be used after split_ids_op and prefetch_op: split_ids_op +splits the input Ids into multiple tensors according to each Id's shard number, +and prefetch_op sends them to the parameter server to prefetch the embedding +values. Since the split scrambles the order of the ids, merge_ids_op uses the +original Ids to restore the order of the fetched embedding values and also +passes the LoD information to the merged output.
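Sharding follows `shard_id = id % shard_num`, which is exactly what the kernel
implementation uses; the example below walks through a three-shard case.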
+ + +Example: + + Ids = [1,2,3,4,5,6] # 3 shards + +split_ids_op -> + + Id0 = [3, 6] # id % 3 == 0 + Id1 = [1, 4] # id % 3 == 1 + Id2 = [2, 5] # id % 3 == 2 + +prefetch_op -> + + X0 = [[0.3 0.3] # 3 + [0.6 0.6]] # 6 + X1 = [[0.1 0.1] # 1 + [0.4 0.4]] # 4 + X2 = [[0.2 0.2] # 2 + [0.5 0.5]] # 5 + +merge_ids_op -> + + Out = [[0.1 0.1] # 1 + [0.2 0.2] # 2 + [0.3 0.3] # 3 + [0.4 0.4] # 4 + [0.5 0.5] # 5 + [0.6 0.6]] # 6 +)DOC"); + } +}; + +class MergeIdsOp : public framework::OperatorWithKernel { + public: + using framework::OperatorWithKernel::OperatorWithKernel; + + void InferShape(framework::InferShapeContext *ctx) const override { + PADDLE_ENFORCE(ctx->HasInput("Ids"), "MergeIdsOp must have input Ids."); + PADDLE_ENFORCE(ctx->HasInputs("X"), "MergeIdsOp must have input X."); + PADDLE_ENFORCE(ctx->HasOutput("Out"), "MergeIdsOp must have output Out."); + + auto ids_var_type = ctx->GetInputsVarType("Ids").front(); + auto ids_dims = ctx->GetInputDim("Ids"); + if (ids_var_type == framework::proto::VarType::LOD_TENSOR) { + PADDLE_ENFORCE_EQ(ids_dims.size(), 2); + PADDLE_ENFORCE_EQ(ids_dims[1], 1); + } + auto x_var_type = ctx->GetInputsVarType("X"); + for (auto &var_type : x_var_type) { + PADDLE_ENFORCE_EQ(var_type, framework::proto::VarType::LOD_TENSOR, + "input X only supports LoDTensor"); + } + ctx->ShareLoD("Ids", "Out"); + } + + private: + framework::OpKernelType GetExpectedKernelType( + const framework::ExecutionContext &ctx) const override { + return framework::OpKernelType( + framework::ToDataType( + ctx.MultiInput("X").front()->type()), + ctx.GetPlace()); + } +}; + +class MergeIdsOpInferVarType : public framework::VarTypeInference { + public: + void operator()(const framework::OpDesc &op_desc, + framework::BlockDesc *block) const override { + auto *input_var = block->Var(op_desc.Input("Ids")[0]); + for (auto &out_var : op_desc.Output("Out")) { + block->Var(out_var)->SetType(input_var->GetType()); + } + } +}; + +} // namespace operators +} // namespace paddle + +namespace ops = paddle::operators; +REGISTER_OPERATOR(merge_ids, ops::MergeIdsOp, ops::MergeIdsOpMaker, + ops::MergeIdsOpInferVarType); +REGISTER_OP_CPU_KERNEL( + merge_ids, ops::MergeIdsOpKernel); diff --git a/paddle/fluid/operators/merge_ids_op.h b/paddle/fluid/operators/merge_ids_op.h new file mode 100644 index 0000000000..83712a8519 --- /dev/null +++ b/paddle/fluid/operators/merge_ids_op.h @@ -0,0 +1,92 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License.
*/ + +#pragma once + +#include +#include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/framework/tensor_util.h" +#include "paddle/fluid/operators/math/selected_rows_functor.h" + +namespace paddle { +namespace operators { + +template +class MergeIdsOpKernel : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext &ctx) const override { + auto place = ctx.GetPlace(); + if (!platform::is_cpu_place(place)) { + PADDLE_THROW("MergeIds does not support GPU kernel"); + } + VLOG(3) << "run in MergeIdsOpKernel"; + + const auto *ids_var = ctx.InputVar("Ids"); + PADDLE_ENFORCE(ids_var->IsType(), + "only supports merging Ids of type LoDTensor"); + + const auto &ids_tensor = ids_var->Get(); + const auto &ids_dims = ids_tensor.dims(); + const int64_t *ids = ids_tensor.data(); + + auto x_tensors = ctx.MultiInput("X"); + + auto *out = ctx.Output("Out"); + + int batch_size = 0; + int embedding_size = 0; + for (auto &input : x_tensors) { + if (framework::product(input->dims()) != 0) { + if (embedding_size == 0) { + embedding_size = input->dims()[1]; + } + PADDLE_ENFORCE_EQ(embedding_size, input->dims()[1], + "the embedding size of all inputs should be the same"); + batch_size += input->dims()[0]; + } + } + PADDLE_ENFORCE_EQ( + batch_size, ids_dims[0], + "the batch size of ids and the merged embedding values should be the same"); + + const size_t shard_num = x_tensors.size(); + + if (shard_num == 1) { + VLOG(3) << "only one shard, we can copy the data directly"; + TensorCopy(*x_tensors[0], place, out); + } else { + std::vector in_indices(shard_num, 0); + auto *out_data = out->mutable_data( + framework::make_ddim({batch_size, embedding_size}), place); + // copy data from the per-shard inputs to out. + for (int i = 0; i < ids_dims[0]; ++i) { + int64_t id = ids[i]; + size_t shard_id = static_cast(id) % shard_num; + int index = in_indices[shard_id]; + memcpy(out_data + embedding_size * i, + x_tensors[shard_id]->data() + index * embedding_size, + sizeof(T) * embedding_size); + in_indices[shard_id] += 1; + } + + for (size_t i = 0; i < shard_num; ++i) { + PADDLE_ENFORCE_EQ(in_indices[i], x_tensors[i]->dims()[0], + "after the merge, all data in x_tensors should be used"); + } + } + } +}; + +} // namespace operators +} // namespace paddle diff --git a/paddle/fluid/operators/merge_lod_tensor_op.cc b/paddle/fluid/operators/merge_lod_tensor_op.cc index 4ebf20cbba..2dc1467b0d 100644 --- a/paddle/fluid/operators/merge_lod_tensor_op.cc +++ b/paddle/fluid/operators/merge_lod_tensor_op.cc @@ -44,8 +44,10 @@ class MergeLoDTensorOp : public framework::OperatorBase { scope.FindVar(Output("Out"))->GetMutable(); auto level = static_cast(Attr("level")); - auto &mask_dim = mask.dims(); + PADDLE_ENFORCE(in_true.numel() || in_false.numel(), + "Input(InTrue) or Input(InFalse) should be initialized."); + auto &mask_dim = mask.dims(); std::unique_ptr cpu_mask{new framework::LoDTensor()}; if (platform::is_cpu_place(mask.place())) { cpu_mask->ShareDataWith(mask); @@ -59,19 +61,27 @@ class MergeLoDTensorOp : public framework::OperatorBase { } auto *mask_data = cpu_mask->data(); - int rank = in_true.dims().size(); - platform::Place place = in_true.place(); - std::type_index data_type = in_true.type(); - framework::DDim in_true_dims = - framework::slice_ddim(in_true.dims(), 1, rank); - + platform::Place place = dev_place; int64_t batch_size = in_true.dims()[0] + in_false.dims()[0]; - auto in_true_dim_vec = framework::vectorize(in_true_dims); - in_true_dim_vec.insert(in_true_dim_vec.begin(), batch_size); + std::type_index
data_type = + in_true.IsInitialized() ? in_true.type() : in_false.type(); + int rank; + framework::DDim in_dims; + if (in_true.IsInitialized()) { + rank = in_true.dims().size(); + in_dims = framework::slice_ddim(in_true.dims(), 1, rank); + } else { + rank = in_false.dims().size(); + in_dims = framework::slice_ddim(in_false.dims(), 1, rank); + } + + auto in_dim_vec = framework::vectorize(in_dims); + in_dim_vec.insert(in_dim_vec.begin(), batch_size); - framework::DDim out_dims = framework::make_ddim(in_true_dim_vec); + framework::DDim out_dims = framework::make_ddim(in_dim_vec); out->Resize(out_dims); + out->mutable_data(place, data_type); auto *out_lod = out->mutable_lod(); @@ -121,8 +131,7 @@ class MergeLoDTensorOp : public framework::OperatorBase { class MergeLoDTensorOpProtoMaker : public framework::OpProtoAndCheckerMaker { public: - MergeLoDTensorOpProtoMaker(OpProto *proto, OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("X", "The input LoDTensor, contains complete lod information to " "construct the output"); diff --git a/paddle/fluid/operators/minus_op.cc b/paddle/fluid/operators/minus_op.cc index a302b24560..34571a38a1 100644 --- a/paddle/fluid/operators/minus_op.cc +++ b/paddle/fluid/operators/minus_op.cc @@ -48,8 +48,7 @@ class MinusOp : public framework::OperatorWithKernel { class MinusOpMaker : public framework::OpProtoAndCheckerMaker { public: - MinusOpMaker(OpProto *proto, OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("X", "The left tensor of minus operator."); AddInput("Y", "The right tensor of minus operator."); AddOutput("Out", "The output tensor of minus operator."); diff --git a/paddle/fluid/operators/mkldnn_activation_op.h b/paddle/fluid/operators/mkldnn_activation_op.h index f26a165b5a..85664623d7 100644 --- a/paddle/fluid/operators/mkldnn_activation_op.h +++ b/paddle/fluid/operators/mkldnn_activation_op.h @@ -13,6 +13,8 @@ See the License for the specific language governing permissions and limitations under the License. 
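// The merge_lod_tensor change above takes the per-example shape from
// whichever branch is initialized. Restated as a small sketch (the helper
// name is hypothetical; the logic mirrors the diff):
//
//   framework::DDim MergedDims(const framework::LoDTensor& in_true,
//                              const framework::LoDTensor& in_false) {
//     const auto& src = in_true.IsInitialized() ? in_true : in_false;
//     auto tail = framework::slice_ddim(src.dims(), 1, src.dims().size());
//     auto vec = framework::vectorize(tail);
//     // the merged batch is the sum of both branches' batch sizes
//     vec.insert(vec.begin(), in_true.dims()[0] + in_false.dims()[0]);
//     return framework::make_ddim(vec);
//   }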
*/ #pragma once +#include + #include "paddle/fluid/framework/eigen.h" #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/operators/detail/safe_ref.h" @@ -60,52 +62,5 @@ class MKLDNNActivationGradKernel } }; -namespace { // NOLINT -framework::OpKernelType GetKernelType( - const framework::ExecutionContext& ctx, - const framework::OperatorWithKernel& oper) { - framework::LibraryType library{framework::LibraryType::kPlain}; -#ifdef PADDLE_WITH_MKLDNN - if (library == framework::LibraryType::kPlain && - platform::CanMKLDNNBeUsed(ctx)) { - library = framework::LibraryType::kMKLDNN; - } -#endif - framework::DataLayout layout = framework::DataLayout::kAnyLayout; - return framework::OpKernelType( - framework::ToDataType(ctx.Input("X")->type()), - ctx.GetPlace(), layout, library); -} -} // anonymous namespace - -class ActivationWithMKLDNNOp : public framework::OperatorWithKernel { - public: - using framework::OperatorWithKernel::OperatorWithKernel; - - void InferShape(framework::InferShapeContext* ctx) const override { - ctx->SetOutputDim("Out", ctx->GetInputDim("X")); - ctx->ShareLoD("X", /*->*/ "Out"); - } - - framework::OpKernelType GetExpectedKernelType( - const framework::ExecutionContext& ctx) const override { - return GetKernelType(ctx, *this); - } -}; - -class ActivationWithMKLDNNOpGrad : public framework::OperatorWithKernel { - public: - using framework::OperatorWithKernel::OperatorWithKernel; - - void InferShape(framework::InferShapeContext* ctx) const override { - ctx->SetOutputDim(framework::GradVarName("X"), ctx->GetInputDim("Out")); - } - - framework::OpKernelType GetExpectedKernelType( - const framework::ExecutionContext& ctx) const override { - return GetKernelType(ctx, *this); - } -}; - } // namespace operators } // namespace paddle diff --git a/paddle/fluid/operators/modified_huber_loss_op.cc b/paddle/fluid/operators/modified_huber_loss_op.cc index 3a0fc74584..35db4c1ad1 100644 --- a/paddle/fluid/operators/modified_huber_loss_op.cc +++ b/paddle/fluid/operators/modified_huber_loss_op.cc @@ -39,8 +39,7 @@ class ModifiedHuberLossOp : public framework::OperatorWithKernel { class ModifiedHuberLossOpMaker : public framework::OpProtoAndCheckerMaker { public: - ModifiedHuberLossOpMaker(OpProto* proto, OpAttrChecker* op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("X", "The input tensor of modified huber loss op. " "X is 2-D tensor with shape [batch_size, 1]."); diff --git a/paddle/fluid/operators/momentum_op.cc b/paddle/fluid/operators/momentum_op.cc index f13ec53905..5f43c58108 100644 --- a/paddle/fluid/operators/momentum_op.cc +++ b/paddle/fluid/operators/momentum_op.cc @@ -62,8 +62,7 @@ class MomentumOp : public framework::OperatorWithKernel { class MomentumOpMaker : public framework::OpProtoAndCheckerMaker { public: - MomentumOpMaker(OpProto *proto, OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("Param", "(Tensor, default Tensor) " "Input parameter that has to be updated"); @@ -99,7 +98,7 @@ The update equations are as follows: $$ velocity = mu * velocity + gradient \\ if (use\_nesterov): \\ - param = param - gradient * learning\_rate + mu * velocity * learning\_rate \\ + param = param - (gradient + mu * velocity) * learning\_rate \\ else: \\ param = param - learning\_rate * velocity. 
\\ $$ diff --git a/paddle/fluid/operators/momentum_op.cu b/paddle/fluid/operators/momentum_op.cu index 5eb9d99502..a3932db1f3 100644 --- a/paddle/fluid/operators/momentum_op.cu +++ b/paddle/fluid/operators/momentum_op.cu @@ -30,7 +30,7 @@ __global__ void MomentumKernel(const T* p, const T* g, const T* v, T g_val = g[i]; T v_new = v[i] * mu + g_val; v_out[i] = v_new; - p_out[i] = p[i] - (g_val - v_new * mu) * lr; + p_out[i] = p[i] - (g_val + v_new * mu) * lr; } } else { for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < num; diff --git a/paddle/fluid/operators/momentum_op.h b/paddle/fluid/operators/momentum_op.h index 04a1929b84..264726040f 100644 --- a/paddle/fluid/operators/momentum_op.h +++ b/paddle/fluid/operators/momentum_op.h @@ -46,7 +46,7 @@ class MomentumOpKernel : public framework::OpKernel { v_out = v * mu + g; if (use_nesterov) { - p_out = p - (g - v_out * mu) * lr[0]; + p_out = p - (g + v_out * mu) * lr[0]; } else { p_out = p - lr[0] * v_out; } diff --git a/paddle/fluid/operators/mul_mkldnn_op.cc b/paddle/fluid/operators/mul_mkldnn_op.cc deleted file mode 100644 index a5f3a98f67..0000000000 --- a/paddle/fluid/operators/mul_mkldnn_op.cc +++ /dev/null @@ -1,197 +0,0 @@ -/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. */ - -#include "mkldnn.hpp" -#include "paddle/fluid/framework/tensor.h" -#include "paddle/fluid/operators/mul_op.h" -#include "paddle/fluid/platform/device_context.h" -#include "paddle/fluid/platform/mkldnn_helper.h" - -namespace paddle { -namespace operators { - -using paddle::framework::Tensor; -using paddle::platform::MKLDNNDeviceContext; - -template -mkldnn::memory::desc type(const std::vector& dims, Format&& f) { - return platform::MKLDNNMemDesc(dims, mkldnn::memory::data_type::f32, f); -} - -template -class MulMKLDNNOpKernel : public paddle::framework::OpKernel { - void Compute(const paddle::framework::ExecutionContext& ctx) const override { - PADDLE_ENFORCE(paddle::platform::is_cpu_place(ctx.GetPlace()), - "It must use CPUPlace."); - - auto& dev_ctx = ctx.template device_context(); - auto mkldnn_engine = dev_ctx.GetEngine(); - - auto input = ctx.Input("X"); - auto weight = ctx.Input("Y"); - - PADDLE_ENFORCE(input->dims().size() & (2 | 4), - "Input must be with 2 or 4 dimensions, i.e. NC or NCHW"); - PADDLE_ENFORCE(weight->dims().size() & (2 | 4), - "Weights must be with 2 or 4 dimensions, i.e. OI or OIHW"); - - std::vector w_tz = paddle::framework::vectorize2int(weight->dims()); - std::vector src_tz = paddle::framework::vectorize2int(input->dims()); - - auto src_md = - src_tz.size() != 2 - ? type(src_tz, mkldnn::memory::format::nchw) - : type({src_tz[0], src_tz[1]}, mkldnn::memory::format::nc); - - auto dst_md = type({src_tz[0], w_tz[1]}, mkldnn::memory::format::nc); - - auto weights_md = - src_tz.size() != 2 - ? 
type({w_tz[1], src_tz[1], src_tz[2], src_tz[3]}, - mkldnn::memory::format::oihw) - : type({w_tz[1], src_tz[1]}, mkldnn::memory::format::oi); - - auto output = ctx.Output("Out"); - T* output_data = output->mutable_data(ctx.GetPlace()); - - const std::string key = ctx.op().Output("Out"); - const std::string key_fc_pd = key + "@mul_pd"; - - const T* input_data = input->data(); - const T* w_data = weight->data(); - - auto dst_memory = mkldnn::memory({dst_md, mkldnn_engine}, output_data); - - auto src_memory = mkldnn::memory({src_md, mkldnn_engine}, - platform::to_void_cast(input_data)); - - auto weights_memory = mkldnn::memory({weights_md, mkldnn_engine}, - platform::to_void_cast(w_data)); - - auto pd = platform::MKLDNNFwdPrimitiveDesc( - mkldnn_engine, src_md, weights_md, dst_md); - - dev_ctx.SetBlob(key_fc_pd, pd); - - auto forward = mkldnn::inner_product_forward(*pd, src_memory, - weights_memory, dst_memory); - - std::vector pipeline = {forward}; - mkldnn::stream(mkldnn::stream::kind::eager).submit(pipeline).wait(); - } -}; - -template -class MulMKLDNNGradOpKernel : public paddle::framework::OpKernel { - public: - void Compute(const paddle::framework::ExecutionContext& ctx) const override { - PADDLE_ENFORCE(paddle::platform::is_cpu_place(ctx.GetPlace()), - "It must use CPUPlace."); - - auto& dev_ctx = ctx.template device_context(); - auto mkldnn_engine = dev_ctx.GetEngine(); - - const Tensor* input = ctx.Input("X"); - const Tensor* w = ctx.Input("Y"); - - const Tensor* out_grad = ctx.Input(framework::GradVarName("Out")); - Tensor* input_grad = ctx.Output(framework::GradVarName("X")); - Tensor* w_grad = ctx.Output(framework::GradVarName("Y")); - - const std::string key = ctx.op().Input("Out"); - const std::string key_fc_pd = key + "@mul_pd"; - - const T* input_data = input->data(); - const T* w_data = w->data(); - const T* out_grad_data = out_grad->data(); - T* input_grad_data = nullptr; - T* w_grad_data = nullptr; - - if (input_grad) { - input_grad_data = input_grad->mutable_data(ctx.GetPlace()); - } - if (w_grad) { - w_grad_data = w_grad->mutable_data(ctx.GetPlace()); - } - - std::vector src_tz = paddle::framework::vectorize2int(input->dims()); - std::vector w_tz = paddle::framework::vectorize2int(w->dims()); - - auto src_md = - src_tz.size() != 2 - ? type(src_tz, mkldnn::memory::format::nchw) - : type({src_tz[0], src_tz[1]}, mkldnn::memory::format::nc); - - auto dst_md = type({src_tz[0], w_tz[1]}, mkldnn::memory::format::nc); - - auto weights_md = - src_tz.size() != 2 - ? 
type({w_tz[1], src_tz[1], src_tz[2], src_tz[3]}, - mkldnn::memory::format::oihw) - : type({w_tz[1], src_tz[1]}, mkldnn::memory::format::oi); - - auto src_memory = mkldnn::memory({src_md, mkldnn_engine}, - platform::to_void_cast(input_data)); - - auto dst_memory = mkldnn::memory({dst_md, mkldnn_engine}, - platform::to_void_cast(out_grad_data)); - - auto weight_memory = mkldnn::memory({weights_md, mkldnn_engine}, - platform::to_void_cast(w_data)); - - auto pd = - std::static_pointer_cast( - dev_ctx.GetBlob(key_fc_pd)); - - PADDLE_ENFORCE(pd != nullptr, "Fail to find pd in device context"); - - if (w_grad) { - auto weights_grad_memory = mkldnn::memory( - {weights_md, mkldnn_engine}, platform::to_void_cast(w_grad_data)); - - auto bwd_weight_pd = platform::MKLDNNBwdPrimitiveDesc< - mkldnn::inner_product_backward_weights>(mkldnn_engine, *pd, src_md, - weights_md, dst_md); - - auto bwd_weights_prim = mkldnn::inner_product_backward_weights( - bwd_weight_pd, src_memory, dst_memory, weights_grad_memory); - - std::vector pipeline{bwd_weights_prim}; - mkldnn::stream(mkldnn::stream::kind::eager).submit(pipeline).wait(); - } - - if (input_grad) { - auto src_grad_memory = mkldnn::memory( - {src_md, mkldnn_engine}, platform::to_void_cast(input_grad_data)); - - auto bwd_data_pd = - platform::MKLDNNBwdPrimitiveDesc( - mkldnn_engine, *pd, src_md, weights_md, dst_md); - - auto bwd_data_prim = mkldnn::inner_product_backward_data( - bwd_data_pd, dst_memory, weight_memory, src_grad_memory); - - std::vector pipeline{bwd_data_prim}; - mkldnn::stream(mkldnn::stream::kind::eager).submit(pipeline).wait(); - } - } -}; -} // namespace operators -} // namespace paddle - -REGISTER_OP_KERNEL(mul, MKLDNN, ::paddle::platform::CPUPlace, - paddle::operators::MulMKLDNNOpKernel); - -REGISTER_OP_KERNEL(mul_grad, MKLDNN, ::paddle::platform::CPUPlace, - paddle::operators::MulMKLDNNGradOpKernel); diff --git a/paddle/fluid/operators/mul_op.cc b/paddle/fluid/operators/mul_op.cc index 6903cf83b4..51993398bd 100644 --- a/paddle/fluid/operators/mul_op.cc +++ b/paddle/fluid/operators/mul_op.cc @@ -16,10 +16,6 @@ limitations under the License. 
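// The deleted MKLDNN kernel above follows the usual MKL-DNN 0.x execution
// pattern: wrap raw pointers in mkldnn::memory objects, build a primitive,
// and submit it on an eager stream. A minimal sketch of that pattern (md,
// engine, ptr and prim are assumed to be given; names are illustrative):
//
//   auto mem = mkldnn::memory({md, engine}, ptr);   // wrap user data
//   std::vector<mkldnn::primitive> pipeline{prim};  // primitives to execute
//   mkldnn::stream(mkldnn::stream::kind::eager).submit(pipeline).wait();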
*/ #include #include -#ifdef PADDLE_WITH_MKLDNN -#include "paddle/fluid/platform/mkldnn_helper.h" -#endif - namespace paddle { namespace operators { @@ -76,28 +72,11 @@ class MulOp : public framework::OperatorWithKernel { ctx->SetOutputDim("Out", framework::make_ddim(output_dims)); ctx->ShareLoD("X", /*->*/ "Out"); } - - private: - framework::OpKernelType GetExpectedKernelType( - const framework::ExecutionContext& ctx) const override { - framework::LibraryType library{framework::LibraryType::kPlain}; -#ifdef PADDLE_WITH_MKLDNN - if (library == framework::LibraryType::kPlain && - platform::CanMKLDNNBeUsed(ctx)) { - library = framework::LibraryType::kMKLDNN; - } -#endif - framework::DataLayout layout{framework::DataLayout::kAnyLayout}; - return framework::OpKernelType( - framework::ToDataType(ctx.Input("X")->type()), ctx.GetPlace(), - layout, library); - } }; class MulOpMaker : public framework::OpProtoAndCheckerMaker { public: - MulOpMaker(OpProto* proto, OpAttrChecker* op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("X", "(Tensor), The first input tensor of mul op."); AddInput("Y", "(Tensor), The second input tensor of mul op."); AddOutput("Out", "(Tensor), The output tensor of mul op."); @@ -121,9 +100,6 @@ class MulOpMaker : public framework::OpProtoAndCheckerMaker { )DOC") .SetDefault(1) .EqualGreaterThan(1); - AddAttr("use_mkldnn", - "(bool, default false) Only used in mkldnn kernel") - .SetDefault(false); AddAttr( "y_num_col_dims", R"DOC((int, default 1), The mul_op can take tensors with more than two, @@ -178,22 +154,6 @@ class MulGradOp : public framework::OperatorWithKernel { ctx->SetOutputDim(y_grad_name, y_dims); } } - - private: - framework::OpKernelType GetExpectedKernelType( - const framework::ExecutionContext& ctx) const override { - framework::LibraryType library{framework::LibraryType::kPlain}; -#ifdef PADDLE_WITH_MKLDNN - if (library == framework::LibraryType::kPlain && - platform::CanMKLDNNBeUsed(ctx)) { - library = framework::LibraryType::kMKLDNN; - } -#endif - framework::DataLayout layout{framework::DataLayout::kAnyLayout}; - return framework::OpKernelType( - framework::ToDataType(ctx.Input("X")->type()), ctx.GetPlace(), - layout, library); - } }; } // namespace operators diff --git a/paddle/fluid/operators/multiplex_op.cc b/paddle/fluid/operators/multiplex_op.cc index b698c1bf8a..18ad46cb5e 100644 --- a/paddle/fluid/operators/multiplex_op.cc +++ b/paddle/fluid/operators/multiplex_op.cc @@ -61,28 +61,47 @@ class MultiplexOp : public framework::OperatorWithKernel { class MultiplexOpMaker : public framework::OpProtoAndCheckerMaker { public: - MultiplexOpMaker(OpProto* proto, OpAttrChecker* op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { - AddInput("Ids", "The index tensor of multiplex operator."); - AddInput("X", "The candidate tensors of multiplex operator.") + void Make() override { + AddInput("Ids", + "Tensor, index variable which is a 2-D tensor with shape " + "[M, 1] where M is the batch size."); + AddInput("X", + "A list of variables to gather from. All variables have the same " + "shape and the rank is at least 2.") .AsDuplicable(); AddOutput("Out", "The output tensor of multiplex operator."); AddComment(R"DOC( -Multiplex Operator. - -Multiplex multiple tensors according to the index provided by the index tensor. - -Ids: the index tensor. -X[0 : N - 1]: the candidate tensors for output (N >= 2). 
-For each index i from 0 to batchSize - 1, the output is the i-th row of the +Referring to the given index variable, this layer selects rows from the +input variables to construct a multiplex variable. Assume that there are +:math:`m` input variables, that :math:`I_i` represents the i-th input +variable, and that :math:`i` is in [0, :math:`m`). All input variables are +tensors with the same shape [:math:`d_0`, :math:`d_1`, ..., :math:`d_R`]. +Please note that the rank of the input tensor should be at least 2. Each input +variable will be treated as a 2-D matrix with shape [:math:`M`, :math:`N`] +where :math:`M` stands for :math:`d_0` and :math:`N` stands for :math:`d_1` * :math:`d_2` +* ... * :math:`d_R`. Let :math:`I_i[j]` be the j-th row of the i-th input +variable. The given index variable should be a 2-D tensor with shape +[:math:`M`, 1]. Let `ID[i]` be the i-th index value of the index variable. +Then the output variable will be a tensor with shape [:math:`d_0`, +:math:`d_1`, ..., :math:`d_R`]. If we treat the output tensor as a 2-D +matrix with shape [:math:`M`, :math:`N`] and let :math:`O[i]` be the i-th +row of the matrix, then `O[i]` is equal to :math:`I_{ID[i]}[i]`. + +* Ids: the index tensor. + +* X[0 : N - 1]: the candidate tensors for output (N >= 2). + +* For each index i from 0 to batchSize - 1, the output is the i-th row of the (Ids[i])-th tensor. For the i-th row of the output tensor: -$$y[i] = x_{k}[i]$$ +$$ +y[i] = x_{k}[i] +$$ -where `y` is the output tensor, `x_{k}` is the k-th input tensor, -and `k = Ids[i]`. +where $y$ is the output tensor, $x_{k}$ is the k-th input tensor, +and $k = Ids[i]$. )DOC"); } diff --git a/paddle/fluid/operators/nccl_op.cc b/paddle/fluid/operators/nccl_op.cc index 5e4ed886b1..0018139cb0 100644 --- a/paddle/fluid/operators/nccl_op.cc +++ b/paddle/fluid/operators/nccl_op.cc @@ -76,8 +76,7 @@ class NCCLInitOpShapeInference : public framework::InferShapeBase { class NCCLInitOpMaker : public framework::OpProtoAndCheckerMaker { public: - NCCLInitOpMaker(OpProto *proto, OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput(kParallelScopes, "The working place of parallel do."); AddOutput("Communicator", "Create Communicator for communicating between gpus"); @@ -118,8 +117,7 @@ class NCCLAllReduceOp : public framework::OperatorWithKernel { // AllReduceOp class NCCLAllReduceOpMaker : public framework::OpProtoAndCheckerMaker { public: - NCCLAllReduceOpMaker(OpProto *proto, OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("X", "The input of AllReduce op"); AddInput("Communicator", "Communicator for communicating between gpus"); AddOutput("Out", "The output of AllReduce op"); @@ -165,8 +163,7 @@ class NCCLReduceOp : public framework::OperatorWithKernel { // ReduceOp class NCCLReduceOpMaker : public framework::OpProtoAndCheckerMaker { public: - NCCLReduceOpMaker(OpProto *proto, OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("X", "The input of Reduce op"); AddInput("Communicator", "Communicator for communicating between gpus"); AddOutput("Out", "The output of Reduce op"); @@ -214,8 +211,7 @@ class NCCLBcastOp : public framework::OperatorWithKernel { // BcastOp class NCCLBcastOpMaker : public framework::OpProtoAndCheckerMaker { public: - NCCLBcastOpMaker(OpProto *proto, OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("X", "The input of BcastSend op");
AddInput("Communicator", "Communicator for communicating between gpus"); AddOutput("Out", "The output of Bcast"); diff --git a/paddle/fluid/operators/nccl_op_test.cu.cc b/paddle/fluid/operators/nccl_op_test.cu.cc index ef54d79fdf..d5fb7a12e5 100644 --- a/paddle/fluid/operators/nccl_op_test.cu.cc +++ b/paddle/fluid/operators/nccl_op_test.cu.cc @@ -19,7 +19,6 @@ limitations under the License. */ #include // NOLINT #include -#include "paddle/fluid/framework/init.h" #include "paddle/fluid/framework/op_desc.h" #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/framework/program_desc.h" @@ -27,6 +26,7 @@ limitations under the License. */ #include "paddle/fluid/platform/device_context.h" #include "paddle/fluid/platform/enforce.h" #include "paddle/fluid/platform/gpu_info.h" +#include "paddle/fluid/platform/init.h" #include "paddle/fluid/platform/place.h" USE_NO_KERNEL_OP(ncclInit); diff --git a/paddle/fluid/operators/nce_op.cc b/paddle/fluid/operators/nce_op.cc index 192bdf8ea5..e471f04662 100644 --- a/paddle/fluid/operators/nce_op.cc +++ b/paddle/fluid/operators/nce_op.cc @@ -75,8 +75,7 @@ class NCEOp : public framework::OperatorWithKernel { class NCEOpMaker : public framework::OpProtoAndCheckerMaker { public: - NCEOpMaker(OpProto* proto, OpAttrChecker* op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("Input", "(Tensor) A tensor of shape [batch_size, dim]."); AddInput( "Label", @@ -129,8 +128,10 @@ class NCEOpMaker : public framework::OpProtoAndCheckerMaker { "user should avoid setting this attribute.") .SetDefault({}); AddComment(R"DOC( -Compute and return the noise-contrastive estimation training loss. -See [Noise-contrastive estimation: A new estimation principle for unnormalized statistical models](http://www.jmlr.org/proceedings/papers/v9/gutmann10a/gutmann10a.pdf). +Compute and return the noise-contrastive estimation training loss. See +`Noise-contrastive estimation: A new estimation principle for unnormalized +statistical models + `_. By default this operator uses a uniform distribution for sampling. )DOC"); } diff --git a/paddle/fluid/operators/norm_op.cc b/paddle/fluid/operators/norm_op.cc index 30a991224f..aa19c62c83 100644 --- a/paddle/fluid/operators/norm_op.cc +++ b/paddle/fluid/operators/norm_op.cc @@ -16,41 +16,34 @@ limitations under the License. */ namespace paddle { namespace operators { -template class NormOpMaker : public framework::OpProtoAndCheckerMaker { public: - NormOpMaker(OpProto* proto, OpAttrChecker* op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { - AddInput( - "X", - "(Tensor) The input tensor of norm operator. " - "The format of input tensor is NCHW. Where N is batch size, C is the " - "number of channels, H and W is the height and width of feature."); - AddInput("Scale", - "(Tensor) The input tensor of norm operator. " - "The format of input tensor is C * 1."); - AddAttr("epsilon", - "(float, default 1e-10) Constant " - "for numerical stability.") + void Make() override { + AddInput("X", "(Tensor) A tensor of rank >= axis."); + AddAttr("axis", + "The axis on which to apply normalization. If axis < 0, " + "the dimension to normalization is rank(X) + axis. -1 is " + "the last dimension."); + AddAttr("epsilon", + "(float, default 1e-10) The epsilon value is used " + "to avoid division by zero.") .SetDefault(1.0e-10f); - AddOutput("Out", - "(Tensor) The output tensor of norm operator." - "N * M." 
- "M = C * H * W"); + AddOutput("Norm", + "(Tensor) A tensor saved the `sqrt(sum(x) + epsion)` will " + "be used in backward kernel.") + .AsIntermediate(); + AddOutput("Out", "(Tensor) A tensor of the same shape as X."); AddComment(R"DOC( - "Input shape: $(N, C, H, W)$ - Scale shape: $(C, 1)$ - Output shape: $(N, C, H, W)$ - Where - forward - $$ - [\frac {x_{1}}{\sqrt{\sum{x_{i}^{2}}}} \frac {x_{2}}{\sqrt{\sum{x_{i}^{2}}}} \frac {x_{3}}{\sqrt{\sum{x_{i}^{2}}}} \cdot \cdot \cdot \frac {x_{n}}{\sqrt{\sum{x_{i}^{2}}}}] - $$ - backward - $$ - \frac{\frac{\mathrm{d}L }{\mathrm{d}y_{1}} - \frac {x_{1}\sum {\frac{\mathrm{d} L}{\mathrm{d} y_{j}}}x_{j}}{\sum x_{j}^{2}} }{\sqrt{\sum{x_{j}^{2}}}} - $$ - )DOC"); + +Given a tensor, apply 2-normalization along the provided axis. + +$$ +y = \frac{x}{ \sqrt{\sum {x^2} + epsion }} +$$ + +where, $\sum {x^2}$ is calculated along the `axis` dimension. + +)DOC"); } }; @@ -59,15 +52,15 @@ class NormOp : public framework::OperatorWithKernel { using framework::OperatorWithKernel::OperatorWithKernel; void InferShape(framework::InferShapeContext* ctx) const override { PADDLE_ENFORCE(ctx->HasInput("X"), - "Input(X) of NormOp" - "should not be null."); - PADDLE_ENFORCE(ctx->HasInput("Scale"), - "Input(Scale) of NormOp" - "should not be null."); + "Input(X) of NormOp should not be null."); PADDLE_ENFORCE(ctx->HasOutput("Out"), "Output(Out) of NormOp should not be null."); - auto in_x_dims = ctx->GetInputDim("X"); - ctx->SetOutputDim("Out", in_x_dims); + auto xdim = ctx->GetInputDim("X"); + ctx->SetOutputDim("Out", xdim); + int axis = ctx->Attrs().Get("axis"); + if (axis < 0) axis = xdim.size() + axis; + xdim[axis] = 1; + ctx->SetOutputDim("Norm", xdim); } }; @@ -85,12 +78,12 @@ class NormOpGrad : public framework::OperatorWithKernel { } // namespace paddle namespace ops = paddle::operators; -REGISTER_OPERATOR(norm, ops::NormOp, ops::NormOpMaker, +using CPU = paddle::platform::CPUDeviceContext; + +REGISTER_OPERATOR(norm, ops::NormOp, ops::NormOpMaker, paddle::framework::DefaultGradOpDescMaker); REGISTER_OPERATOR(norm_grad, ops::NormOpGrad); -REGISTER_OP_CPU_KERNEL( - norm, ops::NormKernel, - ops::NormKernel); -REGISTER_OP_CPU_KERNEL( - norm_grad, ops::NormGradKernel, - ops::NormGradKernel); +REGISTER_OP_CPU_KERNEL(norm, ops::NormKernel, + ops::NormKernel); +REGISTER_OP_CPU_KERNEL(norm_grad, ops::NormGradKernel, + ops::NormGradKernel); diff --git a/paddle/fluid/operators/norm_op.cu b/paddle/fluid/operators/norm_op.cu index d1d9be5074..1d0021d33f 100644 --- a/paddle/fluid/operators/norm_op.cu +++ b/paddle/fluid/operators/norm_op.cu @@ -16,9 +16,9 @@ limitations under the License. */ #include "paddle/fluid/operators/norm_op.h" namespace ops = paddle::operators; -REGISTER_OP_CUDA_KERNEL( - norm, ops::NormKernel, - ops::NormKernel); -REGISTER_OP_CUDA_KERNEL( - norm_grad, ops::NormGradKernel, - ops::NormGradKernel); +using CUDA = paddle::platform::CUDADeviceContext; + +REGISTER_OP_CUDA_KERNEL(norm, ops::NormKernel, + ops::NormKernel); +REGISTER_OP_CUDA_KERNEL(norm_grad, ops::NormGradKernel, + ops::NormGradKernel); diff --git a/paddle/fluid/operators/norm_op.h b/paddle/fluid/operators/norm_op.h index 0ad29e8a03..3167bdc8ac 100644 --- a/paddle/fluid/operators/norm_op.h +++ b/paddle/fluid/operators/norm_op.h @@ -19,156 +19,110 @@ limitations under the License. 
*/ namespace paddle { namespace operators { -template +inline void GetDims(const framework::DDim& dim, int axis, int* pre, int* n, + int* post) { + *pre = 1; + *post = 1; + *n = dim[axis]; + for (int i = 0; i < axis; ++i) { + (*pre) *= dim[i]; + } + for (int i = axis + 1; i < dim.size(); ++i) { + (*post) *= dim[i]; + } +} + +template class NormKernel : public framework::OpKernel { public: - void Compute(const framework::ExecutionContext& context) const override { - const framework::Tensor* in_x = context.Input("X"); - const framework::Tensor* scale = context.Input("Scale"); - auto* out = context.Output("Out"); - auto epsilon = static_cast(context.Attr("epsilon")); - out->mutable_data(context.GetPlace()); - int batch_size = in_x->dims()[0]; - int channels = in_x->dims()[1]; - int height = in_x->dims()[2]; - int width = in_x->dims()[3]; - int fea_len = height * width; - auto* place = - context.template device_context().eigen_device(); - auto x = - framework::EigenMatrix::From( - *in_x, framework::make_ddim({batch_size, fea_len * channels})); - // get square - framework::Tensor x_square; - x_square.mutable_data(in_x->dims(), context.GetPlace()); - auto x_square_eigen = - framework::EigenMatrix::From( - x_square, framework::make_ddim({batch_size, fea_len * channels})); - x_square_eigen.device(*place) = x.square(); - auto scale_eigen = - framework::EigenVector::Flatten( - *scale); - for (int n = 0; n < batch_size; ++n) { - framework::Tensor in_x_batch = in_x->Slice(n, n + 1); - auto in_x_batch_eigen = - framework::EigenMatrix::From( - in_x_batch, framework::make_ddim({channels, fea_len})); - framework::Tensor x_square_batch = x_square.Slice(n, n + 1); - auto x_square_batch_eigen = - framework::EigenMatrix::From( - x_square_batch, framework::make_ddim({channels, fea_len})); - framework::Tensor out_batch = out->Slice(n, n + 1); - auto out_batch_eigen = - framework::EigenMatrix::From( - out_batch, framework::make_ddim({channels, fea_len})); - framework::Tensor tmp_tensor; - tmp_tensor.mutable_data(framework::make_ddim({1, fea_len}), - context.GetPlace()); - auto tmp = framework::EigenVector::Flatten(tmp_tensor); - // get colsum and sqrt , inverse - auto dim = Eigen::array({{0}}); - tmp.device(*place) = x_square_batch_eigen.sum(dim); - tmp.device(*place) = (tmp + epsilon).sqrt().inverse(); - Eigen::array broadcast_dim_col; - broadcast_dim_col[1] = 1; - broadcast_dim_col[0] = channels; - out_batch_eigen.device(*place) = - in_x_batch_eigen * (tmp.broadcast(broadcast_dim_col)); - Eigen::array broadcast_dim_row; - broadcast_dim_row[1] = fea_len; - broadcast_dim_row[0] = 1; - out_batch_eigen.device(*place) = - out_batch_eigen * (scale_eigen.broadcast(broadcast_dim_row)); - } + void Compute(const framework::ExecutionContext& ctx) const override { + auto* in_x = ctx.Input("X"); + auto* out_y = ctx.Output("Out"); + auto* out_norm = ctx.Output("Norm"); + out_y->mutable_data(ctx.GetPlace()); + out_norm->mutable_data(ctx.GetPlace()); + + auto xdim = in_x->dims(); + auto ndim = out_norm->dims(); + T eps = static_cast(ctx.Attr("epsilon")); + int axis = ctx.Attr("axis"); + if (axis < 0) axis = xdim.size() + axis; + int pre, n, post; + GetDims(xdim, axis, &pre, &n, &post); + + auto* place = ctx.template device_context().eigen_device(); + + Eigen::DSizes shape(pre, n, post); + Eigen::DSizes norm_shape(pre, post); + + auto x_e = framework::EigenVector::Flatten(*in_x); + auto y_e = framework::EigenVector::Flatten(*out_y); + auto norm_e = framework::EigenVector::Flatten(*out_norm); + auto x = x_e.reshape(shape); + 
auto y = y_e.reshape(shape); + auto norm = norm_e.reshape(norm_shape); + + Eigen::DSizes rdim(1); + // y = x / sqrt((sum(x * x) + epsilon)) + // norm = sqrt(sum(x * x) + epsilon) + auto sum = x.pow(2).sum(rdim) + eps; + norm.device(*place) = sum.sqrt(); + // y = x / norm + Eigen::DSizes rshape(pre, 1, post); + Eigen::DSizes bcast(1, n, 1); + y.device(*place) = x / norm.reshape(rshape).broadcast(bcast); } }; template class NormGradKernel : public framework::OpKernel { public: - void Compute(const framework::ExecutionContext& context) const override { - const framework::Tensor* in_x = context.Input("X"); - const framework::Tensor* scale = context.Input("Scale"); - const framework::Tensor* out_grad = - context.Input(framework::GradVarName("Out")); - auto epsilon = static_cast(context.Attr("epsilon")); - framework::Tensor* in_x_grad = - context.Output(framework::GradVarName("X")); - in_x_grad->mutable_data(context.GetPlace()); - int batch_size = in_x->dims()[0]; - int channels = in_x->dims()[1]; - int height = in_x->dims()[2]; - int width = in_x->dims()[3]; - int fea_len = height * width; - auto* place = - context.template device_context().eigen_device(); - - auto scale_eigen = - framework::EigenVector::Flatten( - *scale); - auto x = - framework::EigenMatrix::From( - *in_x, framework::make_ddim({batch_size, fea_len * channels})); - // get square - framework::Tensor x_square; - x_square.mutable_data(in_x->dims(), context.GetPlace()); - auto x_square_eigen = - framework::EigenMatrix::From( - x_square, framework::make_ddim({batch_size, fea_len * channels})); - x_square_eigen.device(*place) = x.square(); - - for (int n = 0; n < batch_size; ++n) { - framework::Tensor in_x_batch = in_x->Slice(n, n + 1); - auto in_x_batch_eigen = - framework::EigenMatrix::From( - in_x_batch, framework::make_ddim({channels, fea_len})); - framework::Tensor in_g_batch = in_x_grad->Slice(n, n + 1); - auto in_g_batch_eigen = - framework::EigenMatrix::From( - in_g_batch, framework::make_ddim({channels, fea_len})); - framework::Tensor x_square_batch = x_square.Slice(n, n + 1); - auto x_square_batch_eigen = - framework::EigenMatrix::From( - x_square_batch, framework::make_ddim({channels, fea_len})); - framework::Tensor outg_batch = out_grad->Slice(n, n + 1); - auto outg_batch_eigen = - framework::EigenMatrix::From( - outg_batch, framework::make_ddim({channels, fea_len})); - - framework::Tensor tmp_tensor; - tmp_tensor.mutable_data(framework::make_ddim({1, fea_len}), - context.GetPlace()); - auto tmp_eigen = - framework::EigenVector::Flatten(tmp_tensor); - auto dim = Eigen::array({{0}}); - tmp_eigen.device(*place) = (in_x_batch_eigen * outg_batch_eigen).sum(dim); - framework::Tensor norm_tmp_tensor; - norm_tmp_tensor.mutable_data(framework::make_ddim({1, fea_len}), - context.GetPlace()); - auto norm_tmp_eigen = - framework::EigenVector::Flatten(norm_tmp_tensor); - norm_tmp_eigen.device(*place) = - (x_square_batch_eigen.sum(dim) + epsilon).sqrt(); - Eigen::array broadcast_dim_col; - broadcast_dim_col[1] = 1; - broadcast_dim_col[0] = channels; - in_g_batch_eigen.device(*place) = - in_x_batch_eigen * tmp_eigen.broadcast(broadcast_dim_col); - in_g_batch_eigen.device(*place) = - in_g_batch_eigen / - (norm_tmp_eigen * norm_tmp_eigen).broadcast(broadcast_dim_col); - in_g_batch_eigen.device(*place) = outg_batch_eigen - in_g_batch_eigen; - // outg_batch_eigen + (in_g_batch_eigen * -1); - in_g_batch_eigen.device(*place) = - in_g_batch_eigen / norm_tmp_eigen.broadcast(broadcast_dim_col); - Eigen::array broadcast_dim_row; - 
broadcast_dim_row[1] = fea_len; - broadcast_dim_row[0] = 1; - in_g_batch_eigen.device(*place) = - in_g_batch_eigen * (scale_eigen.broadcast(broadcast_dim_row)); - } + void Compute(const framework::ExecutionContext& ctx) const override { + auto* in_x = ctx.Input("X"); + auto* in_norm = ctx.Input("Norm"); + auto* in_dy = ctx.Input(framework::GradVarName("Out")); + auto* out_dx = ctx.Output(framework::GradVarName("X")); + out_dx->mutable_data(ctx.GetPlace()); + + auto xdim = in_x->dims(); + int axis = ctx.Attr("axis"); + if (axis < 0) axis = xdim.size() + axis; + int pre, n, post; + GetDims(xdim, axis, &pre, &n, &post); + + auto* place = ctx.template device_context().eigen_device(); + + auto x_e = framework::EigenVector::Flatten(*in_x); + auto dy_e = framework::EigenVector::Flatten(*in_dy); + auto norm_e = framework::EigenVector::Flatten(*in_norm); + auto dx_e = framework::EigenVector::Flatten(*out_dx); + + Eigen::DSizes shape(pre, n, post); + Eigen::DSizes norm_shape(pre, post); + auto x = x_e.reshape(shape); + auto dy = dy_e.reshape(shape); + auto norm = norm_e.reshape(norm_shape); + auto dx = dx_e.reshape(shape); + + framework::Tensor rsum; + rsum.mutable_data({pre, post}, ctx.GetPlace()); + auto sum = framework::EigenTensor::From(rsum); + + Eigen::DSizes rdim(1); + Eigen::DSizes bcast(1, n, 1); + Eigen::DSizes rshape(pre, 1, post); + + // dx = ( dy/sqrt(sum(x*x)) ) * [1 - x*sum(x) / (sum(x*x) + e)] + // = [dy - dy * x * sum(x) / (sum(x*x) + e)] / sqrt(sum(x*x)) + // = [dy - x * sum(x*dy) / (sum(x*x) + e)] / sqrt(sum(x*x)) + // 1. sum = sum(x*dy) + sum.device(*place) = (x * dy).sum(rdim); + // 2. dx = x * sum + dx.device(*place) = sum.reshape(rshape).broadcast(bcast) * x; + // 3. dx / (sum(x*x) + e) + // where, norm.pow(2) = sum(x*x) + e, which is calculated in forward. + dx.device(*place) = dx / norm.pow(2).broadcast(bcast); + // 4. [dy - dx] / sqrt(sum(x*x)) + dx.device(*place) = (dy - dx) / norm.broadcast(bcast); } }; } // namespace operators diff --git a/paddle/fluid/operators/one_hot_op.cc b/paddle/fluid/operators/one_hot_op.cc index 1d42dfdd76..4fcb1d6993 100644 --- a/paddle/fluid/operators/one_hot_op.cc +++ b/paddle/fluid/operators/one_hot_op.cc @@ -46,8 +46,7 @@ class OneHotOp : public framework::OperatorWithKernel { class OneHotOpMaker : public framework::OpProtoAndCheckerMaker { public: - OneHotOpMaker(OpProto* proto, OpAttrChecker* op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("X", "(LoDTensor, LoDTensor) Input variable with rank at least 2. " "The last dimension of X should be 1. Each value of X is an index " diff --git a/paddle/fluid/operators/pad_op.cc b/paddle/fluid/operators/pad_op.cc index d2a0106f80..d4b631a6f5 100644 --- a/paddle/fluid/operators/pad_op.cc +++ b/paddle/fluid/operators/pad_op.cc @@ -48,8 +48,7 @@ class PadOp : public framework::OperatorWithKernel { class PadOpMaker : public framework::OpProtoAndCheckerMaker { public: - PadOpMaker(OpProto* proto, OpAttrChecker* op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("X", "The input of pad op. " "The input should be a k-D tensor(k > 0 and k < 7)"); diff --git a/paddle/fluid/operators/parallel_do_op.cc b/paddle/fluid/operators/parallel_do_op.cc index ae34fe2184..eb09470f37 100644 --- a/paddle/fluid/operators/parallel_do_op.cc +++ b/paddle/fluid/operators/parallel_do_op.cc @@ -18,7 +18,6 @@ limitations under the License. 
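// For reference, the gradient implemented in NormGradKernel above follows
// directly from y_i = x_i / norm with norm = sqrt(sum_j x_j^2 + eps):
//
//   d(y_j)/d(x_i) = delta_ij / norm - x_i * x_j / norm^3
//   dL/d(x_i)     = dy_i / norm - x_i * sum_j(x_j * dy_j) / norm^3
//                 = [dy_i - x_i * sum_j(x_j * dy_j) / norm^2] / norm
//
// which is exactly steps 1-4 in the kernel; norm^2 = sum(x*x) + eps is
// reused from the forward pass instead of being recomputed.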
*/ #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/framework/threadpool.h" #include "paddle/fluid/operators/detail/safe_ref.h" -#include "paddle/fluid/platform/profiler.h" namespace paddle { namespace operators { @@ -164,14 +163,11 @@ class ParallelDoOp : public framework::OperatorBase { auto &place = places[place_idx]; auto *cur_scope = sub_scopes[place_idx]; - workers.emplace_back( - framework::Async([program, cur_scope, place, block, place_idx] { - // Give the thread an id to distinguish parallel block with same id. - platform::RecordThread rt(static_cast(place_idx) + 1); - framework::Executor executor(place); - executor.Run(*program, cur_scope, block->ID(), - false /*create_local_scope*/); - })); + workers.emplace_back(framework::Async([program, cur_scope, place, block] { + framework::Executor executor(place); + executor.Run(*program, cur_scope, block->ID(), + false /*create_local_scope*/); + })); } for (auto &worker : workers) { worker.wait(); @@ -196,8 +192,7 @@ class ParallelDoOp : public framework::OperatorBase { class ParallelDoOpProtoMaker : public framework::OpProtoAndCheckerMaker { public: - ParallelDoOpProtoMaker(OpProto *proto, framework::OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput(kInputs, "").AsDuplicable(); AddInput(kParameters, "").AsDuplicable(); AddInput(kPlaces, ""); @@ -243,14 +238,11 @@ class ParallelDoGradOp : public framework::OperatorBase { auto *cur_scope = sub_scopes[i]; // execute - workers.emplace_back( - framework::Async([program, cur_scope, place, block, i] { - // Give the thread an id to distinguish parallel block with same id. - platform::RecordThread rt(static_cast(i) + 1); - framework::Executor executor(place); - executor.Run(*program, cur_scope, block->ID(), - false /*create_local_scope*/); - })); + workers.emplace_back(framework::Async([program, cur_scope, place, block] { + framework::Executor executor(place); + executor.Run(*program, cur_scope, block->ID(), + false /*create_local_scope*/); + })); } for (auto &worker : workers) { worker.wait(); @@ -296,7 +288,7 @@ class ParallelDoGradOp : public framework::OperatorBase { auto sum_op = framework::OpRegistry::CreateOp( "sum", {{"X", {s, tmp_name}}}, {{"Out", {s}}}, - framework::AttributeMap{}); + framework::AttributeMap{{"use_mkldnn", {false}}}); VLOG(10) << sum_op->DebugStringEx(sub_scopes[0]); sum_op->Run(*sub_scopes[0], places[0]); WaitOnPlace(places[0]); diff --git a/paddle/fluid/operators/pool_cudnn_op.cu.cc b/paddle/fluid/operators/pool_cudnn_op.cu.cc index d60a99994e..31f083565f 100644 --- a/paddle/fluid/operators/pool_cudnn_op.cu.cc +++ b/paddle/fluid/operators/pool_cudnn_op.cu.cc @@ -81,7 +81,7 @@ class PoolCUDNNOpKernel : public framework::OpKernel { // ------------------- cudnn pool algorithm --------------------- auto handle = ctx.cuda_device_context().cudnn_handle(); ScalingParamType alpha = 1.0f, beta = 0.0f; - PADDLE_ENFORCE(platform::dynload::cudnnPoolingForward( + CUDNN_ENFORCE(platform::dynload::cudnnPoolingForward( handle, cudnn_pool_desc, &alpha, cudnn_input_desc, input_data, &beta, cudnn_output_desc, output_data)); } @@ -135,7 +135,11 @@ class PoolCUDNNGradOpKernel : public framework::OpKernel { PoolingMode pooling_mode; if (pooling_type == "max") { - pooling_mode = PoolingMode::kMaximum; + if (FLAGS_cudnn_deterministic) { + pooling_mode = PoolingMode::kMaximumDeterministic; + } else { + pooling_mode = PoolingMode::kMaximum; + } } else { pooling_mode = PoolingMode::kAverage; } @@ -150,7 +154,7 @@ 
class PoolCUDNNGradOpKernel : public framework::OpKernel { T *input_grad_data = input_grad->mutable_data(ctx.GetPlace()); // Because beta is zero, it is unnecessary to reset input_grad. - PADDLE_ENFORCE(platform::dynload::cudnnPoolingBackward( + CUDNN_ENFORCE(platform::dynload::cudnnPoolingBackward( handle, cudnn_pool_desc, &alpha, cudnn_output_desc, output_data, cudnn_output_desc, output_grad_data, cudnn_input_desc, input_data, &beta, cudnn_input_desc, input_grad_data)); diff --git a/paddle/fluid/operators/pool_mkldnn_op.cc b/paddle/fluid/operators/pool_mkldnn_op.cc index 63eaaedcd5..5341187d1c 100644 --- a/paddle/fluid/operators/pool_mkldnn_op.cc +++ b/paddle/fluid/operators/pool_mkldnn_op.cc @@ -18,6 +18,34 @@ limitations under the License. */ namespace paddle { namespace operators { +using framework::DataLayout; +using mkldnn::memory; +using mkldnn::pooling_backward; +using mkldnn::pooling_forward; +using mkldnn::primitive; +using mkldnn::reorder; +using mkldnn::stream; +using platform::to_void_cast; + +// Generate keys for storing/retriving primitives for this operator +// TODO(jczaja): Make hashing function more optimial +static std::string gethash(const memory::dims& input_dims, + const std::string& pooling_type, + const std::vector& ksize, + const std::vector& strides, + const std::vector& paddings, + const std::string& suffix) { + auto dims2str = [](const memory::dims& operand_dims) { + std::string dstr = ""; + for (size_t i = 0; i < operand_dims.size(); ++i) { + dstr += std::to_string(operand_dims[i]) + "-"; + } + return dstr; + }; + return dims2str(input_dims) + dims2str(ksize) + dims2str(strides) + + dims2str(paddings) + pooling_type + suffix; +} + template class PoolMKLDNNOpKernel : public paddle::framework::OpKernel { public: @@ -32,12 +60,9 @@ class PoolMKLDNNOpKernel : public paddle::framework::OpKernel { const Tensor* input = ctx.Input("X"); Tensor* output = ctx.Output("Out"); - // Get an unique name from "argument" name of "Out" variable - // This name will be used as key when saving info into device context - const std::string key = ctx.op().Output("Out"); - const std::string key_pool_pd = key + "@pool_pd"; - const std::string key_pool_workspace_memory = - key + "@pool_workspace_memory"; + PADDLE_ENFORCE(input->layout() == DataLayout::kMKLDNN && + input->format() != memory::format::format_undef, + "Wrong layout/format set for Input tensor"); std::string pooling_type = ctx.Attr("pooling_type"); std::vector ksize = ctx.Attr>("ksize"); @@ -63,38 +88,84 @@ class PoolMKLDNNOpKernel : public paddle::framework::OpKernel { std::vector src_tz = paddle::framework::vectorize2int(input->dims()); std::vector dst_tz = paddle::framework::vectorize2int(output->dims()); - // TODO(pzelazko-intel): support more formats - auto src_md = platform::MKLDNNMemDesc(src_tz, mkldnn::memory::f32, - mkldnn::memory::format::nchw); - auto dst_md = platform::MKLDNNMemDesc(dst_tz, mkldnn::memory::f32, - mkldnn::memory::format::nchw); - - std::shared_ptr pool_pd = - CreatePrimitiveDesc(src_md, dst_md, strides, paddings, ksize, - pooling_type, mkldnn_engine); - - // save pool_pd into global device context to be referred in backward path - dev_ctx.SetBlob(key_pool_pd, pool_pd); - - std::shared_ptr workspace_memory = - CreateWorkspaceMemory(pool_pd, pooling_type, mkldnn_engine); - - // save pool_workspace_memory to be referred in backward path - dev_ctx.SetBlob(key_pool_workspace_memory, workspace_memory); + auto input_format = input->format(); + memory::format output_format{memory::format::format_undef}; - 
auto src_memory = - mkldnn::memory({src_md, mkldnn_engine}, - static_cast(const_cast(input_data))); - auto dst_memory = - mkldnn::memory({dst_md, mkldnn_engine}, - static_cast(const_cast(output_data))); + const std::string key = gethash(src_tz, pooling_type, ksize, strides, + paddings, ctx.op().Output("Out")); + const std::string key_pool_p = key + "@pool_p"; + const std::string key_pool_pd = key + "@pool_pd"; + const std::string key_pool_src_mem_p = key + "@pool_src_mem_p"; + const std::string key_pool_dst_mem_p = key + "@pool_dst_mem_p"; + const std::string key_pool_workspace_memory = + key + "@pool_workspace_memory"; - auto pool_prim = mkldnn::pooling_forward(*pool_pd, src_memory, dst_memory, - *workspace_memory); + auto pool_p = + std::static_pointer_cast(dev_ctx.GetBlob(key_pool_p)); + if (pool_p == nullptr) { + auto src_md = platform::MKLDNNMemDesc( + src_tz, platform::MKLDNNGetDataType(), input_format); + + /* create memory descriptor for pooling without specified format + * ('any') which lets a primitive (pooling in this case) choose + * the memory format preferred for best performance + */ + auto dst_md = platform::MKLDNNMemDesc(dst_tz, mkldnn::memory::f32, + mkldnn::memory::format::any); + + std::shared_ptr pool_pd = + CreatePrimitiveDesc(src_md, dst_md, strides, paddings, ksize, + pooling_type, mkldnn_engine); + + // save pool_pd into global device context to be referred in backward path + dev_ctx.SetBlob(key_pool_pd, pool_pd); + + std::shared_ptr workspace_memory = + CreateWorkspaceMemory(pool_pd, pooling_type, mkldnn_engine); + + // save pool_workspace_memory to be referred in backward path + dev_ctx.SetBlob(key_pool_workspace_memory, workspace_memory); + + auto src_memory = std::make_shared(pool_pd->src_primitive_desc(), + to_void_cast(input_data)); + auto dst_memory = + std::make_shared(pool_pd->dst_primitive_desc(), output_data); + + dev_ctx.SetBlob(key_pool_src_mem_p, src_memory); + dev_ctx.SetBlob(key_pool_dst_mem_p, dst_memory); + + pool_p = std::make_shared(*pool_pd, *(src_memory.get()), + *(dst_memory.get()), + *workspace_memory); + + dev_ctx.SetBlob(key_pool_p, pool_p); + + output_format = + (memory::format)dst_memory->get_primitive_desc().desc().data.format; + } else { + // Primitives already exist + auto pool_src_memory_p = + std::static_pointer_cast(dev_ctx.GetBlob(key_pool_src_mem_p)); + PADDLE_ENFORCE(pool_src_memory_p != nullptr, + "Fail to find pooling src mem_p in device context"); + auto pool_dst_memory_p = + std::static_pointer_cast(dev_ctx.GetBlob(key_pool_dst_mem_p)); + PADDLE_ENFORCE(pool_dst_memory_p != nullptr, + "Fail to find pooling dst mem_p in device context"); + pool_src_memory_p->set_data_handle(to_void_cast(input_data)); + pool_dst_memory_p->set_data_handle(output_data); + + output_format = (memory::format)pool_dst_memory_p->get_primitive_desc() + .desc() + .data.format; + } // push primitive to stream and wait until it's executed - std::vector pipeline{pool_prim}; - mkldnn::stream(mkldnn::stream::kind::eager).submit(pipeline).wait(); + std::vector pipeline{*(pool_p.get())}; + stream(stream::kind::eager).submit(pipeline).wait(); + + output->set_layout(DataLayout::kMKLDNN); + output->set_format(output_format); } private: @@ -120,9 +191,10 @@ class PoolMKLDNNOpKernel : public paddle::framework::OpKernel { mkldnn::memory::primitive_desc workspace_md = pooling_type == "max" ? 
pool_pd->workspace_primitive_desc() - : mkldnn::memory::primitive_desc( - {{}, mkldnn::memory::f32, mkldnn::memory::format::nchw}, - engine); + : mkldnn::memory::primitive_desc({{}, + platform::MKLDNNGetDataType(), + mkldnn::memory::format::nchw}, + engine); auto p_workspace_memory = new mkldnn::memory(workspace_md); return std::unique_ptr(p_workspace_memory); @@ -140,12 +212,12 @@ class PoolMKLDNNGradOpKernel : public paddle::framework::OpKernel { const Tensor* out_grad = ctx.Input(framework::GradVarName("Out")); Tensor* in_x_grad = ctx.Output(framework::GradVarName("X")); - // Get an unique name from "argument" name of "Out" variable - // This name will be used as key when referring info from device context - const std::string key = ctx.op().Input("Out"); - const std::string key_pool_pd = key + "@pool_pd"; - const std::string key_pool_workspace_memory = - key + "@pool_workspace_memory"; + PADDLE_ENFORCE(in_x->layout() == DataLayout::kMKLDNN && + in_x->format() != memory::format::format_undef, + "Wrong layout/format set for Input X tensor"); + PADDLE_ENFORCE(out_grad->layout() == DataLayout::kMKLDNN && + out_grad->format() != memory::format::format_undef, + "Wrong layout/format set for Input output_grad tensor"); std::string pooling_type = ctx.Attr("pooling_type"); std::vector ksize = ctx.Attr>("ksize"); @@ -165,57 +237,139 @@ class PoolMKLDNNGradOpKernel : public paddle::framework::OpKernel { const T* out_grad_data = out_grad->data(); T* in_x_grad_data = in_x_grad->mutable_data(ctx.GetPlace()); + memory::format in_x_grad_format{memory::format::format_undef}; std::vector diff_src_tz = paddle::framework::vectorize2int(in_x_grad->dims()); std::vector diff_dst_tz = paddle::framework::vectorize2int(out_grad->dims()); - auto diff_src_md = platform::MKLDNNMemDesc(diff_src_tz, mkldnn::memory::f32, - mkldnn::memory::format::nchw); - auto diff_dst_md = platform::MKLDNNMemDesc(diff_dst_tz, mkldnn::memory::f32, - mkldnn::memory::format::nchw); - - // Retrieve pool_pd/pool_workspace_memory from device context - auto pool_pd = - std::static_pointer_cast( - dev_ctx.GetBlob(key_pool_pd)); - PADDLE_ENFORCE(pool_pd != nullptr, - "Fail to find pool_pd in device context"); + // Get a unique name from "argument" name of "Out" variable + // This name will be used as key when referring info from device context + const std::string key = gethash(diff_src_tz, pooling_type, ksize, strides, + paddings, ctx.op().Input("Out")); + const std::string key_pool_bwd_p = key + "@pool_bwd_p"; + const std::string key_pool_diff_src_mem_p = key + "@pool_diff_src_mem_p"; + const std::string key_pool_diff_dst_mem_p = key + "@pool_diff_dst_mem_p"; + const std::string key_pool_src_mem_p = key + "@pool_src_mem_p"; + const std::string key_pool_dst_mem_p = key + "@pool_dst_mem_p"; + const std::string key_pool_pd = key + "@pool_pd"; + const std::string key_pool_workspace_memory = + key + "@pool_workspace_memory"; - auto workspace_memory = std::static_pointer_cast( - dev_ctx.GetBlob(key_pool_workspace_memory)); - PADDLE_ENFORCE(workspace_memory != nullptr, - "Fail to find workspace_memory in device context"); + auto user_diff_dst_memory = + memory({{{diff_dst_tz}, memory::data_type::f32, out_grad->format()}, + mkldnn_engine}, + to_void_cast(out_grad_data)); - auto pool_bwd_desc = mkldnn::pooling_backward::desc( - pooling_type == "max" ?
mkldnn::algorithm::pooling_max - : mkldnn::algorithm::pooling_avg, - diff_src_md, diff_dst_md, strides, ksize, paddings, paddings, - mkldnn::padding_kind::zero); - auto pool_bwd_pd = mkldnn::pooling_backward::primitive_desc( - pool_bwd_desc, mkldnn_engine, *pool_pd); + std::shared_ptr diff_src_memory; + std::shared_ptr diff_dst_memory; + auto dst_memory = + std::static_pointer_cast(dev_ctx.GetBlob(key_pool_dst_mem_p)); + PADDLE_ENFORCE(dst_memory != nullptr, + "Fail to find dst_memory in device context"); + + primitive reorder_diff_dst; + bool is_diff_dst_reordered = false; + auto pool_bwd_p = std::static_pointer_cast( + dev_ctx.GetBlob(key_pool_bwd_p)); + if (pool_bwd_p == nullptr) { + // Retrieve src_memory/dst_memory saved in forward pass + auto src_memory = + std::static_pointer_cast(dev_ctx.GetBlob(key_pool_src_mem_p)); + PADDLE_ENFORCE(src_memory != nullptr, + "Fail to find src_memory in device context"); + // Retrieve pool_pd/pool_workspace_memory from device context + auto pool_pd = + std::static_pointer_cast( + dev_ctx.GetBlob(key_pool_pd)); + PADDLE_ENFORCE(pool_pd != nullptr, + "Fail to find pool_pd in device context"); + auto workspace_memory = std::static_pointer_cast( + dev_ctx.GetBlob(key_pool_workspace_memory)); + PADDLE_ENFORCE(workspace_memory != nullptr, + "Fail to find workspace_memory in device context"); + + // create memory descriptors for pooling + auto diff_src_md = src_memory.get()->get_primitive_desc().desc(); + auto diff_dst_md = dst_memory.get()->get_primitive_desc().desc(); + + auto pool_bwd_desc = mkldnn::pooling_backward::desc( + pooling_type == "max" ? mkldnn::algorithm::pooling_max + : mkldnn::algorithm::pooling_avg, + diff_src_md, diff_dst_md, strides, ksize, paddings, paddings, + mkldnn::padding_kind::zero); + auto pool_bwd_pd = mkldnn::pooling_backward::primitive_desc( + pool_bwd_desc, mkldnn_engine, *pool_pd); + + // reorder between user_diff_dst and pool diff_dst if needed + diff_dst_memory = std::make_shared(user_diff_dst_memory); + if (memory::primitive_desc(dst_memory->get_primitive_desc()) != + user_diff_dst_memory.get_primitive_desc()) { + diff_dst_memory = + std::make_shared(dst_memory.get()->get_primitive_desc()); + reorder_diff_dst = reorder(user_diff_dst_memory, *diff_dst_memory); + is_diff_dst_reordered = true; + } - auto diff_src_memory = - mkldnn::memory({diff_src_md, mkldnn_engine}, - static_cast(const_cast(in_x_grad_data))); - auto diff_dst_memory = - mkldnn::memory({diff_dst_md, mkldnn_engine}, - static_cast(const_cast(out_grad_data))); + diff_src_memory = std::make_shared( + pool_bwd_pd.diff_src_primitive_desc(), in_x_grad_data); + + dev_ctx.SetBlob(key_pool_diff_src_mem_p, diff_src_memory); + dev_ctx.SetBlob(key_pool_diff_dst_mem_p, diff_dst_memory); + + pool_bwd_p = std::make_shared( + pool_bwd_pd, *(diff_dst_memory.get()), *workspace_memory, + *(diff_src_memory)); + dev_ctx.SetBlob(key_pool_bwd_p, pool_bwd_p); + + } else { + // Primitives already exist + diff_src_memory = std::static_pointer_cast( + dev_ctx.GetBlob(key_pool_diff_src_mem_p)); + PADDLE_ENFORCE(diff_src_memory != nullptr, + "Fail to find pooling src mem_p in device context"); + diff_dst_memory = std::static_pointer_cast( + dev_ctx.GetBlob(key_pool_diff_dst_mem_p)); + PADDLE_ENFORCE(diff_dst_memory != nullptr, + "Fail to find pooling dst mem_p in device context"); + + diff_src_memory->set_data_handle(reinterpret_cast(in_x_grad_data)); + diff_dst_memory->set_data_handle(const_cast(out_grad_data)); + + // reorder between user_diff_dst and pool diff_dst if needed + if 
(memory::primitive_desc(dst_memory->get_primitive_desc()) != + user_diff_dst_memory.get_primitive_desc()) { + diff_dst_memory = + std::make_shared(dst_memory.get()->get_primitive_desc()); + reorder_diff_dst = reorder(user_diff_dst_memory, *diff_dst_memory); + is_diff_dst_reordered = true; + } + } - auto bwd_prim = mkldnn::pooling_backward( - pool_bwd_pd, diff_dst_memory, *workspace_memory, diff_src_memory); + in_x_grad_format = (memory::format)diff_src_memory->get_primitive_desc() + .desc() + .data.format; // push primitive to stream and wait until it's executed - std::vector pipeline{bwd_prim}; + std::vector pipeline; + if (is_diff_dst_reordered) { + pipeline.push_back(reorder_diff_dst); + } + pipeline.push_back(*(pool_bwd_p.get())); mkldnn::stream(mkldnn::stream::kind::eager).submit(pipeline).wait(); + + in_x_grad->set_layout(DataLayout::kMKLDNN); + in_x_grad->set_format(in_x_grad_format); } // Compute() }; } // namespace operators } // namespace paddle +namespace ops = paddle::operators; + REGISTER_OP_KERNEL(pool2d, MKLDNN, ::paddle::platform::CPUPlace, - paddle::operators::PoolMKLDNNOpKernel); + ops::PoolMKLDNNOpKernel); REGISTER_OP_KERNEL(pool2d_grad, MKLDNN, ::paddle::platform::CPUPlace, - paddle::operators::PoolMKLDNNGradOpKernel); + ops::PoolMKLDNNGradOpKernel); diff --git a/paddle/fluid/operators/pool_op.cc b/paddle/fluid/operators/pool_op.cc index f2de075e0d..f8ad63690e 100644 --- a/paddle/fluid/operators/pool_op.cc +++ b/paddle/fluid/operators/pool_op.cc @@ -83,6 +83,9 @@ void PoolOp::InferShape(framework::InferShapeContext *ctx) const { framework::OpKernelType PoolOp::GetExpectedKernelType( const framework::ExecutionContext &ctx) const { framework::LibraryType library_{framework::LibraryType::kPlain}; + std::string data_format = ctx.Attr("data_format"); + framework::DataLayout layout_ = framework::StringToDataLayout(data_format); + #ifdef PADDLE_WITH_CUDA if (platform::CanCUDNNBeUsed(ctx)) { library_ = framework::LibraryType::kCUDNN; @@ -92,11 +95,10 @@ framework::OpKernelType PoolOp::GetExpectedKernelType( if (library_ == framework::LibraryType::kPlain && platform::CanMKLDNNBeUsed(ctx)) { library_ = framework::LibraryType::kMKLDNN; + layout_ = framework::DataLayout::kMKLDNN; } #endif - std::string data_format = ctx.Attr("data_format"); - framework::DataLayout layout_ = framework::StringToDataLayout(data_format); return framework::OpKernelType( framework::ToDataType(ctx.Input("X")->type()), ctx.GetPlace(), layout_, library_); @@ -112,6 +114,9 @@ void PoolOpGrad::InferShape(framework::InferShapeContext *ctx) const { framework::OpKernelType PoolOpGrad::GetExpectedKernelType( const framework::ExecutionContext &ctx) const { framework::LibraryType library_{framework::LibraryType::kPlain}; + std::string data_format = ctx.Attr("data_format"); + framework::DataLayout layout_ = framework::StringToDataLayout(data_format); + #ifdef PADDLE_WITH_CUDA if (platform::CanCUDNNBeUsed(ctx)) { library_ = framework::LibraryType::kCUDNN; @@ -121,6 +126,7 @@ framework::OpKernelType PoolOpGrad::GetExpectedKernelType( if (library_ == framework::LibraryType::kPlain && platform::CanMKLDNNBeUsed(ctx)) { library_ = framework::LibraryType::kMKLDNN; + layout_ = framework::DataLayout::kMKLDNN; } #endif @@ -129,14 +135,11 @@ framework::OpKernelType PoolOpGrad::GetExpectedKernelType( PADDLE_ENFORCE_EQ(library_, framework::LibraryType::kCUDNN, "float16 can only be used when CUDNN is used"); } - std::string data_format = ctx.Attr("data_format"); - framework::DataLayout layout_ = 
framework::StringToDataLayout(data_format); return framework::OpKernelType(input_data_type, ctx.GetPlace(), layout_, library_); } -Pool2dOpMaker::Pool2dOpMaker(OpProto *proto, OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { +void Pool2dOpMaker::Make() { AddInput( "X", "(Tensor) The input tensor of pooling operator. " @@ -148,7 +151,8 @@ Pool2dOpMaker::Pool2dOpMaker(OpProto *proto, OpAttrChecker *op_checker) "The format of output tensor is also NCHW, " "where N is batch size, C is the number of channels, " "H is the height of the feature, " - "and W is the width of the feature."); + "and W is the width of the feature.") + .Reuse("X"); AddAttr("pooling_type", "(string), pooling type, can be \"max\" for max-pooling " @@ -200,8 +204,6 @@ Pool2dOpMaker::Pool2dOpMaker(OpProto *proto, OpAttrChecker *op_checker) // TODO(dzhwinter): need to registered layout transform function AddComment(R"DOC( -Pool2d Operator. - The pooling2d operation calculates the output based on the input, pooling_type and ksize, strides, paddings parameters. Input(X) and output(Out) are in NCHW format, where N is batch size, C is the @@ -211,26 +213,34 @@ These two elements represent height and width, respectively. The input(X) size and output(Out) size may be different. Example: + Input: + X shape: $(N, C, H_{in}, W_{in})$ + Output: + Out shape: $(N, C, H_{out}, W_{out})$ + For ceil_mode = false: $$ - H_{out} = \frac{(H_{in} - ksize[0] + 2 * paddings[0])}{strides[0]} + 1 \\ - W_{out} = \frac{(W_{in} - ksize[1] + 2 * paddings[1])}{strides[1]} + 1 + H_{out} = \\frac{(H_{in} - ksize[0] + 2 * paddings[0])}{strides[0]} + 1 + $$ + $$ + W_{out} = \\frac{(W_{in} - ksize[1] + 2 * paddings[1])}{strides[1]} + 1 $$ For ceil_mode = true: $$ - H_{out} = \frac{(H_{in} - ksize[0] + 2 * paddings[0] + strides[0] - 1)}{strides[0]} + 1 \\ - W_{out} = \frac{(W_{in} - ksize[1] + 2 * paddings[1] + strides[1] - 1)}{strides[1]} + 1 + H_{out} = \\frac{(H_{in} - ksize[0] + 2 * paddings[0] + strides[0] - 1)}{strides[0]} + 1 + $$ + $$ + W_{out} = \\frac{(W_{in} - ksize[1] + 2 * paddings[1] + strides[1] - 1)}{strides[1]} + 1 $$ )DOC"); } -Pool3dOpMaker::Pool3dOpMaker(OpProto *proto, OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { +void Pool3dOpMaker::Make() { AddInput("X", "(Tensor) The input tensor of pooling operator. 
" "The format of input tensor is NCDHW, where N is batch size, C is " @@ -242,7 +252,8 @@ Pool3dOpMaker::Pool3dOpMaker(OpProto *proto, OpAttrChecker *op_checker) "The format of output tensor is also NCDHW, " "where N is batch size, C is " "the number of channels, and D, H and W is the depth, height and " - "width of the feature, respectively."); + "width of the feature, respectively.") + .Reuse("X"); AddAttr("pooling_type", "(string) Pooling type, can be \"max\" for max-pooling " diff --git a/paddle/fluid/operators/pool_op.h b/paddle/fluid/operators/pool_op.h index a48127ea69..a63963ca92 100644 --- a/paddle/fluid/operators/pool_op.h +++ b/paddle/fluid/operators/pool_op.h @@ -50,12 +50,12 @@ class PoolOpGrad : public framework::OperatorWithKernel { class Pool2dOpMaker : public framework::OpProtoAndCheckerMaker { public: - Pool2dOpMaker(OpProto* proto, OpAttrChecker* op_checker); + void Make() override; }; class Pool3dOpMaker : public framework::OpProtoAndCheckerMaker { public: - Pool3dOpMaker(OpProto* proto, OpAttrChecker* op_checker); + void Make() override; }; template diff --git a/paddle/fluid/operators/pool_with_index_op.cc b/paddle/fluid/operators/pool_with_index_op.cc index 848cd61b23..873706593e 100644 --- a/paddle/fluid/operators/pool_with_index_op.cc +++ b/paddle/fluid/operators/pool_with_index_op.cc @@ -100,8 +100,7 @@ class MaxPoolWithIndexOpGrad : public framework::OperatorWithKernel { class MaxPool2dWithIndexOpMaker : public framework::OpProtoAndCheckerMaker { public: - MaxPool2dWithIndexOpMaker(OpProto *proto, OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput( "X", "(Tensor) The input tensor of pooling operator. " @@ -177,8 +176,7 @@ Example: class MaxPool3dWithIndexOpMaker : public framework::OpProtoAndCheckerMaker { public: - MaxPool3dWithIndexOpMaker(OpProto *proto, OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("X", "(Tensor) The input tensor of pooling operator. " "The format of input tensor is NCDHW, where N is batch size, C is " diff --git a/paddle/fluid/operators/positive_negative_pair_op.cc b/paddle/fluid/operators/positive_negative_pair_op.cc index d237da25a0..4d865b7f17 100644 --- a/paddle/fluid/operators/positive_negative_pair_op.cc +++ b/paddle/fluid/operators/positive_negative_pair_op.cc @@ -95,8 +95,7 @@ class PositiveNegativePairOp : public framework::OperatorWithKernel { class PositiveNegativePairOpMaker : public framework::OpProtoAndCheckerMaker { public: - PositiveNegativePairOpMaker(OpProto *proto, OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("Score", "(Tensor, float) Model Score on an item (with " "respect to QueryID). It's a 2-D tensor with shape [batch_size, " diff --git a/paddle/fluid/operators/positive_negative_pair_op.h b/paddle/fluid/operators/positive_negative_pair_op.h index f20f33bbeb..db0a1002f4 100644 --- a/paddle/fluid/operators/positive_negative_pair_op.h +++ b/paddle/fluid/operators/positive_negative_pair_op.h @@ -14,7 +14,7 @@ limitations under the License. 
*/ #include #include "paddle/fluid/framework/eigen.h" #include "paddle/fluid/framework/op_registry.h" -#include "paddle/utils/Logging.h" +#include "paddle/legacy/utils/Logging.h" namespace paddle { namespace operators { diff --git a/paddle/fluid/operators/precision_recall_op.cc b/paddle/fluid/operators/precision_recall_op.cc index c34b0d072b..e7ce16f33f 100644 --- a/paddle/fluid/operators/precision_recall_op.cc +++ b/paddle/fluid/operators/precision_recall_op.cc @@ -90,8 +90,7 @@ class PrecisionRecallOp : public framework::OperatorWithKernel { class PrecisionRecallOpMaker : public framework::OpProtoAndCheckerMaker { public: - PrecisionRecallOpMaker(OpProto *proto, OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("MaxProbs", "(Tensor, default Tensor) A 2-D tensor with shape N x 1, " "where N is the batch size. Each row contains the max probability " diff --git a/paddle/fluid/operators/prefetch_op.cc b/paddle/fluid/operators/prefetch_op.cc index f9ae01ab5d..4b804740a0 100644 --- a/paddle/fluid/operators/prefetch_op.cc +++ b/paddle/fluid/operators/prefetch_op.cc @@ -18,7 +18,7 @@ limitations under the License. */ #include "paddle/fluid/framework/data_type.h" #include "paddle/fluid/framework/lod_tensor.h" #include "paddle/fluid/framework/op_registry.h" -#include "paddle/fluid/operators/detail/grpc_client.h" +#include "paddle/fluid/operators/detail/macros.h" #include "paddle/fluid/operators/send_recv_util.h" namespace paddle { @@ -41,35 +41,26 @@ class PrefetchOp : public framework::OperatorBase { platform::DeviceContextPool& pool = platform::DeviceContextPool::Instance(); auto& ctx = *pool.Get(place); - auto client_var_name = Output("RPCClient"); - PADDLE_ENFORCE_NOT_NULL(scope.FindVar(client_var_name), - "Can not find variable '%s' in the scope.", - client_var_name); - auto* client_var = scope.FindVar(client_var_name); - detail::RPCClient* rpc_client = client_var->GetMutable(); + distributed::RPCClient* rpc_client = + distributed::RPCClient::GetInstance(); for (size_t i = 0; i < ins.size(); i++) { if (NeedSend(scope, ins[i])) { VLOG(3) << "sending " << ins[i] << " to " << epmap[i] << " to get " << outs[i] << " back"; - rpc_client->AsyncPrefetchVariable(epmap[i], ctx, scope, ins[i], - outs[i]); + rpc_client->AsyncPrefetchVar(epmap[i], ctx, scope, ins[i], outs[i]); } else { VLOG(3) << "don't send no-initialied variable: " << ins[i]; } } - PADDLE_ENFORCE(rpc_client->Wait()); + PADDLE_ENFORCE(rpc_client->Wait(), "internal error in RPCClient"); } }; class PrefetchOpMaker : public framework::OpProtoAndCheckerMaker { public: - PrefetchOpMaker(OpProto* proto, OpAttrChecker* op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() { AddInput("X", "(LoDTensor) Input Id variables to be sent").AsDuplicable(); - AddOutput("RPCClient", - "(RPCClient) The RPC client object which will be" - "initialized at most once."); AddOutput("Out", "(LoDTensor) result " "to be fetched from parameter server") @@ -88,17 +79,6 @@ the parameter server and fetch result back. 
} }; -class PrefetchOpVarTypeInference : public framework::VarTypeInference { - public: - void operator()(const framework::OpDesc& op_desc, - framework::BlockDesc* block) const override { - auto out_var_name = op_desc.Output("RPCClient").front(); - auto& out_var = block->FindRecursiveOrCreateVar(out_var_name); - auto var_type = framework::proto::VarType::RAW; - out_var.SetType(var_type); - } -}; - class PrefetchOpShapeInference : public framework::InferShapeBase { public: void operator()(framework::InferShapeContext* ctx) const override {} @@ -111,5 +91,4 @@ namespace ops = paddle::operators; REGISTER_OPERATOR(prefetch, ops::PrefetchOp, paddle::framework::EmptyGradOpMaker, ops::PrefetchOpMaker, - ops::PrefetchOpVarTypeInference, ops::PrefetchOpShapeInference); diff --git a/paddle/fluid/operators/prelu_op.cc b/paddle/fluid/operators/prelu_op.cc index a066b3e06e..db040509bc 100644 --- a/paddle/fluid/operators/prelu_op.cc +++ b/paddle/fluid/operators/prelu_op.cc @@ -38,8 +38,7 @@ class PReluOp : public framework::OperatorWithKernel { class PReluOpMaker : public framework::OpProtoAndCheckerMaker { public: - PReluOpMaker(OpProto *proto, OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("X", "The input tensor of prelu operator."); AddInput("Alpha", "The alpha weight of prelu operator."); AddOutput("Out", "The output tensor of prelu operator."); diff --git a/paddle/fluid/operators/print_op.cc b/paddle/fluid/operators/print_op.cc index fafc7e54d7..cceac40295 100644 --- a/paddle/fluid/operators/print_op.cc +++ b/paddle/fluid/operators/print_op.cc @@ -16,6 +16,7 @@ #include #include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/framework/var_type.h" #include "paddle/fluid/framework/variable.h" namespace paddle { @@ -62,7 +63,7 @@ struct Formater { } } void PrintDtype() { - if (dtype.hash_code() != typeid(const char).hash_code()) { + if (!framework::IsType(dtype)) { CLOG << "\tdtype: " << dtype.name() << std::endl; } } @@ -83,15 +84,15 @@ struct Formater { void PrintData(size_t size) { PADDLE_ENFORCE_NOT_NULL(data); // print float - if (dtype.hash_code() == typeid(const float).hash_code()) { + if (framework::IsType(dtype)) { Display(size); - } else if (dtype.hash_code() == typeid(const double).hash_code()) { + } else if (framework::IsType(dtype)) { Display(size); - } else if (dtype.hash_code() == typeid(const int).hash_code()) { + } else if (framework::IsType(dtype)) { Display(size); - } else if (dtype.hash_code() == typeid(const int64_t).hash_code()) { + } else if (framework::IsType(dtype)) { Display(size); - } else if (dtype.hash_code() == typeid(const bool).hash_code()) { + } else if (framework::IsType(dtype)) { Display(size); } else { CLOG << "\tdata: unprintable type: " << dtype.name() << std::endl; @@ -209,8 +210,7 @@ class TensorPrintOp : public framework::OperatorBase { class PrintOpProtoAndCheckMaker : public framework::OpProtoAndCheckerMaker { public: - PrintOpProtoAndCheckMaker(OpProto* proto, OpAttrChecker* op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("In", "Input tensor to be displayed."); AddAttr("first_n", "Only log `first_n` number of times."); AddAttr("message", "A string message to print as a prefix."); diff --git a/paddle/fluid/operators/proximal_adagrad_op.cc b/paddle/fluid/operators/proximal_adagrad_op.cc index e057244c1e..8d8075d761 100644 --- a/paddle/fluid/operators/proximal_adagrad_op.cc +++ b/paddle/fluid/operators/proximal_adagrad_op.cc @@ -66,8 +66,7 
@@ class ProximalAdagradOp : public framework::OperatorWithKernel { class ProximalAdagradOpMaker : public framework::OpProtoAndCheckerMaker { public: - ProximalAdagradOpMaker(OpProto *proto, OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("Param", "(Tensor, default Tensor) " "Input parameter that has to be updated."); diff --git a/paddle/fluid/operators/proximal_gd_op.cc b/paddle/fluid/operators/proximal_gd_op.cc index ed14726318..baf9cbcba2 100644 --- a/paddle/fluid/operators/proximal_gd_op.cc +++ b/paddle/fluid/operators/proximal_gd_op.cc @@ -54,8 +54,7 @@ class ProximalGDOp : public framework::OperatorWithKernel { class ProximalGDOpMaker : public framework::OpProtoAndCheckerMaker { public: - ProximalGDOpMaker(OpProto *proto, OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("Param", "(Tensor, default Tensor) " "Input parameter value that has to be updated."); diff --git a/paddle/fluid/operators/random_crop_op.cc b/paddle/fluid/operators/random_crop_op.cc new file mode 100644 index 0000000000..123fa44fa3 --- /dev/null +++ b/paddle/fluid/operators/random_crop_op.cc @@ -0,0 +1,82 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +#include "paddle/fluid/operators/random_crop_op.h" + +namespace paddle { +namespace operators { + +class RandomCropOp : public framework::OperatorWithKernel { + public: + using framework::OperatorWithKernel::OperatorWithKernel; + + framework::OpKernelType GetExpectedKernelType( + const framework::ExecutionContext& ctx) const override { + return framework::OpKernelType( + framework::ToDataType(ctx.Input("X")->type()), + ctx.device_context()); + } +}; + +class RandomCropOpMaker : public framework::OpProtoAndCheckerMaker { + public: + void Make() override { + AddInput("X", "A batch of instances to random crop."); + AddInput("Seed", "The random seed."); + AddOutput("Out", "The cropped instance batch."); + AddOutput("SeedOut", "The random seed after random cropping.") + .AsIntermediate(); + AddAttr>("shape", "The shape of a cropped instance."); + AddAttr("startup_seed", + "If the input 'Seed' is not initialized, the 'startup_seed' " + "will be used to replace it. Even so, the seed after random " + "crop will also be output to the 'SeedOut'.") + .SetDefault(0); + AddComment(R"DOC( + This operator takes a batch of instances and does random cropping on each instance. + This means that the cropping position differs for each instance, as determined + by a uniform random generator. All cropped instances have the same shape, which + is determined by the operator's attribute 'shape'.
+ )DOC"); + } +}; + +class RandomCropOpInferShape : public framework::InferShapeBase { + public: + void operator()(framework::InferShapeContext* ctx) const override { + auto shape = ctx->Attrs().Get>("shape"); + auto x_dim = ctx->GetInputDim("X"); + PADDLE_ENFORCE_GT(x_dim.size(), static_cast(shape.size())); + auto out_dim = framework::vectorize2int(x_dim); + for (size_t i = 1; i <= shape.size(); ++i) { + size_t x_i = x_dim.size() - i; + size_t shape_i = shape.size() - i; + PADDLE_ENFORCE_GE(x_dim[x_i], shape[shape_i]); + out_dim[x_i] = shape[shape_i]; + } + ctx->SetOutputDim("Out", framework::make_ddim(out_dim)); + } +}; + +} // namespace operators +} // namespace paddle + +namespace ops = paddle::operators; +namespace f = paddle::framework; +REGISTER_OPERATOR(random_crop, ops::RandomCropOp, ops::RandomCropOpMaker, + ops::RandomCropOpInferShape, f::EmptyGradOpMaker); + +template +using Kernel = ops::RandomCropKernel; +REGISTER_OP_CPU_KERNEL(random_crop, Kernel, Kernel, Kernel, + Kernel, Kernel); diff --git a/paddle/fluid/operators/random_crop_op.cu b/paddle/fluid/operators/random_crop_op.cu new file mode 100644 index 0000000000..6fc9bedc55 --- /dev/null +++ b/paddle/fluid/operators/random_crop_op.cu @@ -0,0 +1,21 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "paddle/fluid/operators/random_crop_op.h" + +namespace ops = paddle::operators; +template +using Kernel = ops::RandomCropKernel; +REGISTER_OP_CUDA_KERNEL(random_crop, Kernel, Kernel, Kernel, + Kernel, Kernel); diff --git a/paddle/fluid/operators/random_crop_op.h b/paddle/fluid/operators/random_crop_op.h new file mode 100644 index 0000000000..d68ba9d661 --- /dev/null +++ b/paddle/fluid/operators/random_crop_op.h @@ -0,0 +1,187 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#pragma once + +#include +#include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/operators/detail/safe_ref.h" +#include "paddle/fluid/platform/device_context.h" +#include "paddle/fluid/platform/for_range.h" +#ifdef PADDLE_WITH_CUDA +#include +#endif + +namespace paddle { +namespace operators { + +template +struct Random; + +template <> +struct Random { + using Engine = std::minstd_rand; + + template + using UniformIntDist = std::uniform_int_distribution; +}; + +#ifdef PADDLE_WITH_CUDA +template <> +struct Random { + using Engine = thrust::minstd_rand; + + template + using UniformIntDist = thrust::uniform_int_distribution; +}; +#endif + +template +HOSTDEVICE inline void StridedMemcpy(const T* x, const size_t* x_dims, T* out, + const size_t* out_dims, int i, int rank, + size_t prod_x_remain, + size_t prod_out_remain, + const size_t* offsets) { + size_t x_dim_i = x_dims[i]; + size_t out_dim_i = out_dims[i]; + size_t x_stride = prod_x_remain / x_dim_i; + size_t out_stride = prod_out_remain / out_dim_i; + size_t offset_i = offsets[i]; + + if (i == rank - 1) { + PADDLE_ASSERT(x_stride == 1 && out_stride == 1); + x += offset_i; + for (size_t j = 0; j < out_dim_i; ++j) { + *out++ = *x++; + } + } else { + x += offset_i * x_stride; + for (size_t j = 0; j < out_dim_i; ++j) { + StridedMemcpy(x, x_dims, out, out_dims, i + 1, rank, x_stride, + out_stride, offsets); + x += x_stride; + out += out_stride; + } + } +} + +template +struct RandomCropFunctor { + const T* x_; + T* out_; + size_t x_dims_[9]; + size_t out_dims_[9]; + int num_batchsize_dims_; + int rank_; + int64_t seed_; + + size_t prod_batchsize_dims_; + size_t prod_x_ins_dims_; + size_t prod_out_ins_dims_; + + RandomCropFunctor(const T* x, T* out, const framework::DDim& x_dims, + const framework::DDim& out_dims, int num_batchsize_dims, + int64_t seed) + : x_(x), + out_(out), + num_batchsize_dims_(num_batchsize_dims), + rank_(x_dims.size()), + seed_(seed) { + PADDLE_ENFORCE_EQ(x_dims.size(), out_dims.size()); + PADDLE_ENFORCE_GT(rank_, num_batchsize_dims_); + prod_batchsize_dims_ = 1; + prod_x_ins_dims_ = 1; + prod_out_ins_dims_ = 1; + for (size_t i = 0; i < static_cast(rank_); ++i) { + size_t x_dim_i = x_dims[i]; + size_t out_dim_i = out_dims[i]; + x_dims_[i] = x_dim_i; + out_dims_[i] = out_dim_i; + if (i < static_cast(num_batchsize_dims_)) { + PADDLE_ENFORCE_EQ(x_dim_i, out_dim_i); + prod_batchsize_dims_ *= x_dim_i; + } else { + prod_x_ins_dims_ *= x_dim_i; + prod_out_ins_dims_ *= out_dim_i; + } + } + } + + HOSTDEVICE void operator()(size_t ins_idx) { + typename Random::Engine engine(seed_); + engine.discard(ins_idx * (rank_ - num_batchsize_dims_)); + size_t offsets[9]; + for (int i = num_batchsize_dims_; i < rank_; ++i) { + typename Random::template UniformIntDist dist( + 0, x_dims_[i] - out_dims_[i]); + offsets[i - num_batchsize_dims_] = dist(engine); + } + + const T* x = x_ + ins_idx * prod_x_ins_dims_; + T* out = out_ + ins_idx * prod_out_ins_dims_; + + StridedMemcpy(x, x_dims_ + num_batchsize_dims_, out, + out_dims_ + num_batchsize_dims_, 0, + rank_ - num_batchsize_dims_, prod_x_ins_dims_, + prod_out_ins_dims_, offsets); + } +}; + +template +class RandomCropKernel : public framework::OpKernel { + public: + virtual void Compute(const framework::ExecutionContext& ctx) const { + int64_t seed = 0; + auto& seed_tensor = detail::Ref(ctx.Input("Seed")); + if (seed_tensor.IsInitialized()) { + if (platform::is_cpu_place(seed_tensor.place())) { + seed = *seed_tensor.data(); + } else { + LOG(WARNING) << "It is slow to place seed 
in GPU memory. Please verify " + "your program"; + framework::LoDTensor cpu_seed; + framework::TensorCopySync(seed_tensor, platform::CPUPlace(), &cpu_seed); + seed = *cpu_seed.data(); + } + } else { + VLOG(5) << "WARNING: The input 'Seed' is not initialized, use attribute " + "'startup_seed' instead."; + seed = ctx.Attr("startup_seed"); + } + auto shape = ctx.Attr>("shape"); + auto& x = detail::Ref(ctx.Input("X")); + auto& out = detail::Ref(ctx.Output("Out")); + + int num_batchsize_dims = x.dims().size() - shape.size(); + RandomCropFunctor functor( + x.data(), out.mutable_data(ctx.GetPlace()), x.dims(), out.dims(), + num_batchsize_dims, seed); + platform::ForRange for_range( + ctx.template device_context(), + functor.prod_batchsize_dims_); + + for_range(functor); + + Random::Engine engine(seed); + engine.discard(functor.prod_batchsize_dims_ * + (functor.rank_ - functor.num_batchsize_dims_)); + *ctx.Output("SeedOut")->mutable_data( + framework::make_ddim({1}), platform::CPUPlace()) = engine(); + } +}; + +// TODO(fengjiayi): Backward of random crop op + +} // namespace operators +} // namespace paddle diff --git a/paddle/fluid/operators/rank_loss_op.cc b/paddle/fluid/operators/rank_loss_op.cc index eb9ff8de3e..313cf01541 100644 --- a/paddle/fluid/operators/rank_loss_op.cc +++ b/paddle/fluid/operators/rank_loss_op.cc @@ -46,8 +46,7 @@ class RankLossOp : public framework::OperatorWithKernel { class RankLossOpMaker : public framework::OpProtoAndCheckerMaker { public: - RankLossOpMaker(OpProto *proto, OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("Label", "(2-D Tensor with shape [batch_size x 1]) " "The label indicating A ranked higher than B or not."); diff --git a/paddle/fluid/operators/read_op.cc b/paddle/fluid/operators/read_op.cc index bf02b99589..a0d640b202 100644 --- a/paddle/fluid/operators/read_op.cc +++ b/paddle/fluid/operators/read_op.cc @@ -15,6 +15,7 @@ #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/framework/reader.h" #include "paddle/fluid/operators/detail/safe_ref.h" +#include "paddle/fluid/platform/profiler.h" namespace paddle { namespace operators { @@ -65,10 +66,26 @@ class ReadOp : public framework::OperatorBase { .GetMutable(); std::vector out_arg_names = Outputs("Out"); std::vector ins; + + // For profiling + platform::DeviceContextPool& pool = platform::DeviceContextPool::Instance(); + auto& ctx = *pool.Get(dev_place); + platform::RecordEvent record_event(Type(), &ctx); + reader->ReadNext(&ins); - PADDLE_ENFORCE(!ins.empty(), "There is no next data."); + if (ins.empty()) { + if (Attr("throw_eof_exp")) { + PADDLE_THROW_EOF(); + } else { + ins.resize(out_arg_names.size()); + for (auto& tensor : ins) { + // data type is not important for subsequent DataBalanceOpHandle + tensor.mutable_data(framework::make_ddim({0}), dev_place); + } + } + } PADDLE_ENFORCE_EQ(ins.size(), out_arg_names.size()); - for (size_t i = 0; i < ins.size(); ++i) { + for (size_t i = 0; i < out_arg_names.size(); ++i) { auto* out = scope.FindVar(out_arg_names[i])->GetMutable(); out->ShareDataWith(ins[i]); @@ -79,10 +96,17 @@ class ReadOp : public framework::OperatorBase { class ReadOpMaker : public framework::OpProtoAndCheckerMaker { public: - ReadOpMaker(OpProto* op_proto, OpAttrChecker* op_checker) - : OpProtoAndCheckerMaker(op_proto, op_checker) { + void Make() override { AddInput("Reader", "(ReaderHolder) The executed reader."); AddOutput("Out", "(LoDTensor) The output data.").AsDuplicable(); + AddAttr( + 
"throw_eof_exp", + "If set true, an exception will be thrown when the Reader " + "yields empty (which means there is no next data).\n" + "NOTES: This flag must be true always. It will be set to false" + " only when the data-balance is enabled in ParallelExecutor" + " and it is set by ParallelExecutor instance, not users.") + .SetDefault(true); AddComment(R"DOC( Read Operator diff --git a/paddle/fluid/operators/reader/CMakeLists.txt b/paddle/fluid/operators/reader/CMakeLists.txt index 3106978eb0..728197377d 100644 --- a/paddle/fluid/operators/reader/CMakeLists.txt +++ b/paddle/fluid/operators/reader/CMakeLists.txt @@ -15,14 +15,16 @@ function(reader_library TARGET_NAME) PARENT_SCOPE) endfunction() -reader_library(open_files_op SRCS open_files_op.cc) +cc_library(buffered_reader SRCS buffered_reader.cc DEPS reader simple_threadpool) +reader_library(open_files_op SRCS open_files_op.cc DEPS buffered_reader) reader_library(create_random_data_generator_op SRCS create_random_data_generator_op.cc) reader_library(create_shuffle_reader_op SRCS create_shuffle_reader_op.cc) reader_library(create_batch_reader_op SRCS create_batch_reader_op.cc) reader_library(create_recordio_file_reader_op SRCS create_recordio_file_reader_op.cc) -reader_library(create_double_buffer_reader_op SRCS create_double_buffer_reader_op.cc) +reader_library(create_double_buffer_reader_op SRCS create_double_buffer_reader_op.cc DEPS buffered_reader) reader_library(create_multi_pass_reader_op SRCS create_multi_pass_reader_op.cc) -reader_library(create_threaded_reader_op SRCS create_threaded_reader_op.cc) +reader_library(create_custom_reader_op SRCS create_custom_reader_op.cc) +reader_library(create_py_reader_op SRCS create_py_reader_op.cc) cc_test(reader_blocking_queue_test SRCS reader_blocking_queue_test.cc) # Export local libraries to parent diff --git a/paddle/fluid/operators/reader/blocking_queue.h b/paddle/fluid/operators/reader/blocking_queue.h index 71684b1417..28cc91a5ed 100644 --- a/paddle/fluid/operators/reader/blocking_queue.h +++ b/paddle/fluid/operators/reader/blocking_queue.h @@ -81,6 +81,15 @@ class BlockingQueue { } } + void ReOpen() { + std::lock_guard lock(mutex_); + closed_ = false; + std::deque new_deque; + queue_.swap(new_deque); + send_cv_.notify_all(); + receive_cv_.notify_all(); + } + void Close() { std::lock_guard lock(mutex_); closed_ = true; @@ -88,24 +97,29 @@ class BlockingQueue { receive_cv_.notify_all(); } - bool IsClosed() { + bool IsClosed() const { std::lock_guard lock(mutex_); return closed_; } - size_t Cap() { + size_t Cap() const { std::lock_guard lock(mutex_); return capacity_; } + size_t Size() const { + std::lock_guard lock(mutex_); + return queue_.size(); + } + private: size_t capacity_; bool closed_; std::deque queue_; - std::mutex mutex_; - std::condition_variable receive_cv_; - std::condition_variable send_cv_; + mutable std::mutex mutex_; + mutable std::condition_variable receive_cv_; + mutable std::condition_variable send_cv_; }; } // namespace reader } // namespace operators diff --git a/paddle/fluid/operators/reader/buffered_reader.cc b/paddle/fluid/operators/reader/buffered_reader.cc new file mode 100644 index 0000000000..26ff221dfa --- /dev/null +++ b/paddle/fluid/operators/reader/buffered_reader.cc @@ -0,0 +1,108 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "paddle/fluid/operators/reader/buffered_reader.h" +#include + +namespace paddle { +namespace operators { +namespace reader { +BufferedReader::~BufferedReader() { + reader_->Shutdown(); + while (!position_.empty()) { + position_.front().wait(); + position_.pop(); + } +} + +BufferedReader::BufferedReader( + const std::shared_ptr &reader, + const platform::Place &place, size_t buffer_size) + : framework::DecoratedReader(reader), + thread_pool_(1), + place_(place), + buffer_size_(buffer_size) { + cpu_buffer_.resize(buffer_size); + gpu_buffer_.resize(buffer_size); + ReadTillBufferFullAsync(); +} + +void BufferedReader::ReadTillBufferFullAsync() { + PADDLE_ENFORCE_EQ(position_.size(), 0U); + for (size_t i = 0; i < buffer_size_; ++i) { + ReadAsync(i); + } +} + +void BufferedReader::ReadAsync(size_t i) { + position_.emplace(thread_pool_.enqueue([this, i]() -> size_t { + TensorVec &cpu = cpu_buffer_[i]; + reader_->ReadNext(&cpu); + + if (cpu.empty()) { + return -1UL; + } + + if (platform::is_gpu_place(place_)) { + TensorVec &gpu = gpu_buffer_[i]; + gpu.resize(cpu.size()); + for (size_t i = 0; i < cpu.size(); ++i) { + framework::TensorCopySync(cpu[i], place_, &gpu[i]); + gpu[i].set_lod(cpu[i].lod()); + } + } + return i; + })); +} + +void BufferedReader::ShutdownImpl() { + reader_->Shutdown(); + while (!position_.empty()) { + position_.pop(); + } + prev_pos_ = -1UL; +} + +void BufferedReader::StartImpl() { + reader_->Start(); + ReadTillBufferFullAsync(); +} + +void BufferedReader::ReadNextImpl(std::vector *out) { + if (position_.empty()) { + out->clear(); + return; + } + size_t i = position_.front().get(); + position_.pop(); + + if (i == -1UL) { + ReadNextImpl(out); + return; + } + + *out = platform::is_gpu_place(place_) ? gpu_buffer_[i] : cpu_buffer_[i]; + + // Do not push the current position into ReadAsync; push the previous + // position instead. Since all computation in fluid is async, changing the + // data at the current position may cause data errors. + if (prev_pos_ != -1UL) { + ReadAsync(prev_pos_); + } + prev_pos_ = i; +} + +} // namespace reader +} // namespace operators +} // namespace paddle diff --git a/paddle/fluid/operators/reader/buffered_reader.h b/paddle/fluid/operators/reader/buffered_reader.h new file mode 100644 index 0000000000..cbe2bc1b5f --- /dev/null +++ b/paddle/fluid/operators/reader/buffered_reader.h @@ -0,0 +1,66 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License.
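The BufferedReader above implements slot-based prefetching: the constructor issues one asynchronous read per buffer slot, ReadNextImpl() blocks on the oldest future, and after handing out slot i it re-issues the previously consumed slot rather than i itself, because downstream computation is asynchronous and may still be reading slot i's tensors. A self-contained sketch of that control flow, using std::async and plain ints where the patch uses its ThreadPool and tensor vectors:

#include <atomic>
#include <future>
#include <iostream>
#include <queue>
#include <vector>

struct SlotPrefetcher {
  explicit SlotPrefetcher(size_t buffer_size) : buffer_(buffer_size) {
    for (size_t i = 0; i < buffer_.size(); ++i) ReadAsync(i);
  }
  void ReadAsync(size_t i) {
    // Fill slot i in the background; the future yields the slot index.
    positions_.push(std::async(std::launch::async, [this, i] {
      buffer_[i] = Produce();  // stands in for reader_->ReadNext(...)
      return i;
    }));
  }
  int Next() {
    size_t i = positions_.front().get();  // wait for the oldest pending read
    positions_.pop();
    int value = buffer_[i];
    // Re-issue the slot consumed one step earlier, not slot i: the caller
    // may still be using slot i's data asynchronously.
    if (prev_pos_ != static_cast<size_t>(-1)) ReadAsync(prev_pos_);
    prev_pos_ = i;
    return value;
  }
  static int Produce() {
    static std::atomic<int> n{0};
    return n++;
  }
  std::vector<int> buffer_;
  std::queue<std::future<size_t>> positions_;
  size_t prev_pos_{static_cast<size_t>(-1)};
};

int main() {
  SlotPrefetcher prefetcher(2);
  for (int k = 0; k < 5; ++k) std::cout << prefetcher.Next() << "\n";
  return 0;
}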
+ +#pragma once + +#include +#include +#include +#include "ThreadPool.h" +#include "paddle/fluid/framework/reader.h" + +namespace paddle { +namespace operators { +namespace reader { + +class BufferedReader : public framework::DecoratedReader { + using TensorVec = std::vector; + using VecFuture = std::future; + + public: + BufferedReader(const std::shared_ptr& reader, + const platform::Place& place, size_t buffer_size); + + ~BufferedReader() override; + + private: + void ReadTillBufferFullAsync(); + + void ReadAsync(size_t i); + + protected: + void ShutdownImpl() override; + void StartImpl() override; + void ReadNextImpl(std::vector* out) override; + + private: + ThreadPool thread_pool_; + platform::Place place_; + const size_t buffer_size_; + + std::queue> position_; + + // The buffer for reading data. + // NOTE: the simplest way to implement a buffered reader is to use no buffer + // at all, just read asynchronously and create futures as the buffer size. + // However, allocating tensors every time is extremely slow. Here we store + // all data in buffers and avoid allocating on every read. + std::vector cpu_buffer_; + std::vector gpu_buffer_; + size_t prev_pos_{-1UL}; +}; + +} // namespace reader +} // namespace operators +} // namespace paddle diff --git a/paddle/fluid/operators/reader/create_batch_reader_op.cc b/paddle/fluid/operators/reader/create_batch_reader_op.cc index 04c5872bef..e17c2ffd39 100644 --- a/paddle/fluid/operators/reader/create_batch_reader_op.cc +++ b/paddle/fluid/operators/reader/create_batch_reader_op.cc @@ -20,15 +20,19 @@ namespace reader { class BatchReader : public framework::DecoratedReader { public: - BatchReader(ReaderBase* reader, int batch_size) - : DecoratedReader(reader), batch_size_(batch_size) { + BatchReader(const std::shared_ptr& reader, int batch_size, + bool discard_leftover) + : DecoratedReader(reader), + batch_size_(static_cast(batch_size)), + discard_leftover_(discard_leftover) { buffer_.reserve(batch_size_); } - void ReadNext(std::vector* out) override; + void ReadNextImpl(std::vector* out) override; private: - int batch_size_; + size_t batch_size_; + bool discard_leftover_; std::vector> buffer_; }; @@ -46,18 +50,22 @@ class CreateBatchReaderOp : public framework::OperatorBase { } const auto& underlying_reader = scope.FindVar(Input("UnderlyingReader")) ->Get(); - out->Reset( - new BatchReader(underlying_reader.Get(), Attr("batch_size"))); + out->Reset(framework::MakeDecoratedReader( + underlying_reader, Attr("batch_size"), + Attr("discard_leftover"))); } }; class CreateBatchReaderOpMaker : public DecoratedReaderMakerBase { - public: - CreateBatchReaderOpMaker(OpProto* op_proto, OpAttrChecker* op_checker) - : DecoratedReaderMakerBase(op_proto, op_checker) { + protected: + void Apply() override { AddAttr("batch_size", "How many instances the batch reader yields each time.") .GreaterThan(0); + AddAttr("discard_leftover", "If true, the leftover instances that are not enough for a " "new batch will be discarded.") .SetDefault(true); AddComment(R"DOC( CreateBatchReader Operator @@ -67,10 +75,10 @@ class CreateBatchReaderOpMaker : public DecoratedReaderMakerBase { } }; -void BatchReader::ReadNext(std::vector* out) { +void BatchReader::ReadNextImpl(std::vector* out) { buffer_.clear(); buffer_.reserve(batch_size_); - for (int i = 0; i < batch_size_; ++i) { + for (size_t i = 0; i < batch_size_; ++i) { buffer_.push_back(std::vector()); reader_->ReadNext(&buffer_.back()); if (buffer_.back().empty()) { @@ -78,15 +86,18 @@ void BatchReader::ReadNext(std::vector* out) { break; }
} + if (discard_leftover_ && buffer_.size() < batch_size_) { + buffer_.clear(); + } // Concat instances out->clear(); if (buffer_.empty()) { // if buffer_ is empty, the 'out' will return as an empty vector. return; } - int out_num = buffer_[0].size(); + size_t out_num = buffer_[0].size(); out->reserve(out_num); - for (int j = 0; j < out_num; ++j) { + for (size_t j = 0; j < out_num; ++j) { // Merge shape and check date type std::type_index batch_type = buffer_[0][j].type(); framework::DDim batch_shape = buffer_[0][j].dims(); diff --git a/paddle/fluid/operators/reader/create_custom_reader_op.cc b/paddle/fluid/operators/reader/create_custom_reader_op.cc new file mode 100644 index 0000000000..85394b336f --- /dev/null +++ b/paddle/fluid/operators/reader/create_custom_reader_op.cc @@ -0,0 +1,190 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "paddle/fluid/framework/executor.h" +#include "paddle/fluid/operators/detail/safe_ref.h" +#include "paddle/fluid/operators/reader/reader_op_registry.h" + +namespace paddle { +namespace operators { +namespace reader { + +class CustomReader : public framework::DecoratedReader { + public: + CustomReader(const std::shared_ptr& reader, + const framework::BlockDesc& sub_block, + const std::vector& source_var_names, + const std::vector& sink_var_names) + : DecoratedReader(reader), + program_(*sub_block.Program()), + sub_block_id_(sub_block.ID()), + exe_(framework::Executor(platform::CPUPlace())), + source_var_names_(source_var_names), + sink_var_names_(sink_var_names) {} + + void ReadNextImpl(std::vector* out) override; + + private: + const framework::ProgramDesc program_; + int sub_block_id_; + framework::Executor exe_; + framework::Scope scope_; + + std::vector source_var_names_; + std::vector sink_var_names_; +}; + +class CreateCustomReaderOp : public framework::OperatorBase { + public: + using framework::OperatorBase::OperatorBase; + + private: + void RunImpl(const framework::Scope& scope, + const platform::Place& dev_place) const override { + auto* out = scope.FindVar(Output("Out")) + ->template GetMutable(); + auto* sub_block = Attr("sub_block"); + if (out->Get() != nullptr) { + return; + } + const auto& underlying_reader = scope.FindVar(Input("UnderlyingReader")) + ->Get(); + out->Reset(framework::MakeDecoratedReader( + underlying_reader, *sub_block, + Attr>("source_var_names"), + Attr>("sink_var_names"))); + } +}; + +class CreateCustomReaderOpMaker : public DecoratedReaderMakerBase { + protected: + void Apply() override { + AddAttr( + "sub_block", "The block to hold all preprocessing operators."); + AddAttr>( + "source_var_names", + "Source variables are starting points of data preprocessing. They hold " + "preprocessing's input tensors. Each source variable corresponds to " + "one of the underlying reader's outputs."); + AddAttr>( + "sink_var_names", + "Sink variables are ending points of data preprocessing. They hold " + "preprocessing's output tensors.
Each sink variable corresponds to " + "one of the custom reader's outputs."); + AddComment(R"DOC( + CreateCustomReader Operator + + A custom reader can be used for input data preprocessing. + A custom reader holds its own sub-block, which will be executed on CPU + in its 'ReadNext()' function. Users can configure their own + preprocessing pipelines by inserting operators into the custom reader's + sub-block. + )DOC"); + } +}; + +class CustomReaderInferShape : public framework::InferShapeBase { + public: + void operator()(framework::InferShapeContext* ctx) const override { + PADDLE_ENFORCE(!ctx->IsRuntime(), + "'CustomReaderInferShape' should only be invoked during " + "compile time."); + PADDLE_ENFORCE(ctx->HasOutput("Out"), + "The output decorated reader should not be null."); + const auto* sub_block = + ctx->Attrs().Get("sub_block"); + const auto sink_var_names = + ctx->Attrs().Get>("sink_var_names"); + std::vector> res_dims; + std::vector res_lod_levels; + for (const std::string& var_name : sink_var_names) { + auto* sink_var = sub_block->FindVar(var_name); + PADDLE_ENFORCE_NOT_NULL(sink_var); + res_dims.emplace_back(sink_var->GetShape()); + res_lod_levels.push_back(sink_var->GetLoDLevel()); + } + auto* out_reader = + boost::get(ctx->GetOutputVarPtrs("Out")[0]); + out_reader->SetShapes(res_dims); + out_reader->SetLoDLevels(res_lod_levels); + } +}; + +class CustomReaderInferVarType : public framework::VarTypeInference { + public: + void operator()(const framework::OpDesc& op_desc, + framework::BlockDesc* block) const override { + framework::VarDesc* out_reader = block->FindVar(op_desc.Output("Out")[0]); + PADDLE_ENFORCE_NOT_NULL(out_reader); + out_reader->SetType(framework::proto::VarType::READER); + + auto sink_var_names = + boost::get>(op_desc.GetAttr("sink_var_names")); + const auto* sub_block = + boost::get(op_desc.GetAttr("sub_block")); + std::vector res_data_types; + for (const std::string& var_name : sink_var_names) { + framework::VarDesc* var = sub_block->FindVar(var_name); + PADDLE_ENFORCE_NOT_NULL(var); + res_data_types.emplace_back(var->GetDataType()); + } + out_reader->SetDataTypes(res_data_types); + } +}; + +void CustomReader::ReadNextImpl(std::vector* out) { + out->clear(); + std::vector underlying_outs; + reader_->ReadNext(&underlying_outs); + if (underlying_outs.empty()) { + // There is no next data. + return; + } + PADDLE_ENFORCE(source_var_names_.size() == underlying_outs.size(), + "The size of source_var_names(%d) and the size of " + "underlying_outs(%d) are not consistent. Each feeding element " + "must have its own source variable.", + source_var_names_.size(), underlying_outs.size()); + // The scope for CustomReader's sub-block should be independent and shouldn't + // be any other computation scope's child. Otherwise, data preprocessing and + // computation cannot be concurrent. + framework::Scope* exe_scope = &scope_.NewScope(); + // 1. Copy LoDTensors from underlying reader's output to source variables. + for (size_t i = 0; i < source_var_names_.size(); ++i) { + framework::Variable* var = exe_scope->Var(source_var_names_[i]); + framework::LoDTensor* tensor = var->GetMutable(); + tensor->ShareDataWith(underlying_outs[i]); + tensor->set_lod(underlying_outs[i].lod()); + } + // 2. Run the sub-block. + exe_.Run(program_, exe_scope, sub_block_id_, false, true); + // 3. Copy LoDTensors from sink variables to out.
+ out->resize(sink_var_names_.size()); + for (size_t i = 0; i < sink_var_names_.size(); ++i) { + const auto& tensor = detail::Ref(exe_scope->FindVar(sink_var_names_[i])) + .Get(); + framework::TensorCopySync(tensor, platform::CPUPlace(), &(*out)[i]); + } + scope_.DeleteScope(exe_scope); +} + +} // namespace reader +} // namespace operators +} // namespace paddle + +namespace ops = paddle::operators::reader; +REGISTER_OPERATOR(create_custom_reader, ops::CreateCustomReaderOp, + ops::CreateCustomReaderOpMaker, ops::CustomReaderInferShape, + ops::CustomReaderInferVarType, + paddle::framework::EmptyGradOpMaker) diff --git a/paddle/fluid/operators/reader/create_double_buffer_reader_op.cc b/paddle/fluid/operators/reader/create_double_buffer_reader_op.cc index e5efac4615..ed719f91d0 100644 --- a/paddle/fluid/operators/reader/create_double_buffer_reader_op.cc +++ b/paddle/fluid/operators/reader/create_double_buffer_reader_op.cc @@ -12,73 +12,12 @@ // See the License for the specific language governing permissions and // limitations under the License. -#include // NOLINT - -#include "paddle/fluid/operators/reader/blocking_queue.h" +#include "paddle/fluid/operators/reader/buffered_reader.h" #include "paddle/fluid/operators/reader/reader_op_registry.h" namespace paddle { namespace operators { namespace reader { - -// 'Double buffer' means we shall maintain two batches of input data at the same -// time. So the kCacheSize shoul be at least 2. -static constexpr size_t kCacheSize = 3; -// There will be two bacthes out of the channel during training: -// 1. the one waiting to be sent to the channel -// 2. the one just be received from the channel, which is also being used by -// subsequent operators. -// So the channel size should be kChacheSize - 2 -static constexpr size_t kChannelSize = 1; // kCacheSize - 2 - -class DoubleBufferReader : public framework::DecoratedReader { - public: - explicit DoubleBufferReader( - ReaderBase* reader, platform::Place target_place = platform::CPUPlace()) - : DecoratedReader(reader), place_(target_place) { - cpu_tensor_cache_.resize(kCacheSize); - gpu_tensor_cache_.resize(kCacheSize); -#ifdef PADDLE_WITH_CUDA - if (platform::is_gpu_place(place_)) { - for (size_t i = 0; i < kCacheSize; ++i) { - ctxs_.emplace_back(new platform::CUDADeviceContext( - boost::get(place_))); - } - } -#endif - StartPrefetcher(); - } - - void ReadNext(std::vector* out) override; - void ReInit() override; - - ~DoubleBufferReader() { EndPrefetcher(); } - - private: - void StartPrefetcher() { - channel_ = new reader::BlockingQueue(kChannelSize); - prefetcher_ = std::thread([this] { PrefetchThreadFunc(); }); - } - - void EndPrefetcher() { - channel_->Close(); - if (prefetcher_.joinable()) { - prefetcher_.join(); - } - delete channel_; - channel_ = nullptr; - } - - void PrefetchThreadFunc(); - - std::thread prefetcher_; - reader::BlockingQueue* channel_; - platform::Place place_; - std::vector> cpu_tensor_cache_; - std::vector> gpu_tensor_cache_; - std::vector> ctxs_; -}; - class CreateDoubleBufferReaderOp : public framework::OperatorBase { public: using framework::OperatorBase::OperatorBase; @@ -108,19 +47,19 @@ class CreateDoubleBufferReaderOp : public framework::OperatorBase { place = platform::CUDAPlace(static_cast(num)); } - out->Reset(new DoubleBufferReader(underlying_reader.Get(), place)); + out->Reset(framework::MakeDecoratedReader(underlying_reader, + place, 2)); } }; class CreateDoubleBufferReaderOpMaker : public DecoratedReaderMakerBase { - public: - CreateDoubleBufferReaderOpMaker(OpProto* 
op_proto, OpAttrChecker* op_checker) - : DecoratedReaderMakerBase(op_proto, op_checker) { + protected: + void Apply() override { AddComment(R"DOC( CreateDoubleBufferReader Operator A double buffer reader takes another reader as its 'underlying reader'. - It launches another thread to execute the 'underlying reader' asynchronously, + It launches another thread to execute the 'underlying reader' asynchronously, which prevents reading process from blocking subsequent training. )DOC"); std::unordered_set enum_range; @@ -136,57 +75,6 @@ class CreateDoubleBufferReaderOpMaker : public DecoratedReaderMakerBase { } }; -void DoubleBufferReader::ReadNext(std::vector* out) { - size_t cached_tensor_id; - if (channel_->Receive(&cached_tensor_id)) { - if (platform::is_gpu_place(place_)) { - *out = gpu_tensor_cache_[cached_tensor_id]; - } else { - // CPU place - *out = cpu_tensor_cache_[cached_tensor_id]; - } - } else { - out->clear(); - } -} - -void DoubleBufferReader::ReInit() { - reader_->ReInit(); - EndPrefetcher(); - StartPrefetcher(); -} - -void DoubleBufferReader::PrefetchThreadFunc() { - VLOG(5) << "A new prefetch thread starts."; - size_t cached_tensor_id = 0; - while (true) { - auto& cpu_batch = cpu_tensor_cache_[cached_tensor_id]; - reader_->ReadNext(&cpu_batch); - if (cpu_batch.empty()) { - // The underlying reader have no next data. - break; - } - if (platform::is_gpu_place(place_)) { - auto& gpu_batch = gpu_tensor_cache_[cached_tensor_id]; - gpu_batch.resize(cpu_batch.size()); - for (size_t i = 0; i < cpu_batch.size(); ++i) { - // TODO(fengjiayi): Use asynchronous TensorCopy instead - framework::TensorCopySync(cpu_batch[i], place_, &gpu_batch[i]); - gpu_batch[i].set_lod(cpu_batch[i].lod()); - } - } - if (!channel_->Send(cached_tensor_id)) { - VLOG(5) << "WARNING: The double buffer channel has been closed. 
The " - "prefetch thread will terminate."; - break; - } - ++cached_tensor_id; - cached_tensor_id %= kCacheSize; - } - channel_->Close(); - VLOG(5) << "Prefetch thread terminates."; -} - } // namespace reader } // namespace operators } // namespace paddle diff --git a/paddle/fluid/operators/reader/create_multi_pass_reader_op.cc b/paddle/fluid/operators/reader/create_multi_pass_reader_op.cc index 0573345ba5..0a225597d3 100644 --- a/paddle/fluid/operators/reader/create_multi_pass_reader_op.cc +++ b/paddle/fluid/operators/reader/create_multi_pass_reader_op.cc @@ -21,26 +21,25 @@ namespace reader { class MultiPassReader : public framework::DecoratedReader { public: - MultiPassReader(ReaderBase* reader, int pass_num) + MultiPassReader(const std::shared_ptr& reader, int pass_num) : DecoratedReader(reader), pass_num_(pass_num), pass_count_(0) {} - void ReadNext(std::vector* out) override { + void ReadNextImpl(std::vector* out) override { reader_->ReadNext(out); - if (out->empty()) { + if (out->empty() && pass_count_ < pass_num_ - 1) { + reader_->Shutdown(); + reader_->Start(); + reader_->ReadNext(out); ++pass_count_; - if (pass_count_ < pass_num_) { - reader_->ReInit(); - reader_->ReadNext(out); - } } } - void ReInit() override { + private: + void StartImpl() override { pass_count_ = 0; - reader_->ReInit(); + reader_->Start(); } - private: int pass_num_; mutable int pass_count_; }; @@ -60,25 +59,25 @@ class CreateMultiPassReaderOp : public framework::OperatorBase { const auto& underlying_reader = scope.FindVar(Input("UnderlyingReader")) ->Get(); int pass_num = Attr("pass_num"); - out->Reset(new MultiPassReader(underlying_reader.Get(), pass_num)); + out->Reset(framework::MakeDecoratedReader( + underlying_reader, pass_num)); } }; class CreateMultiPassReaderOpMaker : public DecoratedReaderMakerBase { - public: - CreateMultiPassReaderOpMaker(OpProto* op_proto, OpAttrChecker* op_checker) - : DecoratedReaderMakerBase(op_proto, op_checker) { + protected: + void Apply() override { AddAttr("pass_num", "The number of pass to run.").GreaterThan(0); AddComment(R"DOC( CreateMultiPassReader Operator - This operator creates a multi-pass reader. A multi-pass reader - is used to yield data for several pass training continuously. + This operator creates a multi-pass reader. A multi-pass reader + is used to yield data for several pass training continuously. It takes the number of passes to run as one of its attributes - ('pass_num'), and maintains a pass counter to record how many - passes it has completed. When the underlying reader reaches the - EOF, the multi-pass reader checks whether it has completed training - of the given number of pass. If not, the underlying reader will + ('pass_num'), and maintains a pass counter to record how many + passes it has completed. When the underlying reader reaches the + EOF, the multi-pass reader checks whether it has completed training + of the given number of pass. If not, the underlying reader will be re-initialized and starts a new pass automatically. )DOC"); } diff --git a/paddle/fluid/operators/reader/create_py_reader_op.cc b/paddle/fluid/operators/reader/create_py_reader_op.cc new file mode 100644 index 0000000000..0f31ca1a94 --- /dev/null +++ b/paddle/fluid/operators/reader/create_py_reader_op.cc @@ -0,0 +1,89 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "paddle/fluid/operators/reader/lod_tensor_blocking_queue.h" +#include "paddle/fluid/operators/reader/reader_op_registry.h" + +namespace paddle { +namespace operators { +namespace reader { + +class PyReader : public framework::FileReader { + public: + explicit PyReader(const std::shared_ptr& queue) + : framework::FileReader() { + PADDLE_ENFORCE(queue != nullptr, "LoDTensorBlockingQueue must not be null"); + queue_ = queue; + } + + void ReadNext(std::vector* out) override { + bool success; + *out = queue_->Pop(&success); + if (!success) out->clear(); + } + + ~PyReader() { queue_->Close(); } + + void Shutdown() override { queue_->Close(); } + + void Start() override { queue_->ReOpen(); } + + private: + std::shared_ptr queue_; +}; + +class CreatePyReaderOp : public framework::OperatorBase { + public: + using framework::OperatorBase::OperatorBase; + + private: + void RunImpl(const framework::Scope& scope, + const platform::Place& dev_place) const override { + auto* out = scope.FindVar(Output("Out")) + ->template GetMutable(); + if (out->Get() != nullptr) return; + + const std::string& queue_name = Input("blocking_queue"); + auto* queue_holder_var = scope.FindVar(queue_name); + PADDLE_ENFORCE_NOT_NULL( + queue_holder_var, + "No LoDTensorBlockingQueueHolder variable with name %s found", + queue_name); + auto* queue_holder = + queue_holder_var->template GetMutable(); + + out->Reset(std::make_shared(queue_holder->GetQueue())); + } +}; + +class CreatePyReaderOpMaker : public FileReaderMakerBase { + protected: + void Apply() override { + AddInput("blocking_queue", + "Name of the `LoDTensorBlockingQueueHolder` variable"); + + AddComment(R"DOC( + Create PyReader to support LoDTensor data feeding in Python side. 
+      )DOC");
+  }
+};
+
+}  // namespace reader
+}  // namespace operators
+}  // namespace paddle
+
+namespace reader = ::paddle::operators::reader;
+
+REGISTER_FILE_READER_OPERATOR(create_py_reader, reader::CreatePyReaderOp,
+                              reader::CreatePyReaderOpMaker);
diff --git a/paddle/fluid/operators/reader/create_random_data_generator_op.cc b/paddle/fluid/operators/reader/create_random_data_generator_op.cc
index d1cb8e47da..e5c116dfcd 100644
--- a/paddle/fluid/operators/reader/create_random_data_generator_op.cc
+++ b/paddle/fluid/operators/reader/create_random_data_generator_op.cc
@@ -19,19 +19,20 @@ namespace operators {
 namespace reader {
 
 template <typename T>
-class RandomDataGenerator : public framework::ReaderBase {
+class RandomDataGenerator : public framework::FileReader {
  public:
-  RandomDataGenerator(const std::vector<framework::DDim>& shapes, float min,
-                      float max)
-      : framework::ReaderBase(), min_(min), max_(max), shapes_(shapes) {
-    PADDLE_ENFORCE_LE(
-        min, max, "'min' shouldn't be greater than 'max'.(%f vs %f)", min, max);
+  RandomDataGenerator(const std::vector<framework::DDim>& shapes, float low,
+                      float high)
+      : framework::FileReader(), low_(low), high_(high), shapes_(shapes) {
+    PADDLE_ENFORCE_LE(low, high,
+                      "'low' shouldn't be greater than 'high'.(%f vs %f)", low,
+                      high);
     unsigned int seed = std::random_device()();
     engine_.seed(seed);
-    dist_ = std::uniform_real_distribution<float>(min_, max_);
+    dist_ = std::uniform_real_distribution<float>(low_, high_);
   }
 
-  void ReadNext(std::vector<framework::LoDTensor>* out) override {
+  void ReadNextImpl(std::vector<framework::LoDTensor>* out) override {
     out->clear();
     out->reserve(shapes_.size());
     for (const framework::DDim& shape : shapes_) {
@@ -50,11 +51,9 @@ class RandomDataGenerator : public framework::ReaderBase {
     }
   }
 
-  void ReInit() override { return; }
-
  private:
-  float min_;
-  float max_;
+  float low_;
+  float high_;
   std::minstd_rand engine_;
   std::uniform_real_distribution<float> dist_;
   std::vector<framework::DDim> shapes_;
@@ -78,23 +77,22 @@ class CreateRandomDataGeneratorOp : public framework::OperatorBase {
     std::vector<framework::DDim> shapes = RestoreShapes(shape_concat, ranks);
     auto* out = scope.FindVar(Output("Out"))
                     ->template GetMutable<framework::ReaderHolder>();
-    out->Reset(new RandomDataGenerator<T>(shapes, Attr<float>("min"),
-                                          Attr<float>("max")));
+    out->Reset(std::make_shared<RandomDataGenerator<T>>(
+        shapes, Attr<float>("low"), Attr<float>("high")));
   }
 };
 
 class CreateRandomDataGeneratorOpMaker : public FileReaderMakerBase {
- public:
-  CreateRandomDataGeneratorOpMaker(OpProto* op_proto, OpAttrChecker* op_checker)
-      : FileReaderMakerBase(op_proto, op_checker) {
-    AddAttr<float>("min", "The lower bound of reader's uniform distribution.");
-    AddAttr<float>("max", "The upper bound of reader's uniform distribution.");
+ protected:
+  void Apply() override {
+    AddAttr<float>("low", "The lower bound of reader's uniform distribution.");
+    AddAttr<float>("high", "The upper bound of reader's uniform distribution.");
    AddComment(R"DOC(
      CreateRandomDataGenerator Operator

      This Op creates a random reader.
      The reader generates random data instead of really reading from files.
-      Generated data follow an uniform distribution between 'min' and 'max'.
+      Generated data follows a uniform distribution between 'low' and 'high'.
)DOC"); } }; diff --git a/paddle/fluid/operators/reader/create_recordio_file_reader_op.cc b/paddle/fluid/operators/reader/create_recordio_file_reader_op.cc index 2ae2972556..a08a9dbd0d 100644 --- a/paddle/fluid/operators/reader/create_recordio_file_reader_op.cc +++ b/paddle/fluid/operators/reader/create_recordio_file_reader_op.cc @@ -21,10 +21,8 @@ namespace reader { template class RecordIOFileReader : public framework::FileReader { public: - explicit RecordIOFileReader(const std::string& filename, - const std::vector& dims) - : FileReader(dims), - scanner_(filename), + explicit RecordIOFileReader(const std::string& filename) + : scanner_(filename), dev_ctx_(*platform::DeviceContextPool::Instance().Get( platform::CPUPlace())) { if (ThreadSafe) { @@ -33,18 +31,21 @@ class RecordIOFileReader : public framework::FileReader { LOG(INFO) << "Creating file reader" << filename; } - void ReInit() override { scanner_.Reset(); } - protected: void ReadNextImpl(std::vector* out) override { + std::unique_ptr> guard; if (ThreadSafe) { - std::lock_guard guard(*mutex_); - *out = framework::ReadFromRecordIO(&scanner_, dev_ctx_); - } else { - *out = framework::ReadFromRecordIO(&scanner_, dev_ctx_); + guard.reset(new std::lock_guard(*mutex_)); + } + + bool ok = framework::ReadFromRecordIO(&scanner_, dev_ctx_, out); + if (!ok) { + out->clear(); } } + void StartImpl() override { scanner_.Reset(); } + private: std::unique_ptr mutex_; recordio::Scanner scanner_; @@ -58,32 +59,26 @@ class CreateRecordIOReaderOp : public framework::OperatorBase { private: void RunImpl(const framework::Scope& scope, const platform::Place& dev_place) const override { - const auto& shape_concat = Attr>("shape_concat"); - const auto& ranks = Attr>("ranks"); - PADDLE_ENFORCE(!shape_concat.empty() && !ranks.empty()); - PADDLE_ENFORCE_EQ(std::accumulate(ranks.begin(), ranks.end(), 0), - static_cast(shape_concat.size()), - "The accumulate of all ranks should be equal to the " - "shape concat's length."); std::string filename = Attr("filename"); - auto* out = scope.FindVar(Output("Out")) ->template GetMutable(); - out->Reset(new RecordIOFileReader( - filename, RestoreShapes(shape_concat, ranks))); + out->Reset(std::make_shared>(filename)); } }; class CreateRecordIOReaderOpMaker : public FileReaderMakerBase { - public: - CreateRecordIOReaderOpMaker(OpProto* op_proto, OpAttrChecker* op_checker) - : FileReaderMakerBase(op_proto, op_checker) { - AddAttr("filename", "The filename of record io reader"); + protected: + void Apply() override { + AddAttr( + "filename", + "The filename of record file. This file will given to reader."); AddComment(R"DOC( - CreateRecordIOReader Operator +Open a recordio file and return the reader object. The returned reader object +is thread-safe. - Create a reader from a record io file +NOTE: This is a very low-level API. It is used for debugging data file or +training. Please use `open_files` instead of this API for production usage. 
)DOC"); } }; diff --git a/paddle/fluid/operators/reader/create_shuffle_reader_op.cc b/paddle/fluid/operators/reader/create_shuffle_reader_op.cc index 13825d6591..3f72890a7c 100644 --- a/paddle/fluid/operators/reader/create_shuffle_reader_op.cc +++ b/paddle/fluid/operators/reader/create_shuffle_reader_op.cc @@ -23,7 +23,8 @@ namespace reader { class ShuffleReader : public framework::DecoratedReader { public: - ShuffleReader(ReaderBase* reader, size_t buffer_size, size_t seed = 0) + ShuffleReader(const std::shared_ptr& reader, size_t buffer_size, + size_t seed = 0) : DecoratedReader(reader), buffer_size_(buffer_size), seed_(seed) { VLOG(10) << "Create shuffle reader of " << reader_; if (seed_ == 0) { @@ -33,7 +34,7 @@ class ShuffleReader : public framework::DecoratedReader { ReloadBuffer(); } - void ReadNext(std::vector* out) override { + void ReadNextImpl(std::vector* out) override { out->clear(); if (iteration_pos_ >= buffer_.size()) { VLOG(10) << "Resetting shuffle buffer"; @@ -46,6 +47,17 @@ class ShuffleReader : public framework::DecoratedReader { } private: + void ShutdownImpl() override { + reader_->Shutdown(); + buffer_.clear(); + iteration_pos_ = 0; + } + + void StartImpl() override { + reader_->Start(); + ReloadBuffer(); + } + void ReloadBuffer() { buffer_.clear(); buffer_.reserve(buffer_size_); @@ -85,16 +97,14 @@ class CreateShuffleReaderOp : public framework::OperatorBase { } const auto& underlying_reader = scope.FindVar(Input("UnderlyingReader")) ->Get(); - out->Reset( - new ShuffleReader(underlying_reader.Get(), - static_cast(Attr("buffer_size")))); + out->Reset(framework::MakeDecoratedReader( + underlying_reader, static_cast(Attr("buffer_size")))); } }; class CreateShuffleReaderOpMaker : public DecoratedReaderMakerBase { - public: - CreateShuffleReaderOpMaker(OpProto* op_proto, OpAttrChecker* op_checker) - : DecoratedReaderMakerBase(op_proto, op_checker) { + protected: + void Apply() override { AddAttr("buffer_size", "The shuffle buffer size.").GreaterThan(0); AddComment(R"DOC( CreateShuffleReader Operator diff --git a/paddle/fluid/operators/reader/create_threaded_reader_op.cc b/paddle/fluid/operators/reader/create_threaded_reader_op.cc deleted file mode 100644 index 1cb9bd3645..0000000000 --- a/paddle/fluid/operators/reader/create_threaded_reader_op.cc +++ /dev/null @@ -1,79 +0,0 @@ -// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -#include "paddle/fluid/operators/detail/safe_ref.h" -#include "paddle/fluid/operators/reader/reader_op_registry.h" - -namespace paddle { -namespace operators { -namespace reader { - -class ThreadedReader : public framework::DecoratedReader { - public: - explicit ThreadedReader(ReaderBase* reader) : DecoratedReader(reader) {} - - void ReadNext(std::vector* out) override { - std::lock_guard lock(mutex_); - reader_->ReadNext(out); - } - - void ReInit() override { reader_->ReInit(); } - - private: - std::mutex mutex_; -}; - -class CreateThreadedReaderOp : public framework::OperatorBase { - public: - using framework::OperatorBase::OperatorBase; - - private: - void RunImpl(const framework::Scope& scope, - const platform::Place& dev_place) const override { - auto* out = detail::Ref(scope.FindVar(Output("Out"))) - .GetMutable(); - if (out->Get() != nullptr) { - return; - } - const auto& underlying_reader = scope.FindVar(Input("UnderlyingReader")) - ->Get(); - out->Reset(new ThreadedReader(underlying_reader.Get())); - } -}; - -class CreateThreadedReaderOpMaker : public DecoratedReaderMakerBase { - public: - CreateThreadedReaderOpMaker(OpProto* op_proto, OpAttrChecker* op_checker) - : DecoratedReaderMakerBase(op_proto, op_checker) { - AddComment(R"DOC( - CreateThreadedReader Operator - - This operator creates a threaded reader. A threaded reader's - 'ReadNext()' can be invoked by several threads at the same - time. - When the attribute 'safe_mode' is true, the threaded reader's - 'ReInit()' is disabled to avoid unexpected bugs in multi-thread - environment. - )DOC"); - } -}; - -} // namespace reader -} // namespace operators -} // namespace paddle - -namespace reader = paddle::operators::reader; -REGISTER_DECORATED_READER_OPERATOR(create_threaded_reader, - reader::CreateThreadedReaderOp, - reader::CreateThreadedReaderOpMaker); diff --git a/paddle/fluid/operators/reader/lod_tensor_blocking_queue.h b/paddle/fluid/operators/reader/lod_tensor_blocking_queue.h new file mode 100644 index 0000000000..4f7cfc24ec --- /dev/null +++ b/paddle/fluid/operators/reader/lod_tensor_blocking_queue.h @@ -0,0 +1,89 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
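// ---------------------------------------------------------------------------
// What the deleted ThreadedReader amounted to, as a standalone sketch: a thin
// decorator whose only job is to serialize concurrent ReadNext() calls with a
// mutex. Hypothetical stand-in types; shown only to document the removed
// behavior.
#include <mutex>
#include <vector>

struct IntReader {
  virtual void ReadNext(std::vector<int>* out) = 0;
  virtual ~IntReader() = default;
};

class SerializedReader : public IntReader {
 public:
  explicit SerializedReader(IntReader* underlying) : underlying_(underlying) {}
  void ReadNext(std::vector<int>* out) override {
    std::lock_guard<std::mutex> lock(mutex_);  // one reader thread at a time
    underlying_->ReadNext(out);
  }

 private:
  IntReader* underlying_;
  std::mutex mutex_;
};
// ---------------------------------------------------------------------------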
+ +#pragma once + +#include +#include + +#include "paddle/fluid/framework/ddim.h" +#include "paddle/fluid/framework/lod_tensor.h" +#include "paddle/fluid/operators/reader/blocking_queue.h" +#include "paddle/fluid/platform/place.h" + +namespace paddle { +namespace operators { +namespace reader { + +class LoDTensorBlockingQueueHolder; + +class LoDTensorBlockingQueue { + friend class LoDTensorBlockingQueueHolder; + + private: + LoDTensorBlockingQueue(size_t capacity, + const std::vector& dims) + : queue_(capacity), dims_(dims) {} + + public: + bool Push(const std::vector& lod_tensor_vec) { + return queue_.Send(lod_tensor_vec); + } + + bool Push(std::vector&& lod_tensor_vec) { + return queue_.Send(std::move(lod_tensor_vec)); + } + + std::vector Pop(bool* ok = nullptr) { + std::vector lod_tensor_vec; + bool success = queue_.Receive(&lod_tensor_vec); + if (ok != nullptr) *ok = success; + return lod_tensor_vec; + } + + inline size_t Cap() const { return queue_.Cap(); } + + inline size_t Size() const { return queue_.Size(); } + + inline void ReOpen() { queue_.ReOpen(); } + + inline void Close() { queue_.Close(); } + + inline bool IsClosed() const { return queue_.IsClosed(); } + + private: + BlockingQueue> queue_; + std::vector dims_; +}; + +class LoDTensorBlockingQueueHolder { + public: + void InitOnce(size_t capacity, const std::vector& dims) { + PADDLE_ENFORCE( + queue_ == nullptr, + "LoDTensorBlockingQueueHolder::InitOnce() can only be called once"); + queue_.reset(new LoDTensorBlockingQueue(capacity, dims)); + } + + inline const std::shared_ptr& GetQueue() const { + return queue_; + } + + private: + std::shared_ptr queue_; +}; + +} // namespace reader +} // namespace operators +} // namespace paddle diff --git a/paddle/fluid/operators/reader/open_files_op.cc b/paddle/fluid/operators/reader/open_files_op.cc index 91ad7d5658..38223e0699 100644 --- a/paddle/fluid/operators/reader/open_files_op.cc +++ b/paddle/fluid/operators/reader/open_files_op.cc @@ -12,150 +12,200 @@ // See the License for the specific language governing permissions and // limitations under the License. 
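// ---------------------------------------------------------------------------
// A minimal bounded blocking queue with Close() semantics, sketching what
// LoDTensorBlockingQueue delegates to. Assumed behavior of the internal
// BlockingQueue (consistent with how PyReader uses it above): Send() fails
// once the queue is closed, and Receive() still drains buffered items before
// reporting failure. ReOpen() is omitted for brevity.
#include <condition_variable>
#include <deque>
#include <mutex>

template <typename T>
class MiniBlockingQueue {
 public:
  explicit MiniBlockingQueue(size_t cap) : cap_(cap) {}

  bool Send(T v) {
    std::unique_lock<std::mutex> lk(mu_);
    not_full_.wait(lk, [this] { return closed_ || q_.size() < cap_; });
    if (closed_) return false;  // producer learns the queue was closed
    q_.push_back(std::move(v));
    not_empty_.notify_one();
    return true;
  }

  bool Receive(T* v) {
    std::unique_lock<std::mutex> lk(mu_);
    not_empty_.wait(lk, [this] { return closed_ || !q_.empty(); });
    if (q_.empty()) return false;  // closed and fully drained
    *v = std::move(q_.front());
    q_.pop_front();
    not_full_.notify_one();
    return true;
  }

  void Close() {
    std::lock_guard<std::mutex> lk(mu_);
    closed_ = true;
    not_empty_.notify_all();
    not_full_.notify_all();
  }

 private:
  size_t cap_;
  bool closed_ = false;
  std::deque<T> q_;
  std::mutex mu_;
  std::condition_variable not_empty_, not_full_;
};
// ---------------------------------------------------------------------------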
+#include +#include #include // NOLINT - +#include "ThreadPool.h" +#include "paddle/fluid/framework/blocking_queue.h" #include "paddle/fluid/operators/reader/blocking_queue.h" +#include "paddle/fluid/operators/reader/buffered_reader.h" #include "paddle/fluid/operators/reader/reader_op_registry.h" namespace paddle { namespace operators { namespace reader { -class MultiFileReader : public framework::ReaderBase { +class IReaderContainer { public: - MultiFileReader(const std::vector& file_names, - const std::vector& dims, size_t thread_num, - size_t buffer_size) - : file_names_(file_names), dims_(dims), buffer_size_(buffer_size) { - prefetchers_.resize(thread_num); - StartNewScheduler(); + virtual ~IReaderContainer() {} + virtual void AppendReader( + std::unique_ptr&& readers) = 0; + virtual void Stop() = 0; + virtual void Start() = 0; + virtual void ReadNext(std::vector* out) = 0; +}; + +class OrderedReaderContainer : public IReaderContainer { + public: + void AppendReader(std::unique_ptr&& reader) override { + pending_.emplace(std::move(reader)); } - void ReadNext(std::vector* out) override; - void ReInit() override; + void Stop() override { + while (!pending_.empty()) { + MoveFrontPendingToDone(); + } + } - ~MultiFileReader() { EndScheduler(); } + void Start() override { std::swap(done_, pending_); } + + void ReadNext(std::vector* out) override { + if (!pending_.empty()) { + pending_.front()->ReadNext(out); + if (out->empty()) { + MoveFrontPendingToDone(); + ReadNext(out); + } + } else { + out->clear(); + } + } private: - void StartNewScheduler(); - void EndScheduler(); - void ScheduleThreadFunc(); - void PrefetchThreadFunc(std::string file_name, size_t thread_idx); - - std::vector file_names_; - std::vector dims_; - std::thread scheduler_; - std::vector prefetchers_; - size_t buffer_size_; - reader::BlockingQueue* waiting_file_idx_; - reader::BlockingQueue* available_thread_idx_; - reader::BlockingQueue>* buffer_; + void MoveFrontPendingToDone() { + pending_.front()->Shutdown(); + pending_.front()->Start(); + done_.emplace(move(pending_.front())); + pending_.pop(); + } + + std::queue> pending_; + std::queue> done_; }; -void MultiFileReader::ReadNext(std::vector* out) { - if (!buffer_->Receive(out)) { - out->clear(); - } -} - -void MultiFileReader::ReInit() { - EndScheduler(); - StartNewScheduler(); -} - -void MultiFileReader::StartNewScheduler() { - size_t thread_num = prefetchers_.size(); - waiting_file_idx_ = new reader::BlockingQueue(file_names_.size()); - available_thread_idx_ = new reader::BlockingQueue(thread_num); - buffer_ = new reader::BlockingQueue>( - buffer_size_); - - for (size_t i = 0; i < file_names_.size(); ++i) { - waiting_file_idx_->Send(i); - } - waiting_file_idx_->Close(); - for (size_t i = 0; i < thread_num; ++i) { - available_thread_idx_->Send(i); - } +class PreemptiveReaderContainer : public IReaderContainer { + using ReaderList = std::list>; - scheduler_ = std::thread([this] { ScheduleThreadFunc(); }); -} + struct FutureItem { + std::vector data_; + ReaderList::iterator reader_it_; + std::exception_ptr exception_; + }; -void MultiFileReader::EndScheduler() { - available_thread_idx_->Close(); - buffer_->Close(); - waiting_file_idx_->Close(); - if (scheduler_.joinable()) { - scheduler_.join(); - } - delete buffer_; - delete available_thread_idx_; - delete waiting_file_idx_; -} - -void MultiFileReader::ScheduleThreadFunc() { - VLOG(5) << "MultiFileReader schedule thread starts."; - size_t completed_thread_num = 0; - size_t thread_idx; - while 
(available_thread_idx_->Receive(&thread_idx)) { - std::thread& prefetcher = prefetchers_[thread_idx]; - if (prefetcher.joinable()) { - prefetcher.join(); - } - size_t file_idx; - if (waiting_file_idx_->Receive(&file_idx)) { - // Still have files to read. Start a new prefetch thread. - std::string file_name = file_names_[file_idx]; - prefetcher = std::thread([this, file_name, thread_idx] { - PrefetchThreadFunc(file_name, thread_idx); - }); - } else { - // No more file to read. - ++completed_thread_num; - if (completed_thread_num == prefetchers_.size()) { - buffer_->Close(); - break; + using FutureList = std::list>; + + public: + explicit PreemptiveReaderContainer(size_t thread_num) : pool_(thread_num) {} + + void Stop() override { + if (!pending_.empty()) { + for (auto& reader : pending_) { + reader->Shutdown(); + } + for (auto& fu : futures_) { + fu.wait(); + } + futures_.clear(); + for (auto& reader : pending_) { + reader->Start(); + done_.emplace_back(std::move(reader)); } + pending_.clear(); + bool timeout; + complete_queue_.PopAll(1000, &timeout); + PADDLE_ENFORCE(!timeout); } } - // If users invoke ReInit() when scheduler is running, it will close the - // 'avaiable_thread_idx_' and prefecther threads have no way to tell scheduler - // to release their resource. So a check is needed before scheduler ends. - for (auto& p : prefetchers_) { - if (p.joinable()) { - p.join(); + + void Start() override { + for (auto& reader : done_) { + AppendReader(std::move(reader)); } + done_.clear(); } - VLOG(5) << "MultiFileReader schedule thread terminates."; -} - -void MultiFileReader::PrefetchThreadFunc(std::string file_name, - size_t thread_idx) { - VLOG(5) << "The prefetch thread of file '" << file_name << "' starts."; - std::unique_ptr reader = - CreateReaderByFileName(file_name, dims_); - while (true) { - std::vector ins; - reader->ReadNext(&ins); - if (ins.empty()) { - break; + + void ReadNext(std::vector* out) override { + if (!pending_.empty()) { + auto future_it = complete_queue_.Pop(); + FutureItem item = future_it->get(); + if (item.exception_) { + for (auto it = futures_.begin(); it != futures_.end(); ++it) { + if (it != future_it) { + it->wait(); // Wait all other threads complete. + } + } + std::rethrow_exception(item.exception_); + + } else if (item.data_.empty()) { // reader done. + done_.emplace_back(std::move(*item.reader_it_)); + pending_.erase(item.reader_it_); + futures_.erase(future_it); + ReadNext(out); + } else { + *out = item.data_; + // continue read async + ReadAsync(item.reader_it_, &future_it); + } + } else { + out->clear(); } - try { - buffer_->Send(std::move(ins)); - } catch (paddle::platform::EnforceNotMet e) { - VLOG(5) << "WARNING: The buffer channel has been closed. The prefetch " - "thread of file '" - << file_name << "' will terminate."; - break; + } + + private: + void AppendReader(std::unique_ptr&& reader) override { + pending_.emplace_back(std::move(reader)); + auto reader_it = pending_.end(); + --reader_it; + + futures_.emplace_back(); + auto future_it = futures_.end(); + --future_it; + + ReadAsync(reader_it, &future_it); + } + + void ReadAsync(const ReaderList::iterator& reader_it, + FutureList::iterator* future_it_ptr) { + auto& future_it = *future_it_ptr; + *future_it = pool_.enqueue([reader_it, future_it, this] { + try { + FutureItem item; + item.reader_it_ = reader_it; + (*reader_it)->ReadNext(&item.data_); + if (item.data_.empty()) { + (*reader_it)->Shutdown(); + (*reader_it)->Start(); + } + complete_queue_.Push(future_it); + return item; + } catch (...) 
{ + FutureItem item; + item.exception_ = std::current_exception(); + complete_queue_.Push(future_it); + return item; + } + }); + } + + FutureList futures_; + ThreadPool pool_; + framework::BlockingQueue complete_queue_; + std::list> pending_; + std::list> done_; +}; + +class MultiFileReader : public framework::ReaderBase { + public: + MultiFileReader(const std::vector& file_names, + std::unique_ptr&& container) + : container_(std::move(container)) { + for (auto& fn : file_names) { + container_->AppendReader(CreateReaderByFileName(fn)); } } - if (!available_thread_idx_->Send(thread_idx)) { - VLOG(5) << "WARNING: The available_thread_idx_ channel has been closed. " - "Fail to send thread_idx."; + ~MultiFileReader() { container_->Stop(); } + + protected: + void ReadNextImpl(std::vector* out) override { + container_->ReadNext(out); } - VLOG(5) << "The prefetch thread of file '" << file_name << "' terminates."; -} + void ShutdownImpl() override { container_->Stop(); } + void StartImpl() override { container_->Start(); } + + private: + std::unique_ptr container_; +}; class OpenFilesOp : public framework::OperatorBase { public: @@ -173,32 +223,47 @@ class OpenFilesOp : public framework::OperatorBase { "shape concat's length."); const auto& file_names = Attr>("file_names"); PADDLE_ENFORCE(!file_names.empty(), "No file to be read!"); - const size_t thread_num = Attr("thread_num"); - const size_t buffer_size = Attr("buffer_size"); + bool is_test = Attr("is_test"); auto* out = scope.FindVar(Output("Out")) ->template GetMutable(); - out->Reset(new MultiFileReader(file_names, - RestoreShapes(shape_concat, ranks), - thread_num, buffer_size)); + std::unique_ptr container; + + if (is_test) { + container.reset(new OrderedReaderContainer()); + } else { + container.reset(new PreemptiveReaderContainer( + static_cast(Attr("thread_num")))); + } + + std::shared_ptr reader( + new MultiFileReader(file_names, std::move(container))); + auto buffer_size = Attr("buffer_size"); + if (buffer_size > 1) { + reader = framework::MakeDecoratedReader( + reader, platform::CPUPlace(), buffer_size); + } + out->Reset(reader); } }; class OpenFilesOpMaker : public FileReaderMakerBase { - public: - OpenFilesOpMaker(OpProto* op_proto, OpAttrChecker* op_checker) - : FileReaderMakerBase(op_proto, op_checker) { + protected: + void Apply() override { AddAttr>("file_names", "Files to be read."); - AddAttr("thread_num", "The maximal concurrent prefetch thread number.") - .GreaterThan(0); - AddAttr("buffer_size", "The size of prefetch buffer.").GreaterThan(0); + AddAttr("is_test", "Used for testing data.").SetDefault(false); AddComment(R"DOC( OpenFiles Operator - An OpenFilesOp creates a MultiFileReader, which is able to + An OpenFilesOp creates a MultiFileReader, which is able to read data multi-threaded from multiple files. )DOC"); + AddAttr("thread_num", + "The maximal concurrent prefetch thread number. 
Used only " + "when is_test = False"); + AddAttr("buffer_size", "The reading buffer of these files.") + .GreaterThan(0); } }; diff --git a/paddle/fluid/operators/reader/reader_op_registry.cc b/paddle/fluid/operators/reader/reader_op_registry.cc index 3ff4536819..b82aab1214 100644 --- a/paddle/fluid/operators/reader/reader_op_registry.cc +++ b/paddle/fluid/operators/reader/reader_op_registry.cc @@ -39,7 +39,7 @@ std::unordered_map& FileReaderRegistry() { } std::unique_ptr CreateReaderByFileName( - const std::string& file_name, const std::vector& dims) { + const std::string& file_name) { size_t separator_pos = file_name.find_last_of(kFileFormatSeparator); PADDLE_ENFORCE_NE(separator_pos, std::string::npos, "File name illegal! A legal file name should be like: " @@ -49,15 +49,12 @@ std::unique_ptr CreateReaderByFileName( auto itor = FileReaderRegistry().find(filetype); PADDLE_ENFORCE(itor != FileReaderRegistry().end(), "No file reader registered for '%s' format.", filetype); - framework::ReaderBase* reader = (itor->second)(file_name, dims); + framework::ReaderBase* reader = (itor->second)(file_name); return std::unique_ptr(reader); } -FileReaderMakerBase::FileReaderMakerBase( - framework::OpProtoAndCheckerMaker::OpProto* op_proto, - framework::OpAttrChecker* op_checker) - : OpProtoAndCheckerMaker(op_proto, op_checker) { - AddOutput("Out", "(ReaderHolder) The created random reader.").AsDuplicable(); +void FileReaderMakerBase::Make() { + AddOutput("Out", "(ReaderHolder): The created random reader.").AsDuplicable(); AddAttr>("shape_concat", "The concat of all data's shapes."); AddAttr>( "ranks", @@ -68,6 +65,7 @@ FileReaderMakerBase::FileReaderMakerBase( "It means the reader will generate two data each time," "whose shapes are [2,3,4] and [5,6] respectively."); AddAttr>("lod_levels", "The LoD levels of each data."); + Apply(); } void FileReaderInferShape::operator()(framework::InferShapeContext* ctx) const { @@ -117,6 +115,7 @@ void DecoratedReaderInferShape::operator()( boost::get(ctx->GetOutputVarPtrs("Out")[0]); out_reader->SetLoDLevels(in_reader->GetLoDLevels()); } + void DecoratedReaderInferVarType::operator()( const framework::OpDesc& op_desc, framework::BlockDesc* block) const { std::string in_reader_name = op_desc.Input("UnderlyingReader")[0]; @@ -127,13 +126,11 @@ void DecoratedReaderInferVarType::operator()( out_reader->SetDataTypes(in_reader->GetDataTypes()); } -DecoratedReaderMakerBase::DecoratedReaderMakerBase( - framework::OpProtoAndCheckerMaker::OpProto* op_proto, - framework::OpAttrChecker* op_checker) - : OpProtoAndCheckerMaker(op_proto, op_checker) { +void DecoratedReaderMakerBase::Make() { AddInput("UnderlyingReader", "(ReaderHolder) The underlying reader for creating a batch reader."); AddOutput("Out", "(ReaderHolder) The created batch reader."); + Apply(); } } // namespace reader diff --git a/paddle/fluid/operators/reader/reader_op_registry.h b/paddle/fluid/operators/reader/reader_op_registry.h index ec25f55ef5..25c3e7d77b 100644 --- a/paddle/fluid/operators/reader/reader_op_registry.h +++ b/paddle/fluid/operators/reader/reader_op_registry.h @@ -25,29 +25,31 @@ namespace reader { static constexpr char kFileFormatSeparator[] = "."; -using FileReaderCreator = std::function&)>; +using FileReaderCreator = + std::function; std::unordered_map& FileReaderRegistry(); template int RegisterFileReader(const std::string& filetype) { - FileReaderRegistry()[filetype] = []( - const std::string& fn, const std::vector& dims) { - return new Reader(fn, dims); + FileReaderRegistry()[filetype] = 
[](const std::string& fn) { + return new Reader(fn); }; return 0; } std::unique_ptr CreateReaderByFileName( - const std::string& file_name, const std::vector& dims); + const std::string& file_name); extern std::vector RestoreShapes( const std::vector& shape_concat, const std::vector& ranks); class FileReaderMakerBase : public framework::OpProtoAndCheckerMaker { public: - FileReaderMakerBase(OpProto* op_proto, OpAttrChecker* op_checker); + void Make() final; + + protected: + virtual void Apply() = 0; }; class FileReaderInferShape : public framework::InferShapeBase { @@ -76,7 +78,10 @@ class DecoratedReaderInferVarType : public framework::VarTypeInference { class DecoratedReaderMakerBase : public framework::OpProtoAndCheckerMaker { public: - DecoratedReaderMakerBase(OpProto* op_proto, OpAttrChecker* op_checker); + void Make() final; + + protected: + virtual void Apply() = 0; }; } // namespace reader diff --git a/paddle/fluid/operators/recurrent_op.cc b/paddle/fluid/operators/recurrent_op.cc index 72c2905872..162bfcbb08 100644 --- a/paddle/fluid/operators/recurrent_op.cc +++ b/paddle/fluid/operators/recurrent_op.cc @@ -429,7 +429,8 @@ class RecurrentGradOp : public RecurrentBase { auto sum_op = framework::OpRegistry::CreateOp( "sum", {{"X", {pg_names[param_id], new_inside_name}}}, - {{"Out", {pg_names[param_id]}}}, framework::AttributeMap{}); + {{"Out", {pg_names[param_id]}}}, + framework::AttributeMap{{"use_mkldnn", {false}}}); sum_op->Run(cur_scope, place); cur_scope.Rename(new_inside_name, inside_grad_name); @@ -508,8 +509,7 @@ class RecurrentGradOp : public RecurrentBase { class RecurrentOpProtoMaker : public framework::OpProtoAndCheckerMaker { public: - RecurrentOpProtoMaker(OpProto *proto, OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput(kInputs, "rnn inputs").AsDuplicable(); AddInput(kInitialStates, "rnn initial states").AsDuplicable(); AddInput(kParameters, diff --git a/paddle/fluid/operators/recv_op.cc b/paddle/fluid/operators/recv_op.cc index a4dcf704a6..4a6ce938a5 100644 --- a/paddle/fluid/operators/recv_op.cc +++ b/paddle/fluid/operators/recv_op.cc @@ -19,8 +19,8 @@ limitations under the License. 
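// ---------------------------------------------------------------------------
// Standalone sketch of the file-extension-keyed factory registry that
// CreateReaderByFileName consults above: creators are registered per
// extension and looked up from the part of the file name after the last '.'
// (kFileFormatSeparator). Error handling here uses exceptions in place of
// PADDLE_ENFORCE; all type names are hypothetical stand-ins.
#include <functional>
#include <memory>
#include <stdexcept>
#include <string>
#include <unordered_map>

struct Reader {
  virtual ~Reader() = default;
};
using Creator = std::function<Reader*(const std::string& file_name)>;

std::unordered_map<std::string, Creator>& Registry() {
  static std::unordered_map<std::string, Creator> r;
  return r;
}

std::unique_ptr<Reader> CreateByFileName(const std::string& file_name) {
  size_t sep = file_name.find_last_of('.');
  if (sep == std::string::npos) throw std::runtime_error("no extension");
  const std::string filetype = file_name.substr(sep + 1);
  auto it = Registry().find(filetype);
  if (it == Registry().end()) throw std::runtime_error("unregistered format");
  return std::unique_ptr<Reader>(it->second(file_name));
}
// ---------------------------------------------------------------------------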
*/ #include "paddle/fluid/framework/framework.pb.h" #include "paddle/fluid/framework/lod_tensor.h" #include "paddle/fluid/framework/op_registry.h" - -#include "paddle/fluid/operators/detail/grpc_client.h" +#include "paddle/fluid/operators/detail/macros.h" +#include "paddle/fluid/platform/profiler.h" namespace paddle { namespace operators { @@ -36,25 +36,27 @@ class RecvOp : public framework::OperatorBase { const platform::Place& place) const override { auto outs = Outputs("Out"); std::vector epmap = Attr>("epmap"); + int sync_mode = Attr("sync_mode"); platform::DeviceContextPool& pool = platform::DeviceContextPool::Instance(); auto& ctx = *pool.Get(place); + distributed::RPCClient* rpc_client = + distributed::RPCClient::GetInstance(); + for (size_t i = 0; i < outs.size(); i++) { - VLOG(3) << "getting " << outs[i]; - client_.AsyncGetVariable(epmap[i], ctx, scope, outs[i]); + VLOG(3) << "getting " << outs[i] << " from " << epmap[i]; + rpc_client->AsyncGetVar(epmap[i], ctx, scope, outs[i]); + } + if (sync_mode) { + PADDLE_ENFORCE(rpc_client->Wait(), "internal error in RPCClient"); } - PADDLE_ENFORCE(client_.Wait()); } - - private: - mutable detail::RPCClient client_; }; class RecvOpMaker : public framework::OpProtoAndCheckerMaker { public: - RecvOpMaker(OpProto* proto, OpAttrChecker* op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() { AddOutput("Out", "(Tensor) Variables to get from server.").AsDuplicable(); AddComment(R"DOC( Recv operator @@ -66,12 +68,22 @@ This operator can get variables from server side. "Server endpoints in the order of input " "variables for mapping") .SetDefault({}); + AddAttr("sync_mode", + "(int, default 0)" + "sync recv or async recv.") + .SetDefault(0); } }; +class RecvOpShapeInference : public framework::InferShapeBase { + public: + void operator()(framework::InferShapeContext* ctx) const override {} +}; + } // namespace operators } // namespace paddle namespace ops = paddle::operators; -REGISTER_OPERATOR(recv, ops::RecvOp, ops::RecvOpMaker); +REGISTER_OPERATOR(recv, ops::RecvOp, paddle::framework::EmptyGradOpMaker, + ops::RecvOpMaker, ops::RecvOpShapeInference); diff --git a/paddle/fluid/operators/reduce_max_op.cc b/paddle/fluid/operators/reduce_max_op.cc new file mode 100644 index 0000000000..95d3768e1f --- /dev/null +++ b/paddle/fluid/operators/reduce_max_op.cc @@ -0,0 +1,34 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#include "paddle/fluid/operators/reduce_min_max_op.h" + +REGISTER_REDUCE_OP(reduce_max); +REGISTER_OP_CPU_KERNEL( + reduce_max, ops::ReduceKernel, + ops::ReduceKernel, + ops::ReduceKernel, + ops::ReduceKernel); +REGISTER_OP_CPU_KERNEL( + reduce_max_grad, ops::ReduceGradKernel, + ops::ReduceGradKernel, + ops::ReduceGradKernel, + ops::ReduceGradKernel); diff --git a/paddle/fluid/operators/reduce_max_op.cu b/paddle/fluid/operators/reduce_max_op.cu new file mode 100644 index 0000000000..0d86b3127e --- /dev/null +++ b/paddle/fluid/operators/reduce_max_op.cu @@ -0,0 +1,34 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "paddle/fluid/operators/reduce_min_max_op.h" + +REGISTER_OP_CUDA_KERNEL(reduce_max, + ops::ReduceKernel, + ops::ReduceKernel, + ops::ReduceKernel, + ops::ReduceKernel); +REGISTER_OP_CUDA_KERNEL( + reduce_max_grad, ops::ReduceGradKernel, + ops::ReduceGradKernel, + ops::ReduceGradKernel, + ops::ReduceGradKernel); diff --git a/paddle/fluid/operators/reduce_mean_op.cc b/paddle/fluid/operators/reduce_mean_op.cc new file mode 100644 index 0000000000..fc258c2496 --- /dev/null +++ b/paddle/fluid/operators/reduce_mean_op.cc @@ -0,0 +1,35 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "paddle/fluid/operators/reduce_mean_op.h" + +REGISTER_REDUCE_OP(reduce_mean); +REGISTER_OP_CPU_KERNEL(reduce_mean, + ops::ReduceKernel, + ops::ReduceKernel, + ops::ReduceKernel, + ops::ReduceKernel); +REGISTER_OP_CPU_KERNEL(reduce_mean_grad, + ops::ReduceGradKernel, + ops::ReduceGradKernel, + ops::ReduceGradKernel, + ops::ReduceGradKernel); diff --git a/paddle/fluid/operators/reduce_mean_op.cu b/paddle/fluid/operators/reduce_mean_op.cu new file mode 100644 index 0000000000..960cb3235b --- /dev/null +++ b/paddle/fluid/operators/reduce_mean_op.cu @@ -0,0 +1,34 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +#include "paddle/fluid/operators/reduce_mean_op.h" + +REGISTER_OP_CUDA_KERNEL(reduce_mean, + ops::ReduceKernel, + ops::ReduceKernel, + ops::ReduceKernel, + ops::ReduceKernel); +REGISTER_OP_CUDA_KERNEL( + reduce_mean_grad, ops::ReduceGradKernel, + ops::ReduceGradKernel, + ops::ReduceGradKernel, + ops::ReduceGradKernel); diff --git a/paddle/fluid/operators/reduce_mean_op.h b/paddle/fluid/operators/reduce_mean_op.h new file mode 100644 index 0000000000..1359679c47 --- /dev/null +++ b/paddle/fluid/operators/reduce_mean_op.h @@ -0,0 +1,39 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once + +#include "paddle/fluid/operators/reduce_op.h" + +namespace paddle { +namespace operators { + +struct MeanFunctor { + template + void operator()(const DeviceContext& place, X* x, Y* y, const Dim& dim) { + y->device(place) = x->mean(dim); + } +}; + +struct MeanGradFunctor { + template + void operator()(const DeviceContext& place, X* x, Y* y, DX* dx, DY* dy, + const Dim& dim, int size) { + dx->device(place) = dy->broadcast(dim) / dx->constant(size); + } +}; + +} // namespace operators +} // namespace paddle diff --git a/paddle/fluid/operators/reduce_min_max_op.h b/paddle/fluid/operators/reduce_min_max_op.h new file mode 100644 index 0000000000..ec59f3e71c --- /dev/null +++ b/paddle/fluid/operators/reduce_min_max_op.h @@ -0,0 +1,50 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +#pragma once + +#include "paddle/fluid/operators/reduce_op.h" + +namespace paddle { +namespace operators { + +struct MaxFunctor { + template + void operator()(const DeviceContext& place, X* x, Y* y, const Dim& dim) { + y->device(place) = x->maximum(dim); + } +}; + +struct MinFunctor { + template + void operator()(const DeviceContext& place, X* x, Y* y, const Dim& dim) { + y->device(place) = x->minimum(dim); + } +}; + +struct MaxOrMinGradFunctor { + template + void operator()(const DeviceContext& place, X* x, Y* y, DX* dx, DY* dy, + const Dim& dim, int size) { + auto equals = (*x) == y->broadcast(dim); + auto ones = dx->constant(1); + auto zeros = dx->constant(0); + // If there are multiple minimum or maximum elements, the subgradient of + // each is the set [0, 1], and we pass gradient to all of them here. 
+ dx->device(place) = dy->broadcast(dim) * equals.select(ones, zeros); + } +}; + +} // namespace operators +} // namespace paddle diff --git a/paddle/fluid/operators/reduce_min_op.cc b/paddle/fluid/operators/reduce_min_op.cc new file mode 100644 index 0000000000..330a86d2e4 --- /dev/null +++ b/paddle/fluid/operators/reduce_min_op.cc @@ -0,0 +1,34 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "paddle/fluid/operators/reduce_min_max_op.h" + +REGISTER_REDUCE_OP(reduce_min); +REGISTER_OP_CPU_KERNEL( + reduce_min, ops::ReduceKernel, + ops::ReduceKernel, + ops::ReduceKernel, + ops::ReduceKernel); +REGISTER_OP_CPU_KERNEL( + reduce_min_grad, ops::ReduceGradKernel, + ops::ReduceGradKernel, + ops::ReduceGradKernel, + ops::ReduceGradKernel); diff --git a/paddle/fluid/operators/reduce_min_op.cu b/paddle/fluid/operators/reduce_min_op.cu new file mode 100644 index 0000000000..da466f805e --- /dev/null +++ b/paddle/fluid/operators/reduce_min_op.cu @@ -0,0 +1,34 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "paddle/fluid/operators/reduce_min_max_op.h" + +REGISTER_OP_CUDA_KERNEL(reduce_min, + ops::ReduceKernel, + ops::ReduceKernel, + ops::ReduceKernel, + ops::ReduceKernel); +REGISTER_OP_CUDA_KERNEL( + reduce_min_grad, ops::ReduceGradKernel, + ops::ReduceGradKernel, + ops::ReduceGradKernel, + ops::ReduceGradKernel); diff --git a/paddle/fluid/operators/reduce_op.cc b/paddle/fluid/operators/reduce_op.cc deleted file mode 100644 index 093db96647..0000000000 --- a/paddle/fluid/operators/reduce_op.cc +++ /dev/null @@ -1,234 +0,0 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
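// ---------------------------------------------------------------------------
// Plain-C++ sketch of what MeanFunctor / MeanGradFunctor above express with
// Eigen: reducing one axis of a row-major [rows x cols] matrix by mean, and
// the backward rule that broadcasts dy over the reduced axis and divides by
// the reduced size. Function names here are illustrative only.
#include <vector>

// Forward: y[c] = mean over r of x[r][c]  (reduce axis 0).
std::vector<float> ReduceMean(const std::vector<float>& x, int rows,
                              int cols) {
  std::vector<float> y(cols, 0.f);
  for (int r = 0; r < rows; ++r)
    for (int c = 0; c < cols; ++c) y[c] += x[r * cols + c];
  for (int c = 0; c < cols; ++c) y[c] /= static_cast<float>(rows);
  return y;
}

// Backward: dx[r][c] = dy[c] / rows, i.e. dy broadcast over the reduced axis.
std::vector<float> ReduceMeanGrad(const std::vector<float>& dy, int rows,
                                  int cols) {
  std::vector<float> dx(static_cast<size_t>(rows) * cols);
  for (int r = 0; r < rows; ++r)
    for (int c = 0; c < cols; ++c)
      dx[r * cols + c] = dy[c] / static_cast<float>(rows);
  return dx;
}
// ---------------------------------------------------------------------------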
*/ - -#include "paddle/fluid/operators/reduce_op.h" - -#include -#include - -namespace paddle { -namespace operators { - -using framework::Tensor; - -class ReduceOp : public framework::OperatorWithKernel { - public: - using framework::OperatorWithKernel::OperatorWithKernel; - - void InferShape(framework::InferShapeContext *ctx) const override { - PADDLE_ENFORCE(ctx->HasInput("X"), - "Input(X) of ReduceOp should not be null."); - PADDLE_ENFORCE(ctx->HasOutput("Out"), - "Output(Out) of ReduceOp should not be null."); - auto x_dims = ctx->GetInputDim("X"); - auto x_rank = x_dims.size(); - PADDLE_ENFORCE_LE(x_rank, 6, "Tensors with rank at most 6 are supported."); - int dim = ctx->Attrs().Get("dim"); - if (dim < 0) dim = x_rank + dim; - PADDLE_ENFORCE_LT( - dim, x_rank, - "The dim should be in the range [-rank(input), rank(input))."); - bool reduce_all = ctx->Attrs().Get("reduce_all"); - bool keep_dim = ctx->Attrs().Get("keep_dim"); - if (reduce_all) { - if (keep_dim) - ctx->SetOutputDim( - "Out", framework::make_ddim(std::vector(x_rank, 1))); - else - ctx->SetOutputDim("Out", {1}); - } else { - auto dims_vector = vectorize(x_dims); - if (keep_dim || x_rank == 1) { - dims_vector[dim] = 1; - } else { - dims_vector.erase(dims_vector.begin() + dim); - } - auto out_dims = framework::make_ddim(dims_vector); - ctx->SetOutputDim("Out", out_dims); - if (dim != 0) { - // Only pass LoD when not reducing on the first dim. - ctx->ShareLoD("X", /*->*/ "Out"); - } - } - } -}; - -class ReduceGradOp : public framework::OperatorWithKernel { - public: - using framework::OperatorWithKernel::OperatorWithKernel; - - void InferShape(framework::InferShapeContext *ctx) const override { - PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) should not be null."); - PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Out")), - "Input(Out@GRAD) should not be null."); - auto x_dims = ctx->GetInputDim("X"); - auto x_rank = x_dims.size(); - PADDLE_ENFORCE_LE(x_rank, 6, "Tensors with rank at most 6 are supported."); - int dim = ctx->Attrs().Get("dim"); - if (dim < 0) dim = x_rank + dim; - PADDLE_ENFORCE_LT( - dim, x_rank, - "The dim should be in the range [-rank(input), rank(input))."); - auto x_grad_name = framework::GradVarName("X"); - if (ctx->HasOutput(x_grad_name)) { - ctx->SetOutputDim(x_grad_name, x_dims); - ctx->ShareLoD("X", /*->*/ x_grad_name); - } - } -}; - -class ReduceOpMaker : public framework::OpProtoAndCheckerMaker { - public: - ReduceOpMaker(OpProto *proto, OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { - AddInput("X", - "(Tensor) The input tensor. Tensors with rank at most 6 are " - "supported."); - AddOutput("Out", "(Tensor) The result tensor."); - AddAttr( - "dim", - "(int, default 0) The dimension to reduce. " - "Must be in the range [-rank(input), rank(input)). " - "If `dim < 0`, the dim to reduce is `rank + dim`. " - "Note that reducing on the first dim will make the LoD info lost.") - .SetDefault(0); - AddAttr("keep_dim", - "(bool, default false) " - "If true, retain the reduced dimension with length 1.") - .SetDefault(false); - AddAttr("reduce_all", - "(bool, default false) " - "If true, output a scalar reduced along all dimensions.") - .SetDefault(false); - comment_ = R"DOC( -{ReduceOp} Operator. - -This operator computes the {reduce} of input tensor along the given dimension. -The result tensor has 1 fewer dimension than the input unless keep_dim is true. -If reduce_all is true, just reduce along all dimensions and output a scalar. 
- -)DOC"; - AddComment(comment_); - } - - protected: - std::string comment_; - - void Replace(std::string *src, std::string from, std::string to) { - std::size_t len_from = std::strlen(from.c_str()); - std::size_t len_to = std::strlen(to.c_str()); - for (std::size_t pos = src->find(from); pos != std::string::npos; - pos = src->find(from, pos + len_to)) { - src->replace(pos, len_from, to); - } - } - - void SetComment(std::string name, std::string op) { - Replace(&comment_, "{ReduceOp}", name); - Replace(&comment_, "{reduce}", op); - } -}; - -class ReduceSumOpMaker : public ReduceOpMaker { - public: - ReduceSumOpMaker(OpProto *proto, OpAttrChecker *op_checker) - : ReduceOpMaker(proto, op_checker) { - SetComment("ReduceSum", "sum"); - AddComment(comment_); - } -}; - -class ReduceMeanOpMaker : public ReduceOpMaker { - public: - ReduceMeanOpMaker(OpProto *proto, OpAttrChecker *op_checker) - : ReduceOpMaker(proto, op_checker) { - SetComment("ReduceMean", "mean"); - AddComment(comment_); - } -}; - -class ReduceMaxOpMaker : public ReduceOpMaker { - public: - ReduceMaxOpMaker(OpProto *proto, OpAttrChecker *op_checker) - : ReduceOpMaker(proto, op_checker) { - SetComment("ReduceMax", "max"); - AddComment(comment_); - } -}; - -class ReduceMinOpMaker : public ReduceOpMaker { - public: - ReduceMinOpMaker(OpProto *proto, OpAttrChecker *op_checker) - : ReduceOpMaker(proto, op_checker) { - SetComment("ReduceMin", "min"); - AddComment(comment_); - } -}; - -class ReduceProdOpMaker : public ReduceOpMaker { - public: - ReduceProdOpMaker(OpProto *proto, OpAttrChecker *op_checker) - : ReduceOpMaker(proto, op_checker) { - SetComment("ReduceProd", "production"); - AddComment(comment_); - } -}; - -} // namespace operators -} // namespace paddle - -namespace ops = paddle::operators; - -REGISTER_OPERATOR(reduce_sum, ops::ReduceOp, ops::ReduceSumOpMaker, - paddle::framework::DefaultGradOpDescMaker); -REGISTER_OPERATOR(reduce_sum_grad, ops::ReduceGradOp); - -REGISTER_OPERATOR(reduce_mean, ops::ReduceOp, ops::ReduceMeanOpMaker, - paddle::framework::DefaultGradOpDescMaker); -REGISTER_OPERATOR(reduce_mean_grad, ops::ReduceGradOp); - -REGISTER_OPERATOR(reduce_max, ops::ReduceOp, ops::ReduceMaxOpMaker, - paddle::framework::DefaultGradOpDescMaker); -REGISTER_OPERATOR(reduce_max_grad, ops::ReduceGradOp); - -REGISTER_OPERATOR(reduce_min, ops::ReduceOp, ops::ReduceMinOpMaker, - paddle::framework::DefaultGradOpDescMaker); -REGISTER_OPERATOR(reduce_min_grad, ops::ReduceGradOp); - -REGISTER_OPERATOR(reduce_prod, ops::ReduceOp, ops::ReduceProdOpMaker, - paddle::framework::DefaultGradOpDescMaker); -REGISTER_OPERATOR(reduce_prod_grad, ops::ReduceGradOp); - -#define REGISTER_REDUCE_CPU_KERNEL(reduce_type, functor, grad_functor) \ - REGISTER_OP_CPU_KERNEL(reduce_type, \ - ops::ReduceKernel, \ - ops::ReduceKernel, \ - ops::ReduceKernel, \ - ops::ReduceKernel); \ - REGISTER_OP_CPU_KERNEL( \ - reduce_type##_grad, \ - ops::ReduceGradKernel, \ - ops::ReduceGradKernel, \ - ops::ReduceGradKernel, \ - ops::ReduceGradKernel); - -FOR_EACH_KERNEL_FUNCTOR(REGISTER_REDUCE_CPU_KERNEL); diff --git a/paddle/fluid/operators/reduce_op.cu b/paddle/fluid/operators/reduce_op.cu deleted file mode 100644 index ae29587f55..0000000000 --- a/paddle/fluid/operators/reduce_op.cu +++ /dev/null @@ -1,41 +0,0 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
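// ---------------------------------------------------------------------------
// Sketch of the output-shape rule implemented by the ReduceOp::InferShape
// shown above (the rule itself presumably carries over after the per-op
// split): normalize a negative dim, then either keep the reduced axis with
// length 1 (keep_dim) or erase it; reduce_all collapses the output to {1}.
#include <vector>

std::vector<long> ReduceOutDims(std::vector<long> x_dims, int dim,
                                bool keep_dim, bool reduce_all) {
  const int rank = static_cast<int>(x_dims.size());
  if (reduce_all)
    return keep_dim ? std::vector<long>(rank, 1) : std::vector<long>{1};
  if (dim < 0) dim += rank;  // e.g. dim = -1 means the last axis
  if (keep_dim || rank == 1) {
    x_dims[dim] = 1;  // retain the reduced dimension with length 1
  } else {
    x_dims.erase(x_dims.begin() + dim);  // drop the reduced dimension
  }
  return x_dims;
}
// ---------------------------------------------------------------------------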
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. */ - -#define EIGEN_USE_GPU -#include "paddle/fluid/operators/reduce_op.h" - -namespace ops = paddle::operators; - -#define REGISTER_REDUCE_GPU_KERNEL(reduce_type, functor, grad_functor) \ - REGISTER_OP_CUDA_KERNEL( \ - reduce_type, ops::ReduceKernel, \ - ops::ReduceKernel, \ - ops::ReduceKernel, \ - ops::ReduceKernel); \ - REGISTER_OP_CUDA_KERNEL( \ - reduce_type##_grad, \ - ops::ReduceGradKernel, \ - ops::ReduceGradKernel, \ - ops::ReduceGradKernel, \ - ops::ReduceGradKernel); - -FOR_EACH_KERNEL_FUNCTOR(REGISTER_REDUCE_GPU_KERNEL); diff --git a/paddle/fluid/operators/reduce_op.h b/paddle/fluid/operators/reduce_op.h index e42b4bfe42..72b6cf1773 100644 --- a/paddle/fluid/operators/reduce_op.h +++ b/paddle/fluid/operators/reduce_op.h @@ -14,111 +14,36 @@ limitations under the License. */ #pragma once -#include "glog/logging.h" -#include "paddle/fluid/framework/eigen.h" -#include "paddle/fluid/framework/op_registry.h" +#include +#include +#include + +#include "paddle/fluid/operators/reduce_op_function.h" namespace paddle { namespace operators { -using Tensor = framework::Tensor; -using DDim = framework::DDim; -template -using EigenTensor = framework::EigenTensor; -template -using EigenScalar = framework::EigenScalar; -template -using EigenVector = framework::EigenVector; - -struct SumFunctor { - template - void operator()(const DeviceContext& place, X* x, Y* y, const Dim& dim) { - y->device(place) = x->sum(dim); - } -}; - -struct SumGradFunctor { - template - void operator()(const DeviceContext& place, X* x, Y* y, DX* dx, DY* dy, - const Dim& dim, int size) { - dx->device(place) = dy->broadcast(dim); - } -}; - -struct MeanFunctor { - template - void operator()(const DeviceContext& place, X* x, Y* y, const Dim& dim) { - y->device(place) = x->mean(dim); - } -}; - -struct MeanGradFunctor { - template - void operator()(const DeviceContext& place, X* x, Y* y, DX* dx, DY* dy, - const Dim& dim, int size) { - dx->device(place) = dy->broadcast(dim) / dx->constant(size); - } -}; - -struct MaxFunctor { - template - void operator()(const DeviceContext& place, X* x, Y* y, const Dim& dim) { - y->device(place) = x->maximum(dim); - } -}; - -struct MinFunctor { - template - void operator()(const DeviceContext& place, X* x, Y* y, const Dim& dim) { - y->device(place) = x->minimum(dim); - } -}; - -struct MaxOrMinGradFunctor { - template - void operator()(const DeviceContext& place, X* x, Y* y, DX* dx, DY* dy, - const Dim& dim, int size) { - auto equals = (*x) == y->broadcast(dim); - auto ones = dx->constant(1); - auto zeros = dx->constant(0); - // If there are multiple minimum or maximum elements, the subgradient of - // each is the set [0, 1], and we pass gradient to all of them here. 
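-  //   For example, with x = [1, 3, 3] the maximum is 3; both entries equal
-  //   to the broadcast maximum receive the full upstream gradient dy.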
- dx->device(place) = dy->broadcast(dim) * equals.select(ones, zeros); - } -}; - -struct ProdFunctor { - template - void operator()(const DeviceContext& place, X* x, Y* y, const Dim& dim) { - y->device(place) = x->prod(dim); - } -}; - -struct ProdGradFunctor { - template - void operator()(const DeviceContext& place, X* x, Y* y, DX* dx, DY* dy, - const Dim& dim, int size) { - dx->device(place) = dy->broadcast(dim) * y->broadcast(dim) * x->inverse(); +#define HANDLE_DIM(NDIM, RDIM) \ + if (ndim == NDIM && rdim == RDIM) { \ + ReduceFunctor( \ + context.template device_context(), *input, output, \ + dims, keep_dim); \ } -}; template class ReduceKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& context) const override { bool reduce_all = context.Attr("reduce_all"); + auto* input = context.Input("X"); + auto* output = context.Output("Out"); + output->mutable_data(context.GetPlace()); + + auto dims = context.Attr>("dim"); + bool keep_dim = context.Attr("keep_dim"); + if (reduce_all) { // Flatten and reduce 1-D tensor - auto* input = context.Input("X"); - auto* output = context.Output("Out"); - output->mutable_data(context.GetPlace()); auto x = EigenVector::Flatten(*input); auto out = EigenScalar::From(*output); auto& place = @@ -127,61 +52,25 @@ class ReduceKernel : public framework::OpKernel { Functor functor; functor(place, &x, &out, reduce_dim); } else { - int rank = context.Input("X")->dims().size(); - switch (rank) { - case 1: - ReduceCompute<1>(context); - break; - case 2: - ReduceCompute<2>(context); - break; - case 3: - ReduceCompute<3>(context); - break; - case 4: - ReduceCompute<4>(context); - break; - case 5: - ReduceCompute<5>(context); - break; - case 6: - ReduceCompute<6>(context); - break; - } - } - } - - private: - template - void ReduceCompute(const framework::ExecutionContext& context) const { - auto* input = context.Input("X"); - auto* output = context.Output("Out"); - output->mutable_data(context.GetPlace()); - - auto x = EigenTensor::From(*input); - auto x_rank = static_cast(x.dimensions().size()); - int dim = static_cast(context.Attr("dim")); - if (dim < 0) dim = x_rank + dim; - auto reduce_dim = Eigen::array({{dim}}); - // construct the squeezed output tensor - bool keep_dim = context.Attr("keep_dim"); - DDim dims = output->dims(); - auto dims_vector = vectorize(dims); - if (keep_dim && x_rank > 1) { - dims_vector.erase(dims_vector.begin() + dim); - dims = framework::make_ddim(dims_vector); - } - - auto& place = - *context.template device_context().eigen_device(); - Functor functor; - - if (D == 1) { - auto out = EigenScalar::From(*output); - functor(place, &x, &out, reduce_dim); - } else { - auto out = EigenTensor::From(*output, dims); - functor(place, &x, &out, reduce_dim); + int ndim = input->dims().size(); + int rdim = dims.size(); + // comments for accelerating compiling temporarily. 
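+    // Each enabled HANDLE_DIM(NDIM, RDIM) instantiates ReduceFunctor for an
+    // input of rank NDIM with RDIM reduced dimensions; Eigen requires both
+    // ranks at compile time, so the runtime (ndim, rdim) pair is matched
+    // against this if-chain. Only combinations up to rank 4 are enabled
+    // below; the rank-5/6 cases stay commented out to cut compile time.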
+ // HANDLE_DIM(6, 5); + // HANDLE_DIM(6, 4); + // HANDLE_DIM(6, 3); + // HANDLE_DIM(6, 2); + // HANDLE_DIM(6, 1); + // HANDLE_DIM(5, 4); + // HANDLE_DIM(5, 3); + // HANDLE_DIM(5, 2); + // HANDLE_DIM(5, 1); + HANDLE_DIM(4, 3); + HANDLE_DIM(4, 2); + HANDLE_DIM(4, 1); + HANDLE_DIM(3, 2); + HANDLE_DIM(3, 1); + HANDLE_DIM(2, 1); + HANDLE_DIM(1, 1); } } }; @@ -191,12 +80,15 @@ class ReduceGradKernel : public framework::OpKernel { public: void Compute(const framework::ExecutionContext& context) const override { bool reduce_all = context.Attr("reduce_all"); + auto dims = context.Attr>("dim"); + + auto* input0 = context.Input("X"); + auto* input1 = context.Input("Out"); + auto* input2 = context.Input(framework::GradVarName("Out")); + auto* output = context.Output(framework::GradVarName("X")); + output->mutable_data(context.GetPlace()); + if (reduce_all) { - auto* input0 = context.Input("X"); - auto* input1 = context.Input("Out"); - auto* input2 = context.Input(framework::GradVarName("Out")); - auto* output = context.Output(framework::GradVarName("X")); - output->mutable_data(context.GetPlace()); auto x = EigenVector::Flatten(*input0); auto x_reduce = EigenVector::From(*input1); auto x_reduce_grad = EigenVector::From(*input2); @@ -209,66 +101,172 @@ class ReduceGradKernel : public framework::OpKernel { functor(place, &x, &x_reduce, &x_grad, &x_reduce_grad, broadcast_dim, broadcast_dim[0]); } else { - int rank = context.Input("X")->dims().size(); + int rank = input0->dims().size(); switch (rank) { case 1: - ReduceGradCompute<1>(context); + ReduceGradFunctor( + context.template device_context(), *input0, + *input1, *input2, output, dims); break; case 2: - ReduceGradCompute<2>(context); + ReduceGradFunctor( + context.template device_context(), *input0, + *input1, *input2, output, dims); break; case 3: - ReduceGradCompute<3>(context); + ReduceGradFunctor( + context.template device_context(), *input0, + *input1, *input2, output, dims); break; case 4: - ReduceGradCompute<4>(context); + ReduceGradFunctor( + context.template device_context(), *input0, + *input1, *input2, output, dims); break; case 5: - ReduceGradCompute<5>(context); + ReduceGradFunctor( + context.template device_context(), *input0, + *input1, *input2, output, dims); break; case 6: - ReduceGradCompute<6>(context); + ReduceGradFunctor( + context.template device_context(), *input0, + *input1, *input2, output, dims); break; } } } +}; - private: - template - void ReduceGradCompute(const framework::ExecutionContext& context) const { - auto* input0 = context.Input("X"); - auto* input1 = context.Input("Out"); - auto* input2 = context.Input(framework::GradVarName("Out")); - auto* output = context.Output(framework::GradVarName("X")); +class ReduceOp : public framework::OperatorWithKernel { + public: + using framework::OperatorWithKernel::OperatorWithKernel; - output->mutable_data(context.GetPlace()); - auto x = EigenTensor::From(*input0); - auto x_grad = EigenTensor::From(*output); - auto x_rank = static_cast(x.dimensions().size()); - int dim = static_cast(context.Attr("dim")); - if (dim < 0) dim = x_rank + dim; - DDim dims = input0->dims(); - dims[dim] = 1; - auto x_reduce = EigenTensor::From(*input1, dims); - auto x_reduce_grad = EigenTensor::From(*input2, dims); + void InferShape(framework::InferShapeContext* ctx) const override { + PADDLE_ENFORCE(ctx->HasInput("X"), + "Input(X) of ReduceOp should not be null."); + PADDLE_ENFORCE(ctx->HasOutput("Out"), + "Output(Out) of ReduceOp should not be null."); + auto x_dims = ctx->GetInputDim("X"); + auto 
x_rank = x_dims.size(); + PADDLE_ENFORCE_LE(x_rank, 6, "Tensors with rank at most 6 are supported."); + auto dims = ctx->Attrs().Get>("dim"); + for (size_t i = 0; i < dims.size(); ++i) { + if (dims[i] < 0) dims[i] = x_rank + dims[i]; + PADDLE_ENFORCE_LT( + dims[i], x_rank, + "The dim should be in the range [-rank(input), rank(input))."); + } + sort(dims.begin(), dims.end()); + bool reduce_all = ctx->Attrs().Get("reduce_all"); + bool keep_dim = ctx->Attrs().Get("keep_dim"); + if (reduce_all) { + if (keep_dim) + ctx->SetOutputDim( + "Out", framework::make_ddim(std::vector(x_rank, 1))); + else + ctx->SetOutputDim("Out", {1}); + } else { + auto dims_vector = vectorize(x_dims); + if (keep_dim) { + for (size_t i = 0; i < dims.size(); ++i) { + dims_vector[dims[i]] = 1; + } + } else { + const int kDelFlag = -2; + for (size_t i = 0; i < dims.size(); ++i) { + dims_vector[dims[i]] = kDelFlag; + } + dims_vector.erase( + remove(dims_vector.begin(), dims_vector.end(), kDelFlag), + dims_vector.end()); + } + auto out_dims = framework::make_ddim(dims_vector); + ctx->SetOutputDim("Out", out_dims); + if (dims[0] != 0) { + // Only pass LoD when not reducing on the first dim. + ctx->ShareLoD("X", /*->*/ "Out"); + } + } + } +}; - Eigen::array broadcast_dim; - for (size_t i = 0; i < D; ++i) broadcast_dim[i] = 1; - broadcast_dim[dim] = input0->dims()[dim]; - auto& place = - *context.template device_context().eigen_device(); - Functor functor; - functor(place, &x, &x_reduce, &x_grad, &x_reduce_grad, broadcast_dim, - broadcast_dim[dim]); +class ReduceGradOp : public framework::OperatorWithKernel { + public: + using framework::OperatorWithKernel::OperatorWithKernel; + + void InferShape(framework::InferShapeContext* ctx) const override { + PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) should not be null."); + PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Out")), + "Input(Out@GRAD) should not be null."); + auto x_dims = ctx->GetInputDim("X"); + auto x_rank = x_dims.size(); + PADDLE_ENFORCE_LE(x_rank, 6, "Tensors with rank at most 6 are supported."); + auto dims = ctx->Attrs().Get>("dim"); + for (size_t i = 0; i < dims.size(); ++i) { + if (dims[i] < 0) dims[i] = x_rank + dims[i]; + PADDLE_ENFORCE_LT( + dims[i], x_rank, + "The dim should be in the range [-rank(input), rank(input))."); + } + sort(dims.begin(), dims.end()); + auto x_grad_name = framework::GradVarName("X"); + if (ctx->HasOutput(x_grad_name)) { + ctx->SetOutputDim(x_grad_name, x_dims); + ctx->ShareLoD("X", /*->*/ x_grad_name); + } + } +}; + +class ReduceOpMaker : public framework::OpProtoAndCheckerMaker { + public: + void Make() final { + AddInput("X", + "(Tensor) The input tensor. Tensors with rank at most 6 are " + "supported."); + AddOutput("Out", "(Tensor) The result tensor."); + AddAttr>( + "dim", + "(list, default {0}) The dimensions to reduce. " + "Must be in the range [-rank(input), rank(input)). " + "If `dim[i] < 0`, the dims[i] to reduce is `rank + dims[i]`. " + "Note that reducing on the first dim will make the LoD info lost.") + .SetDefault({0}); + AddAttr("keep_dim", + "(bool, default false) " + "If true, retain the reduced dimension with length 1.") + .SetDefault(false); + AddAttr("reduce_all", + "(bool, default false) " + "If true, output a scalar reduced along all dimensions.") + .SetDefault(false); + AddComment(string::Sprintf(R"DOC( +%s Operator. + +This operator computes the %s of input tensor along the given dimension. +The result tensor has 1 fewer dimension than the input unless keep_dim is true. 
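+For example, reducing a tensor of shape [2, 3, 4] over dim = [1] produces a
+tensor of shape [2, 4], or [2, 1, 4] when keep_dim is true.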
+If reduce_all is true, just reduce along all dimensions and output a scalar. + +)DOC", + GetOpType(), GetName())); } + + protected: + virtual std::string GetName() const = 0; + virtual std::string GetOpType() const = 0; }; } // namespace operators } // namespace paddle -#define FOR_EACH_KERNEL_FUNCTOR(__macro) \ - __macro(reduce_sum, SumFunctor, SumGradFunctor); \ - __macro(reduce_mean, MeanFunctor, MeanGradFunctor); \ - __macro(reduce_max, MaxFunctor, MaxOrMinGradFunctor); \ - __macro(reduce_min, MinFunctor, MaxOrMinGradFunctor); \ - __macro(reduce_prod, ProdFunctor, ProdGradFunctor); +namespace ops = paddle::operators; + +#define REGISTER_REDUCE_OP(op_name) \ + class __##op_name##Maker__ : public ops::ReduceOpMaker { \ + protected: \ + virtual std::string GetName() const { return #op_name; } \ + virtual std::string GetOpType() const { return "Reduce " #op_name; } \ + }; \ + REGISTER_OPERATOR(op_name, ops::ReduceOp, __##op_name##Maker__, \ + paddle::framework::DefaultGradOpDescMaker); \ + REGISTER_OPERATOR(op_name##_grad, ops::ReduceGradOp) diff --git a/paddle/fluid/operators/reduce_op_function.h b/paddle/fluid/operators/reduce_op_function.h new file mode 100644 index 0000000000..3da27bc8ac --- /dev/null +++ b/paddle/fluid/operators/reduce_op_function.h @@ -0,0 +1,109 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
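+//
+// This header gathers the Eigen-based helpers shared by the per-op reduce
+// files: ReduceFunctor applies a reduction functor (e.g. SumFunctor) along
+// the given dims, and ReduceGradFunctor broadcasts the upstream gradient
+// back to the input shape. A rough sketch of the intended call pattern,
+// with the rank template arguments D/R_D dispatched at runtime elsewhere:
+//
+//   ReduceFunctor<DeviceContext, T, D, R_D, SumFunctor>(
+//       dev_ctx, input, &output, dims, keep_dim);
+//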
+
+#pragma once
+#include <vector>
+#include "paddle/fluid/framework/eigen.h"
+#include "paddle/fluid/framework/op_registry.h"
+
+namespace paddle {
+namespace operators {
+
+using Tensor = framework::Tensor;
+using DDim = framework::DDim;
+template <typename T, size_t D, int MajorType = Eigen::RowMajor,
+          typename IndexType = Eigen::DenseIndex>
+using EigenTensor = framework::EigenTensor<T, D, MajorType, IndexType>;
+template <typename T, int MajorType = Eigen::RowMajor,
+          typename IndexType = Eigen::DenseIndex>
+using EigenScalar = framework::EigenScalar<T, MajorType, IndexType>;
+template <typename T, int MajorType = Eigen::RowMajor,
+          typename IndexType = Eigen::DenseIndex>
+using EigenVector = framework::EigenVector<T, MajorType, IndexType>;
+
+template <typename DeviceContext, typename T, size_t D, size_t R_D,
+          typename Functor>
+void ReduceFunctor(const DeviceContext& context,
+                   const framework::Tensor& input, framework::Tensor* output,
+                   const std::vector<int>& dims, bool keep_dim) {
+  auto x = EigenTensor<T, D>::From(input);
+  auto x_rank = static_cast<int>(x.dimensions().size());
+  auto reduce_dim = Eigen::array<int, R_D>();
+  std::vector<int> dims_ref = dims;
+  for (size_t i = 0; i < dims_ref.size(); ++i) {
+    if (dims_ref[i] < 0) dims_ref[i] = x_rank + dims_ref[i];
+    reduce_dim[i] = dims_ref[i];
+  }
+  // construct the squeezed output tensor
+  DDim out_dims = output->dims();
+  if (keep_dim && x_rank > 1) {
+    const int kDelFlag = -2;
+    auto dims_vector = framework::vectorize(out_dims);
+    for (size_t i = 0; i < dims_ref.size(); ++i) {
+      dims_vector[dims_ref[i]] = kDelFlag;
+    }
+    dims_vector.erase(remove(dims_vector.begin(), dims_vector.end(), kDelFlag),
+                      dims_vector.end());
+    out_dims = framework::make_ddim(dims_vector);
+  }
+  auto& place = *context.eigen_device();
+  Functor functor;
+
+  if (D == 1) {
+    auto out = EigenScalar<T>::From(*output);
+    functor(place, &x, &out, reduce_dim);
+  } else {
+    auto out = EigenTensor<T, (D - R_D) == 0 ? 1 : (D - R_D)>::From(*output,
+                                                                    out_dims);
+    functor(place, &x, &out, reduce_dim);
+  }
+}
+
+template <typename DeviceContext, typename T, size_t D, typename Functor>
+void ReduceGradFunctor(const DeviceContext& context,
+                       const framework::Tensor& input0,
+                       const framework::Tensor& input1,
+                       const framework::Tensor& input2,
+                       framework::Tensor* output,
+                       const std::vector<int>& dims) {
+  auto x = EigenTensor<T, D>::From(input0);
+  auto x_grad = EigenTensor<T, D>::From(*output);
+  auto x_rank = static_cast<int>(x.dimensions().size());
+  auto x_dims = input0.dims();
+  auto reduced_dims_v = framework::vectorize(x_dims);
+  std::vector<int> dims_ref = dims;
+  Eigen::array<int, D> broadcast_dim;
+  for (size_t i = 0; i < D; ++i) broadcast_dim[i] = 1;
+
+  int broadcast_times = 1;
+  for (size_t i = 0; i < dims_ref.size(); ++i) {
+    if (dims_ref[i] < 0) {
+      dims_ref[i] = x_rank + dims_ref[i];
+    }
+    reduced_dims_v[dims_ref[i]] = 1;
+    broadcast_dim[dims_ref[i]] = x_dims[dims_ref[i]];
+    broadcast_times *= x_dims[dims_ref[i]];
+  }
+  auto reduced_dims = framework::make_ddim(reduced_dims_v);
+  auto x_reduce = EigenTensor<T, D>::From(input1, reduced_dims);
+  auto x_reduce_grad = EigenTensor<T, D>::From(input2, reduced_dims);
+
+  auto& place = *context.eigen_device();
+
+  Functor functor;
+  functor(place, &x, &x_reduce, &x_grad, &x_reduce_grad, broadcast_dim,
+          broadcast_times);
+}
+
+}  // namespace operators
+}  // namespace paddle
diff --git a/paddle/fluid/operators/reduce_prod_op.cc b/paddle/fluid/operators/reduce_prod_op.cc
new file mode 100644
index 0000000000..713728b997
--- /dev/null
+++ b/paddle/fluid/operators/reduce_prod_op.cc
@@ -0,0 +1,35 @@
+// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "paddle/fluid/operators/reduce_prod_op.h"
+
+REGISTER_REDUCE_OP(reduce_prod);
+REGISTER_OP_CPU_KERNEL(reduce_prod,
+                       ops::ReduceKernel<paddle::platform::CPUDeviceContext,
+                                         float, ops::ProdFunctor>,
+                       ops::ReduceKernel<paddle::platform::CPUDeviceContext,
+                                         double, ops::ProdFunctor>,
+                       ops::ReduceKernel<paddle::platform::CPUDeviceContext,
+                                         int, ops::ProdFunctor>,
+                       ops::ReduceKernel<paddle::platform::CPUDeviceContext,
+                                         int64_t, ops::ProdFunctor>);
+REGISTER_OP_CPU_KERNEL(reduce_prod_grad,
+                       ops::ReduceGradKernel<paddle::platform::CPUDeviceContext,
+                                             float, ops::ProdGradFunctor>,
+                       ops::ReduceGradKernel<paddle::platform::CPUDeviceContext,
+                                             double, ops::ProdGradFunctor>,
+                       ops::ReduceGradKernel<paddle::platform::CPUDeviceContext,
+                                             int, ops::ProdGradFunctor>,
+                       ops::ReduceGradKernel<paddle::platform::CPUDeviceContext,
+                                             int64_t, ops::ProdGradFunctor>);
diff --git a/paddle/fluid/operators/reduce_prod_op.cu b/paddle/fluid/operators/reduce_prod_op.cu
new file mode 100644
index 0000000000..d62e677d92
--- /dev/null
+++ b/paddle/fluid/operators/reduce_prod_op.cu
@@ -0,0 +1,34 @@
+// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "paddle/fluid/operators/reduce_prod_op.h"
+
+REGISTER_OP_CUDA_KERNEL(reduce_prod,
+                        ops::ReduceKernel<paddle::platform::CUDADeviceContext,
+                                          float, ops::ProdFunctor>,
+                        ops::ReduceKernel<paddle::platform::CUDADeviceContext,
+                                          double, ops::ProdFunctor>,
+                        ops::ReduceKernel<paddle::platform::CUDADeviceContext,
+                                          int, ops::ProdFunctor>,
+                        ops::ReduceKernel<paddle::platform::CUDADeviceContext,
+                                          int64_t, ops::ProdFunctor>);
+REGISTER_OP_CUDA_KERNEL(
+    reduce_prod_grad,
+    ops::ReduceGradKernel<paddle::platform::CUDADeviceContext, float,
+                          ops::ProdGradFunctor>,
+    ops::ReduceGradKernel<paddle::platform::CUDADeviceContext, double,
+                          ops::ProdGradFunctor>,
+    ops::ReduceGradKernel<paddle::platform::CUDADeviceContext, int,
+                          ops::ProdGradFunctor>,
+    ops::ReduceGradKernel<paddle::platform::CUDADeviceContext, int64_t,
+                          ops::ProdGradFunctor>);
diff --git a/paddle/fluid/operators/reduce_prod_op.h b/paddle/fluid/operators/reduce_prod_op.h
new file mode 100644
index 0000000000..97748113e0
--- /dev/null
+++ b/paddle/fluid/operators/reduce_prod_op.h
@@ -0,0 +1,39 @@
+// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#pragma once
+
+#include "paddle/fluid/operators/reduce_op.h"
+
+namespace paddle {
+namespace operators {
+
+struct ProdFunctor {
+  template <typename DeviceContext, typename X, typename Y, typename Dim>
+  void operator()(const DeviceContext& place, X* x, Y* y, const Dim& dim) {
+    y->device(place) = x->prod(dim);
+  }
+};
+
+struct ProdGradFunctor {
+  template <typename DeviceContext, typename X, typename Y, typename DX,
+            typename DY, typename Dim>
+  void operator()(const DeviceContext& place, X* x, Y* y, DX* dx, DY* dy,
+                  const Dim& dim, int size) {
+    // d prod(x) / d x_i = prod(x) / x_i, so the input gradient is the
+    // broadcast upstream gradient times the broadcast product over the input.
+    dx->device(place) = dy->broadcast(dim) * y->broadcast(dim) * x->inverse();
+  }
+};
+
+}  // namespace operators
+}  // namespace paddle
diff --git a/paddle/fluid/operators/reduce_sum_op.cc b/paddle/fluid/operators/reduce_sum_op.cc
new file mode 100644
index 0000000000..f0e5f6580f
--- /dev/null
+++ b/paddle/fluid/operators/reduce_sum_op.cc
@@ -0,0 +1,35 @@
+// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "paddle/fluid/operators/reduce_sum_op.h" + +REGISTER_REDUCE_OP(reduce_sum); +REGISTER_OP_CPU_KERNEL( + reduce_sum, ops::ReduceKernel, + ops::ReduceKernel, + ops::ReduceKernel, + ops::ReduceKernel); +REGISTER_OP_CPU_KERNEL( + reduce_sum_grad, + ops::ReduceSumGradKernel, + ops::ReduceSumGradKernel, + ops::ReduceSumGradKernel, + ops::ReduceSumGradKernel); diff --git a/paddle/fluid/operators/reduce_sum_op.cu b/paddle/fluid/operators/reduce_sum_op.cu new file mode 100644 index 0000000000..f2e16955a5 --- /dev/null +++ b/paddle/fluid/operators/reduce_sum_op.cu @@ -0,0 +1,34 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "paddle/fluid/operators/reduce_sum_op.h" + +REGISTER_OP_CUDA_KERNEL(reduce_sum, + ops::ReduceKernel, + ops::ReduceKernel, + ops::ReduceKernel, + ops::ReduceKernel); +REGISTER_OP_CUDA_KERNEL( + reduce_sum_grad, ops::ReduceGradKernel, + ops::ReduceGradKernel, + ops::ReduceGradKernel, + ops::ReduceGradKernel); diff --git a/paddle/fluid/operators/reduce_sum_op.h b/paddle/fluid/operators/reduce_sum_op.h new file mode 100644 index 0000000000..3e8d1bbdba --- /dev/null +++ b/paddle/fluid/operators/reduce_sum_op.h @@ -0,0 +1,97 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once + +#include + +#include "paddle/fluid/operators/reduce_op.h" + +namespace paddle { +namespace operators { + +// use for loop to speed up Eigen broadcast. 
+// about 4 times faster than the broadcast version
+template <typename DeviceContext, typename T, typename Functor>
+class ReduceSumGradKernel : public framework::OpKernel<T> {
+ public:
+  void Compute(const framework::ExecutionContext& context) const override {
+    auto dims = context.Attr<std::vector<int>>("dim");
+    if (context.GetPlace().type() == typeid(platform::CPUPlace) &&
+        dims.size() == 1) {
+      auto* input0 = context.Input<Tensor>("X");
+      auto* input2 = context.Input<Tensor>(framework::GradVarName("Out"));
+      auto* output = context.Output<Tensor>(framework::GradVarName("X"));
+      output->mutable_data<T>(context.GetPlace());
+      const auto* input2_d = input2->data<T>();
+      auto* output_d = output->data<T>();
+
+      // handle reduce_all
+      if (input2->dims().size() == 1 && input2->dims()[0] == 1) {
+        for (int64_t i = 0; i < framework::product(input0->dims()); ++i) {
+          output_d[i] = input2_d[0];
+        }
+        return;
+      }
+
+      // handle reduce by one dimension
+      int reduce_dim_index = dims[0];
+      if (reduce_dim_index < 0) {
+        reduce_dim_index += input0->dims().size();
+      }
+
+      auto& input_dim = input0->dims();
+      int64_t before_dim = 1;
+      for (int i = 0; i < reduce_dim_index; ++i) {
+        before_dim *= input_dim[i];
+      }
+      int64_t reduce_dim = input_dim[reduce_dim_index];
+      int64_t after_dim = 1;
+      for (int i = reduce_dim_index + 1; i < input_dim.size(); ++i) {
+        after_dim *= input_dim[i];
+      }
+      // copy the reduced gradient to every slice along the reduced dim
+      for (int64_t i = 0; i < before_dim; ++i) {
+        for (int64_t j = 0; j < reduce_dim; ++j) {
+          for (int64_t k = 0; k < after_dim; ++k) {
+            output_d[i * reduce_dim * after_dim + j * after_dim + k] =
+                input2_d[i * after_dim + k];
+          }
+        }
+      }
+      return;
+    }
+
+    // default use Eigen broadcast
+    ReduceGradKernel<DeviceContext, T, Functor> kernel;
+    kernel.Compute(context);
+  }
+};
+
+struct SumFunctor {
+  template <typename DeviceContext, typename X, typename Y, typename Dim>
+  void operator()(const DeviceContext& place, X* x, Y* y, const Dim& dim) {
+    y->device(place) = x->sum(dim);
+  }
+};
+
+struct SumGradFunctor {
+  template <typename DeviceContext, typename X, typename Y, typename DX,
+            typename DY, typename Dim>
+  void operator()(const DeviceContext& place, X* x, Y* y, DX* dx, DY* dy,
+                  const Dim& dim, int size) {
+    dx->device(place) = dy->eval().broadcast(dim);
+  }
+};
+
+}  // namespace operators
+}  // namespace paddle
diff --git a/paddle/fluid/operators/reorder_lod_tensor_by_rank_op.cc b/paddle/fluid/operators/reorder_lod_tensor_by_rank_op.cc
index 5c3e1f5678..e4f4fe358e 100644
--- a/paddle/fluid/operators/reorder_lod_tensor_by_rank_op.cc
+++ b/paddle/fluid/operators/reorder_lod_tensor_by_rank_op.cc
@@ -23,9 +23,7 @@ namespace operators {
 class ReorderLoDTensorByRankTableOpProtoMaker
     : public framework::OpProtoAndCheckerMaker {
  public:
-  ReorderLoDTensorByRankTableOpProtoMaker(OpProto *proto,
-                                          OpAttrChecker *op_checker)
-      : OpProtoAndCheckerMaker(proto, op_checker) {
+  void Make() override {
     AddInput("X",
              "(LoDTensor), the input lod tensor to be reordered according to "
             "Input(RankTable).");
diff --git a/paddle/fluid/operators/reshape_op.cc b/paddle/fluid/operators/reshape_op.cc
index 5e5ccc3ded..a1dfe39c3a 100644
--- a/paddle/fluid/operators/reshape_op.cc
+++ b/paddle/fluid/operators/reshape_op.cc
@@ -12,18 +12,111 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License. */
-#include "paddle/fluid/operators/reshape_op.h"
-
 #include <string>
 #include <vector>
+#include "paddle/fluid/framework/op_registry.h"
 
 namespace paddle {
 namespace operators {
 
+class ReshapeOp : public framework::OperatorWithKernel {
+ public:
+  ReshapeOp(const std::string &type, const framework::VariableNameMap &inputs,
+            const framework::VariableNameMap &outputs,
+            const framework::AttributeMap &attrs)
+      : OperatorWithKernel(type, inputs, outputs, attrs) {}
+
+  void InferShape(framework::InferShapeContext *ctx) const override {
+    PADDLE_ENFORCE(ctx->HasInput("X"),
+                   "Input(X) of ReshapeOp should not be null.");
+    PADDLE_ENFORCE(ctx->HasOutput("Out"),
+                   "Output(Out) of ReshapeOp should not be null.");
+
+    const std::vector<int> &shape = ctx->Attrs().Get<std::vector<int>>("shape");
+    PADDLE_ENFORCE(!shape.empty(),
+                   "The shape information must be set by Attr(shape).");
+
+    if (ctx->HasInput("Shape") && ctx->IsRuntime()) {
+      // If true, set the shape of Output(Out) according to Input(Shape) in
+      // ReshapeKernel with ExecutionContext. Also check LoD in ReshapeKernel.
+      ctx->ShareLoD("X", /*->*/ "Out");
+      return;
+    }
+
+    auto x_dims = ctx->GetInputDim("X");
+    auto out_dims = ValidateShape(shape, x_dims);
+    ctx->SetOutputDim("Out", out_dims);
+    if (x_dims[0] == out_dims[0]) {
+      // Only pass LoD when the first dimension of output and Input(X)
+      // are the same.
+      ctx->ShareLoD("X", /*->*/ "Out");
+    }
+  }
+
+  static framework::DDim ValidateShape(const std::vector<int> shape,
+                                       const framework::DDim &in_dims) {
+    const int64_t in_size = framework::product(in_dims);
+    // Only one dimension can be set to -1, whose size will be automatically
+    // inferred; a 0 copies the corresponding dimension from the input,
+    // e.g. in_dims = [8, 4] and shape = [0, -1, 2] gives output [8, 2, 2].
+    const int64_t unk_dim_val = -1;
+    const int64_t copy_dim_val = 0;
+
+    std::vector<int64_t> output_shape(shape.size(), 0);
+    int64_t capacity = 1;
+    int unk_dim_idx = -1;
+    for (size_t i = 0; i < shape.size(); ++i) {
+      if (shape[i] == unk_dim_val) {
+        PADDLE_ENFORCE(
+            unk_dim_idx == -1,
+            "Only one input dimension of Attr(shape) can be unknown.");
+        unk_dim_idx = i;
+      } else if (shape[i] == copy_dim_val) {
+        PADDLE_ENFORCE(
+            static_cast<int>(i) < in_dims.size(),
+            "The index of dimension to copy from input shape must be less "
+            "than the size of input shape.");
+      } else {
+        PADDLE_ENFORCE(
+            shape[i] > 0,
+            "Each input dimension of Attr(shape) must not be negative except "
+            "one unknown dimension.");
+      }
+
+      capacity *= (shape[i] ? shape[i] : in_dims[i]);
+      output_shape[i] =
+          (shape[i] ? static_cast<int64_t>(shape[i]) : in_dims[i]);
+    }
+
+    if (unk_dim_idx != -1) {
+      if (in_size > 0) {
+        // in_size < 0 and is un-determinate in compile time, skip the check,
+        // for example, in_dims = [-1, 8, 1, 1], shape = [-1, 3, 8],
+        // capacity = -24, in_size = -8, output_shape[0] = 0
+        // the following check will fail.
+        output_shape[unk_dim_idx] = -in_size / capacity;
+        PADDLE_ENFORCE_EQ(output_shape[unk_dim_idx] * capacity, -in_size,
+                          "Invalid shape is given.");
+      } else {
+        output_shape[unk_dim_idx] = -1;
+      }
+    } else {
+      PADDLE_ENFORCE_EQ(capacity, in_size, "Invalid shape is given.");
+    }
+    return framework::make_ddim(output_shape);
+  }
+
+ protected:
+  framework::OpKernelType GetExpectedKernelType(
+      const framework::ExecutionContext &ctx) const override {
+    return framework::OpKernelType(
+        framework::ToDataType(ctx.Input<framework::LoDTensor>("X")->type()),
+        ctx.device_context());
+  }
+};
+
 class ReshapeOpMaker : public framework::OpProtoAndCheckerMaker {
  public:
-  ReshapeOpMaker(OpProto *proto, OpAttrChecker *op_checker)
-      : OpProtoAndCheckerMaker(proto, op_checker) {
+  void Make() override {
     AddInput("X", "(Tensor).
The input tensor of reshape operator."); AddInput("Shape", "(Tensor, optional). If provided, reshape according to " @@ -34,12 +127,6 @@ class ReshapeOpMaker : public framework::OpProtoAndCheckerMaker { AddOutput("Out", "(Tensor). The output tensor of reshape operator."); AddAttr>( "shape", "(std::vector) Target shape of reshape operator."); - AddAttr("inplace", - "(default: false) Change the source tensor's shape without " - "memory copy. When Attr(inplace) is set true, the output " - "tensor shares memory with Input(X), otherwise, a new output " - "tensor is created, and its data are copied from Input(x).") - .SetDefault(false); AddComment(R"DOC( Reshape Operator. @@ -108,19 +195,78 @@ class ReshapeGradOp : public framework::OperatorWithKernel { } }; +class ReshapeKernel { + public: + void operator()(const framework::ExecutionContext &ctx) const { + auto *out = ctx.Output("Out"); + auto *in = ctx.Input("X"); + + auto *shape_tensor = ctx.HasInput("Shape") + ? ctx.Input("Shape") + : nullptr; + + framework::DDim out_dims = out->dims(); + + if (shape_tensor) { + auto *shape_data = shape_tensor->data(); + framework::Tensor cpu_shape_tensor; + if (platform::is_gpu_place(shape_tensor->place())) { + TensorCopySync(*shape_tensor, platform::CPUPlace(), &cpu_shape_tensor); + shape_data = cpu_shape_tensor.data(); + } + auto shape = + std::vector(shape_data, shape_data + shape_tensor->numel()); + out_dims = ReshapeOp::ValidateShape(shape, in->dims()); + } + if (!in->lod().empty()) { + PADDLE_ENFORCE_EQ( + out_dims[0], in->dims()[0], + "Reshape operator cannot reshape an input sequence batch " + "into an output sequence batch that has a different " + "number of time steps. Please consider using " + "sequence_reshape op."); + } + + out->mutable_data(ctx.GetPlace(), in->type()); + framework::TensorCopySync(*in, ctx.GetPlace(), out); + out->Resize(out_dims); + } +}; + +class ReshapeGradKernel { + public: + void operator()(const framework::ExecutionContext &ctx) const { + auto *d_out = ctx.Input(framework::GradVarName("Out")); + auto *d_x = ctx.Output(framework::GradVarName("X")); + auto in_dims = d_x->dims(); + + d_x->mutable_data(ctx.GetPlace(), d_out->type()); + framework::TensorCopySync(*d_out, ctx.GetPlace(), d_x); + d_x->Resize(in_dims); + } +}; + } // namespace operators } // namespace paddle namespace ops = paddle::operators; -using CPU = paddle::platform::CPUDeviceContext; REGISTER_OPERATOR(reshape, ops::ReshapeOp, ops::ReshapeOpMaker, paddle::framework::DefaultGradOpDescMaker); REGISTER_OPERATOR(reshape_grad, ops::ReshapeGradOp); -REGISTER_OP_CPU_KERNEL(reshape, ops::ReshapeKernel, - ops::ReshapeKernel, - ops::ReshapeKernel, - ops::ReshapeKernel); -REGISTER_OP_CPU_KERNEL(reshape_grad, ops::ReshapeGradKernel, - ops::ReshapeGradKernel, - ops::ReshapeGradKernel, - ops::ReshapeGradKernel); +REGISTER_OP_CPU_KERNEL_FUNCTOR(reshape, float, ops::ReshapeKernel, double, + ops::ReshapeKernel, int, ops::ReshapeKernel, + int64_t, ops::ReshapeKernel); +REGISTER_OP_CPU_KERNEL_FUNCTOR(reshape_grad, float, ops::ReshapeGradKernel, + double, ops::ReshapeGradKernel, int, + ops::ReshapeGradKernel, int64_t, + ops::ReshapeGradKernel); + +#ifdef PADDLE_WITH_CUDA +REGISTER_OP_CUDA_KERNEL_FUNCTOR(reshape, float, ops::ReshapeKernel, double, + ops::ReshapeKernel, int, ops::ReshapeKernel, + int64_t, ops::ReshapeKernel); +REGISTER_OP_CUDA_KERNEL_FUNCTOR(reshape_grad, float, ops::ReshapeGradKernel, + double, ops::ReshapeGradKernel, int, + ops::ReshapeGradKernel, int64_t, + ops::ReshapeGradKernel); +#endif diff --git 
a/paddle/fluid/operators/reshape_op.cu b/paddle/fluid/operators/reshape_op.cu deleted file mode 100644 index c628c634e2..0000000000 --- a/paddle/fluid/operators/reshape_op.cu +++ /dev/null @@ -1,26 +0,0 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. */ - -#include "paddle/fluid/operators/reshape_op.h" -using CUDA = paddle::platform::CUDADeviceContext; - -REGISTER_OP_CUDA_KERNEL(reshape, paddle::operators::ReshapeKernel, - paddle::operators::ReshapeKernel, - paddle::operators::ReshapeKernel, - paddle::operators::ReshapeKernel); -REGISTER_OP_CUDA_KERNEL(reshape_grad, - paddle::operators::ReshapeGradKernel, - paddle::operators::ReshapeGradKernel, - paddle::operators::ReshapeGradKernel, - paddle::operators::ReshapeGradKernel); diff --git a/paddle/fluid/operators/reshape_op.h b/paddle/fluid/operators/reshape_op.h deleted file mode 100644 index ccd7063fe6..0000000000 --- a/paddle/fluid/operators/reshape_op.h +++ /dev/null @@ -1,184 +0,0 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. */ - -#pragma once - -#include -#include - -#include "paddle/fluid/framework/eigen.h" -#include "paddle/fluid/framework/op_registry.h" - -namespace paddle { -namespace operators { - -class ReshapeOp : public framework::OperatorWithKernel { - public: - ReshapeOp(const std::string &type, const framework::VariableNameMap &inputs, - const framework::VariableNameMap &outputs, - const framework::AttributeMap &attrs) - : OperatorWithKernel(type, inputs, outputs, attrs) {} - - void InferShape(framework::InferShapeContext *ctx) const override { - PADDLE_ENFORCE(ctx->HasInput("X"), - "Input(X) of ReshapeOp should not be null."); - PADDLE_ENFORCE(ctx->HasOutput("Out"), - "Output(Out) of ReshapeOp should not be null."); - - const std::vector &shape = ctx->Attrs().Get>("shape"); - PADDLE_ENFORCE(!shape.empty(), - "The shape information must be set by Attr(shape)."); - - if (ctx->HasInput("Shape") && ctx->IsRuntime()) { - // If true, set the shape of Output(Out) according to Input(Shape) in - // ReshapeKernel with ExecutionContext. Also check LoD in ReshapeKernel. - ctx->ShareLoD("X", /*->*/ "Out"); - return; - } - - auto x_dims = ctx->GetInputDim("X"); - auto out_dims = ValidateShape(shape, x_dims); - ctx->SetOutputDim("Out", out_dims); - if (x_dims[0] == out_dims[0]) { - // Only pass LoD when the first dimension of output and Input(X) - // are the same. 
- ctx->ShareLoD("X", /*->*/ "Out"); - } - } - - static framework::DDim ValidateShape(const std::vector shape, - const framework::DDim &in_dims) { - const int64_t in_size = framework::product(in_dims); - // only one dimension can be set to -1, whose size will be automatically - // infered. - const int64_t unk_dim_val = -1; - const int64_t copy_dim_val = 0; - - std::vector output_shape(shape.size(), 0); - int64_t capacity = 1; - int unk_dim_idx = -1; - for (size_t i = 0; i < shape.size(); ++i) { - if (shape[i] == unk_dim_val) { - PADDLE_ENFORCE( - unk_dim_idx == -1, - "Only one input dimension of Attr(shape) can be unknown."); - unk_dim_idx = i; - } else if (shape[i] == copy_dim_val) { - PADDLE_ENFORCE( - static_cast(i) < in_dims.size(), - "The index of dimension to copy from input shape must be less " - "than the size of input shape."); - } else { - PADDLE_ENFORCE( - shape[i] > 0, - "Each input dimension of Attr(shape) must not be negtive except " - "one unknown dimension."); - } - - capacity *= (shape[i] ? shape[i] : in_dims[i]); - output_shape[i] = - (shape[i] ? static_cast(shape[i]) : in_dims[i]); - } - - if (unk_dim_idx != -1) { - output_shape[unk_dim_idx] = -in_size / capacity; - // in_size < 0 and is un-determinate in compile time, skip the check, - // for example, in_dims = [-1, 8, 1, 1], shape = [-1, 3, 8], - // capacity = -24, in_size = -8, output_shape[0] = 0 - // the following check will fail. - if (in_size > 0) { - PADDLE_ENFORCE_EQ(output_shape[unk_dim_idx] * capacity, -in_size, - "Invalid shape is given."); - } - } else { - PADDLE_ENFORCE_EQ(capacity, in_size, "Invalid shape is given."); - } - return framework::make_ddim(output_shape); - } - - protected: - framework::OpKernelType GetExpectedKernelType( - const framework::ExecutionContext &ctx) const override { - return framework::OpKernelType( - framework::ToDataType(ctx.Input("X")->type()), - ctx.device_context()); - } -}; - -template -class ReshapeKernel : public framework::OpKernel { - public: - void Compute(const framework::ExecutionContext &ctx) const { - auto *out = ctx.Output("Out"); - auto *in = ctx.Input("X"); - auto *shape_tensor = ctx.Input("Shape"); - - framework::DDim out_dims = out->dims(); - - if (shape_tensor) { - auto *shape_data = shape_tensor->data(); - framework::Tensor cpu_shape_tensor; - if (platform::is_gpu_place(ctx.GetPlace())) { - TensorCopySync(*shape_tensor, platform::CPUPlace(), &cpu_shape_tensor); - shape_data = cpu_shape_tensor.data(); - } - auto shape = - std::vector(shape_data, shape_data + shape_tensor->numel()); - out_dims = ReshapeOp::ValidateShape(shape, in->dims()); - } - if (!in->lod().empty()) { - PADDLE_ENFORCE_EQ( - out_dims[0], in->dims()[0], - "Reshape operator cannot reshape an input sequence batch " - "into an output sequence batch that has a different " - "number of time steps. 
 Please consider using "
-        "sequence_reshape op.");
-    }
-
-    bool inplace = ctx.Attr<bool>("inplace");
-    out->Resize(out_dims);
-    if (!inplace) {
-      out->mutable_data<T>(ctx.GetPlace());
-      framework::TensorCopySync(*in, ctx.GetPlace(), out);
-      out->Resize(out_dims);
-    } else {
-      out->ShareDataWith(*in);
-      out->Resize(out_dims);
-    }
-  }
-};
-
-template <typename DeviceContext, typename T>
-class ReshapeGradKernel : public framework::OpKernel<T> {
- public:
-  void Compute(const framework::ExecutionContext &ctx) const {
-    auto *d_out = ctx.Input<framework::Tensor>(framework::GradVarName("Out"));
-    auto *d_x = ctx.Output<framework::Tensor>(framework::GradVarName("X"));
-
-    d_x->mutable_data<T>(ctx.GetPlace());
-    bool inplace = ctx.Attr<bool>("inplace");
-
-    auto in_dims = d_x->dims();
-    if (!inplace) {
-      framework::TensorCopy(*d_out, ctx.GetPlace(), ctx.device_context(), d_x);
-      ctx.device_context().Wait();
-      d_x->Resize(in_dims);
-    } else {
-      d_x->ShareDataWith(*d_out);
-      d_x->Resize(in_dims);
-    }
-  }
-};
-}  // namespace operators
-}  // namespace paddle
diff --git a/paddle/fluid/operators/reverse_op.cc b/paddle/fluid/operators/reverse_op.cc
new file mode 100644
index 0000000000..a20f7d231f
--- /dev/null
+++ b/paddle/fluid/operators/reverse_op.cc
@@ -0,0 +1,107 @@
+// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "paddle/fluid/operators/reverse_op.h"
+#include <vector>
+
+namespace paddle {
+namespace operators {
+
+class ReverseOp : public framework::OperatorWithKernel {
+ public:
+  using framework::OperatorWithKernel::OperatorWithKernel;
+
+  void InferShape(framework::InferShapeContext* ctx) const override {
+    PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) should not be null");
+    PADDLE_ENFORCE(ctx->HasOutput("Out"), "Output(Out) should not be null");
+    const auto& x_dims = ctx->GetInputDim("X");
+    const auto& axis = ctx->Attrs().Get<std::vector<int>>("axis");
+    PADDLE_ENFORCE(!axis.empty(), "'axis' can not be empty.");
+    for (int a : axis) {
+      PADDLE_ENFORCE_LT(a, x_dims.size(),
+                        "The axis must be less than input tensor's rank.");
+    }
+    ctx->SetOutputDim("Out", x_dims);
+  }
+};
+
+class ReverseOpMaker : public framework::OpProtoAndCheckerMaker {
+ public:
+  void Make() override {
+    AddInput("X", "The LoDTensor to be flipped.");
+    AddOutput("Out", "The LoDTensor after flipping.");
+    AddAttr<std::vector<int>>(
+        "axis", "The axes along which the order of elements is reversed.");
+    AddComment(R"DOC(
+  Reverse Operator.
+
+  Reverse the order of elements in the input LoDTensor along the given axes.
+
+  Case 1:
+    Given
+      X = [[1, 2, 3, 4, 5]
+           [6, 7, 8, 9, 10]
+           [11, 12, 13, 14, 15]],
+    and
+      axis = [0],
+    we get:
+      Out = [[11, 12, 13, 14, 15]
+             [6, 7, 8, 9, 10]
+             [1, 2, 3, 4, 5]].
+ + Case 2: + Given + X = [[[1, 2, 3, 4] + [5, 6, 7, 8]] + [[9, 10, 11, 12] + [13, 14, 15, 16]]], + and + axis = [0, 2], + we get: + Out = [[[12, 11, 10, 9] + [16, 15, 14, 13]] + [[4, 3, 2, 1] + [8, 7, 6, 5]]], + )DOC"); + } +}; + +class ReverseGradMaker : public framework::SingleGradOpDescMaker { + public: + using framework::SingleGradOpDescMaker::SingleGradOpDescMaker; + + std::unique_ptr Apply() const override { + auto* grad_op = new framework::OpDesc(); + grad_op->SetType("reverse"); + grad_op->SetInput("X", OutputGrad("Out")); + grad_op->SetOutput("Out", InputGrad("X")); + grad_op->SetAttr("axis", GetAttr("axis")); + return std::unique_ptr(grad_op); + } +}; + +} // namespace operators +} // namespace paddle + +namespace ops = paddle::operators; +REGISTER_OPERATOR(reverse, ops::ReverseOp, ops::ReverseOpMaker, + ops::ReverseGradMaker); +REGISTER_OPERATOR(reverse_grad, ops::ReverseOp); +REGISTER_OP_CPU_KERNEL( + reverse, ops::ReverseKernel, + ops::ReverseKernel, + ops::ReverseKernel, + ops::ReverseKernel, + ops::ReverseKernel, + ops::ReverseKernel) diff --git a/paddle/fluid/operators/reverse_op.cu b/paddle/fluid/operators/reverse_op.cu new file mode 100644 index 0000000000..635c41529b --- /dev/null +++ b/paddle/fluid/operators/reverse_op.cu @@ -0,0 +1,24 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "paddle/fluid/operators/reverse_op.h" + +namespace ops = paddle::operators; +REGISTER_OP_CUDA_KERNEL( + reverse, ops::ReverseKernel, + ops::ReverseKernel, + ops::ReverseKernel, + ops::ReverseKernel, + ops::ReverseKernel, + ops::ReverseKernel) diff --git a/paddle/fluid/operators/reverse_op.h b/paddle/fluid/operators/reverse_op.h new file mode 100644 index 0000000000..9063cd59bb --- /dev/null +++ b/paddle/fluid/operators/reverse_op.h @@ -0,0 +1,87 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
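+//
+// ReverseFunctor below flips a tensor along the requested axes by building
+// a boolean mask for Eigen's reverse(): reverse_axis[a] is true exactly for
+// the axes named in the "axis" attribute. For instance, for a rank-2 input
+// with axis = [0], the mask is {true, false}, which reverses the row order
+// while leaving the elements inside each row untouched.
+//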
+
+#pragma once
+#include <vector>
+#include "paddle/fluid/framework/eigen.h"
+#include "paddle/fluid/framework/op_registry.h"
+
+namespace paddle {
+namespace operators {
+template <typename DeviceContext, typename T, int Rank>
+struct ReverseFunctor {
+  void operator()(const DeviceContext& context, const framework::LoDTensor& in,
+                  framework::LoDTensor* out, const std::vector<int>& axis) {
+    Eigen::array<bool, Rank> reverse_axis;
+    for (int i = 0; i < Rank; ++i) {
+      reverse_axis[i] = false;
+    }
+    for (int a : axis) {
+      reverse_axis[a] = true;
+    }
+
+    auto in_eigen = framework::EigenTensor<T, Rank>::From(in);
+    auto out_eigen = framework::EigenTensor<T, Rank>::From(*out);
+    auto* dev = context.eigen_device();
+
+    out_eigen.device(*dev) = in_eigen.reverse(reverse_axis);
+  }
+};
+
+template <typename DeviceContext, typename T>
+class ReverseKernel : public framework::OpKernel<T> {
+ public:
+  void Compute(const framework::ExecutionContext& context) const override {
+    auto* x = context.Input<framework::LoDTensor>("X");
+    auto* out = context.Output<framework::LoDTensor>("Out");
+    out->mutable_data<T>(context.GetPlace());
+    const auto& axis = context.Attr<std::vector<int>>("axis");
+    int rank = x->dims().size();
+    auto& dev_ctx = context.template device_context<DeviceContext>();
+
+    switch (rank) {
+      case 1:
+        ReverseFunctor<DeviceContext, T, 1> functor1;
+        functor1(dev_ctx, *x, out, axis);
+        break;
+      case 2:
+        ReverseFunctor<DeviceContext, T, 2> functor2;
+        functor2(dev_ctx, *x, out, axis);
+        break;
+      case 3:
+        ReverseFunctor<DeviceContext, T, 3> functor3;
+        functor3(dev_ctx, *x, out, axis);
+        break;
+      case 4:
+        ReverseFunctor<DeviceContext, T, 4> functor4;
+        functor4(dev_ctx, *x, out, axis);
+        break;
+      case 5:
+        ReverseFunctor<DeviceContext, T, 5> functor5;
+        functor5(dev_ctx, *x, out, axis);
+        break;
+      case 6:
+        ReverseFunctor<DeviceContext, T, 6> functor6;
+        functor6(dev_ctx, *x, out, axis);
+        break;
+      default:
+        PADDLE_THROW(
+            "Reverse operator doesn't support tensors whose ranks are greater "
+            "than 6.");
+    }
+  }
+};
+
+}  // namespace operators
+}  // namespace paddle
diff --git a/paddle/fluid/operators/rmsprop_op.cc b/paddle/fluid/operators/rmsprop_op.cc
index a8855b3ccd..919ebe48ca 100644
--- a/paddle/fluid/operators/rmsprop_op.cc
+++ b/paddle/fluid/operators/rmsprop_op.cc
@@ -63,8 +63,7 @@ class RmspropOp : public framework::OperatorWithKernel {
 
 class RmspropOpMaker : public framework::OpProtoAndCheckerMaker {
  public:
-  RmspropOpMaker(OpProto *proto, OpAttrChecker *op_checker)
-      : OpProtoAndCheckerMaker(proto, op_checker) {
+  void Make() override {
     AddInput("Param",
             "(Tensor, default Tensor<float>) "
             "Input parameter value that has to be updated.");
diff --git a/paddle/fluid/operators/rnn_memory_helper_op.cc b/paddle/fluid/operators/rnn_memory_helper_op.cc
index 70f205d887..23e5fc1112 100644
--- a/paddle/fluid/operators/rnn_memory_helper_op.cc
+++ b/paddle/fluid/operators/rnn_memory_helper_op.cc
@@ -59,8 +59,7 @@ class RNNMemoryHelperOpShapeInference : public framework::InferShapeBase {
 
 class RNNMemoryHelperOpInfoMaker : public framework::OpProtoAndCheckerMaker {
  public:
-  RNNMemoryHelperOpInfoMaker(OpProto *proto, OpAttrChecker *op_checker)
-      : OpProtoAndCheckerMaker(proto, op_checker) {
+  void Make() override {
     AddInput("X", "");
     AddOutput("Out", "");
     AddAttr<int>("dtype",
@@ -117,8 +116,7 @@ class RNNMemoryHelperGradOp : public framework::OperatorBase {
 class RNNMemoryHelperGradOpInfoMaker
     : public framework::OpProtoAndCheckerMaker {
  public:
-  RNNMemoryHelperGradOpInfoMaker(OpProto *proto, OpAttrChecker *op_checker)
-      : OpProtoAndCheckerMaker(proto, op_checker) {
+  void Make() override {
     AddInput(framework::GradVarName("Out"), "");
     AddInput("X", "");
     AddInput("Out", "");
diff --git a/paddle/fluid/operators/roi_pool_op.cc b/paddle/fluid/operators/roi_pool_op.cc
index 397e49ef20..d6d209d5de 100644
--- a/paddle/fluid/operators/roi_pool_op.cc
+++ b/paddle/fluid/operators/roi_pool_op.cc
@@ -98,8 +98,7 @@ class ROIPoolGradOp : public framework::OperatorWithKernel {
 
 class ROIPoolOpMaker : public framework::OpProtoAndCheckerMaker {
  public:
-  ROIPoolOpMaker(OpProto* proto, OpAttrChecker* op_checker)
-      : OpProtoAndCheckerMaker(proto, op_checker) {
+  void Make() override {
     AddInput("X",
              "(Tensor), "
              "the input of ROIPoolOp. "
@@ -140,7 +139,20 @@ class ROIPoolOpMaker : public framework::OpProtoAndCheckerMaker {
              "The pooled output width.")
         .SetDefault(1);
     AddComment(R"DOC(
-ROIPool operator
+**ROIPool Operator**
+
+Region of interest pooling (also known as RoI pooling) performs
+max pooling on inputs of nonuniform sizes to obtain
+fixed-size feature maps (e.g. 7*7).
+
+The operator has three steps:
+
+1. Dividing each region proposal into equal-sized sections with
+   the pooled_width and pooled_height
+
+2. Finding the largest value in each section
+
+3. Copying these max values to the output buffer
 
 ROI Pooling for Faster-RCNN. The link below is a further introduction:
 https://stackoverflow.com/questions/43430056/what-is-roi-layer-in-fast-rcnn
diff --git a/paddle/fluid/operators/roi_pool_op.cu b/paddle/fluid/operators/roi_pool_op.cu
index f905d690f9..50450b62f7 100644
--- a/paddle/fluid/operators/roi_pool_op.cu
+++ b/paddle/fluid/operators/roi_pool_op.cu
@@ -38,10 +38,10 @@ __global__ void GPUROIPoolForward(
   int index = blockIdx.x * blockDim.x + threadIdx.x;
   int offset = blockDim.x * gridDim.x;
   for (size_t i = index; i < nthreads; i += offset) {
+    // use the grid-stride loop variable i, not the fixed thread index, so
+    // iterations after the first one compute the right output coordinates
-    int pw = index % pooled_width;
-    int ph = (index / pooled_width) % pooled_height;
-    int c = (index / pooled_width / pooled_height) % channels;
-    int n = index / pooled_width / pooled_height / channels;
+    int pw = i % pooled_width;
+    int ph = (i / pooled_width) % pooled_height;
+    int c = (i / pooled_width / pooled_height) % channels;
+    int n = i / pooled_width / pooled_height / channels;
 
     const int64_t* offset_input_rois = input_rois + n * kROISize;
     int roi_batch_ind = roi_batch_id_data[n];
@@ -52,14 +52,19 @@ __global__ void GPUROIPoolForward(
     int roi_width = max(roi_end_w - roi_start_w + 1, 1);
     int roi_height = max(roi_end_h - roi_start_h + 1, 1);
-    T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height);
-    T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width);
-
-    int hstart = static_cast<int>(floor(static_cast<T>(ph) * bin_size_h));
-    int wstart = static_cast<int>(floor(static_cast<T>(pw) * bin_size_w));
-    int hend = static_cast<int>(ceil(static_cast<T>(ph + 1) * bin_size_h));
-    int wend = static_cast<int>(ceil(static_cast<T>(pw + 1) * bin_size_w));
+    int hstart = static_cast<int>(floor(static_cast<T>(ph) *
+                                        static_cast<T>(roi_height) /
+                                        static_cast<T>(pooled_height)));
+    int wstart = static_cast<int>(floor(static_cast<T>(pw) *
+                                        static_cast<T>(roi_width) /
+                                        static_cast<T>(pooled_width)));
+    int hend = static_cast<int>(ceil(static_cast<T>(ph + 1) *
+                                     static_cast<T>(roi_height) /
+                                     static_cast<T>(pooled_height)));
+    int wend = static_cast<int>(ceil(static_cast<T>(pw + 1) *
+                                     static_cast<T>(roi_width) /
+                                     static_cast<T>(pooled_width)));
     hstart = min(max(hstart + roi_start_h, 0), height);
     hend = min(max(hend + roi_start_h, 0), height);
     wstart = min(max(wstart + roi_start_w, 0), width);
@@ -79,9 +84,9 @@ __global__ void GPUROIPoolForward(
         }
       }
     }
-    output_data[index] = maxval;
+    output_data[i] = maxval;
     if (argmax_data) {
-      argmax_data[index] = maxidx;
+      argmax_data[i] = maxidx;
     }
   }
 }
@@ -96,10 +101,10 @@ __global__ void GPUROIPoolBackward(
   int index = blockIdx.x * blockDim.x + threadIdx.x;
   int
offset = blockDim.x * gridDim.x; for (int i = index; i < nthreads; i += offset) { - int pw = index % pooled_width; - int ph = (index / pooled_width) % pooled_height; - int c = (index / pooled_width / pooled_height) % channels; - int n = index / pooled_width / pooled_height / channels; + int pw = i % pooled_width; + int ph = (i / pooled_width) % pooled_height; + int c = (i / pooled_width / pooled_height) % channels; + int n = i / pooled_width / pooled_height / channels; int roi_batch_ind = roi_batch_id_data[n]; int input_offset = (roi_batch_ind * channels + c) * height * width; @@ -138,6 +143,7 @@ class GPUROIPoolOpKernel : public framework::OpKernel { int width = in_dims[3]; int rois_num = rois->dims()[0]; + if (rois_num == 0) return; int output_size = out->numel(); diff --git a/paddle/fluid/operators/row_conv_op.cc b/paddle/fluid/operators/row_conv_op.cc index 23f720da0b..10b1b0c899 100644 --- a/paddle/fluid/operators/row_conv_op.cc +++ b/paddle/fluid/operators/row_conv_op.cc @@ -76,26 +76,25 @@ class RowConvGradOp : public framework::OperatorWithKernel { class RowConvOpMaker : public framework::OpProtoAndCheckerMaker { public: - RowConvOpMaker(OpProto *proto, OpAttrChecker *op_checker) - : framework::OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("X", - "(LoDTensor), the input(X) is a LodTensor, which supports " + "the input(X) is a LodTensor, which supports " "variable time-length input sequences. The underlying tensor " "in this LoDTensor is a matrix with shape (T x N), where T " "is the total time steps in this mini-batch and N is the input " "data dimension."); AddInput("Filter", - "(Tensor), the input(Filter) is a learnable parameter. It " + "the input(Filter) is a learnable parameter. It " "is a 2-D tensor with shape (future_context x N), where, " "future_context is the future context length and N is the data " "dimension."); AddOutput("Out", - "(LoDTensor), the output(Out) is a LodTensor, which supports " + "the output(Out) is a LodTensor, which supports " "variable time-length input sequences. The underlying tensor " "in this LodTensor is a matrix with shape T x N, i.e., the " "same shape as X."); AddComment(R"DOC( -Row-convolution Operator. +:strong:`Row-convolution operator` The row convolution is called lookahead convolution. This operator was introduced in the following paper for DeepSpeech2: @@ -115,9 +114,23 @@ and a filter ($W$) of size $context \times d$, the output sequence is convolved as: $$ -out_{i, :} = \sum_{j=i}^{i + context} in_{j,:} \dot W_{i-j, :} +out_{i, :} = \\sum_{j=i}^{i + context} in_{j,:} \\cdot W_{i-j, :} $$ +In the above equation: + +* $Out_{i}$: The i-th row of output variable with shape [1, D]. + +* $\\tau$: Future context size. + +* $X_{j}$: The j-th row of input variable with shape [1, D]. + +* $W_{i-j}$: The (i-j)-th row of parameters with shape [1, D]. + +More details about row_conv please refer to +the design document +https://github.com/PaddlePaddle/Paddle/issues/2228#issuecomment-303903645 . + )DOC"); } }; diff --git a/paddle/fluid/operators/save_combine_op.cc b/paddle/fluid/operators/save_combine_op.cc index 94703393bf..cfee920708 100644 --- a/paddle/fluid/operators/save_combine_op.cc +++ b/paddle/fluid/operators/save_combine_op.cc @@ -18,6 +18,7 @@ limitations under the License. 
*/ #include #include #include "paddle/fluid/framework/data_type.h" +#include "paddle/fluid/framework/data_type_transform.h" #include "paddle/fluid/framework/framework.pb.h" #include "paddle/fluid/framework/lod_tensor.h" #include "paddle/fluid/framework/op_registry.h" @@ -69,6 +70,7 @@ class SaveCombineOp : public framework::OperatorBase { const platform::Place &place) const override { auto filename = Attr("file_path"); auto overwrite = Attr("overwrite"); + auto save_as_fp16 = Attr("save_as_fp16"); bool is_present = FileExists(filename); if (is_present && !overwrite) { @@ -100,8 +102,24 @@ class SaveCombineOp : public framework::OperatorBase { inp_var_names[i]); auto &tensor = var->Get(); - // Serialize tensor - framework::SerializeToStream(fout, tensor, dev_ctx); + // Serialize tensors one by one + + // Check types to see if a fp16 transformation is required + auto in_dtype = framework::ToDataType(tensor.type()); + auto out_dtype = + save_as_fp16 ? framework::proto::VarType::FP16 : in_dtype; + + if (in_dtype != out_dtype) { + auto in_kernel_type = framework::OpKernelType(in_dtype, place); + auto out_kernel_type = framework::OpKernelType(out_dtype, place); + framework::LoDTensor out; + // copy LoD info to the new tensor + out.set_lod(tensor.lod()); + framework::TransDataType(in_kernel_type, out_kernel_type, tensor, &out); + framework::SerializeToStream(fout, out, dev_ctx); + } else { + framework::SerializeToStream(fout, tensor, dev_ctx); + } } fout.close(); } @@ -109,8 +127,7 @@ class SaveCombineOp : public framework::OperatorBase { class SaveCombineOpProtoMaker : public framework::OpProtoAndCheckerMaker { public: - SaveCombineOpProtoMaker(OpProto *proto, OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput( "X", "(vector) Input LoDTensors that need to be saved together in a file.") @@ -125,6 +142,12 @@ to a file on disk. "(boolean, default true)" "Overwrite the output file if it exists.") .SetDefault(true); + AddAttr("save_as_fp16", + "(boolean, default false)" + "If true, the tensor will be converted to float16 data " + "type and then saved. Otherwise, the tensor will be " + "directly saved without data type conversion.") + .SetDefault(false); AddAttr( "file_path", "(string)" diff --git a/paddle/fluid/operators/save_load_combine_op_test.cc b/paddle/fluid/operators/save_load_combine_op_test.cc index 2773c32a0a..4743e0d949 100644 --- a/paddle/fluid/operators/save_load_combine_op_test.cc +++ b/paddle/fluid/operators/save_load_combine_op_test.cc @@ -17,15 +17,17 @@ limitations under the License. 
*/ #include #include "gtest/gtest.h" #include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/platform/float16.h" USE_NO_KERNEL_OP(save_combine); USE_NO_KERNEL_OP(load_combine); -int* CreateForSaveCombineOp(int x, int y, const std::vector& lod_info, - std::string var_name, - const paddle::platform::CPUPlace& place, - paddle::framework::Scope* scope, - paddle::framework::LoD* expect_lod) { +template +T* CreateForSaveCombineOp(int x, int y, const std::vector& lod_info, + std::string var_name, + const paddle::platform::CPUPlace& place, + paddle::framework::Scope* scope, + paddle::framework::LoD* expect_lod) { auto var = scope->Var(var_name); auto tensor = var->GetMutable(); tensor->Resize({x, y}); @@ -34,9 +36,10 @@ int* CreateForSaveCombineOp(int x, int y, const std::vector& lod_info, (*expect_lod)[0].push_back(lod_info[i]); } tensor->set_lod(*expect_lod); - int* expect = tensor->mutable_data(place); + T* expect = tensor->mutable_data(place); for (int64_t i = 0; i < tensor->numel(); ++i) { - expect[i] = static_cast(i); + expect[i] = static_cast( + static_cast(i)); // For FP16, we intend to do float(float16(i)) } return expect; } @@ -48,18 +51,20 @@ paddle::framework::LoDTensor* GeneratePlaceholderBeforeLoad( return target; } -int* GetValuesAfterLoadCombineOp(paddle::framework::LoDTensor* target, - const paddle::framework::Scope& scope, - paddle::framework::LoD* actual_lod) { - int* actual = target->data(); +template +T* GetValuesAfterLoadCombineOp(paddle::framework::LoDTensor* target, + const paddle::framework::Scope& scope, + paddle::framework::LoD* actual_lod) { + T* actual = target->data(); *actual_lod = target->lod(); return actual; } -void CheckValues(int* expect, int* actual, paddle::framework::LoD expect_lod, - paddle::framework::LoD actual_lod, const int& numel) { - for (int64_t i = 0; i < numel; ++i) { - EXPECT_EQ(expect[i], actual[i]); +template +void CheckValues(T* expect, U* actual, const paddle::framework::LoD& expect_lod, + const paddle::framework::LoD& actual_lod, const int& numel) { + for (int i = 0; i < numel; ++i) { + EXPECT_EQ(expect[i], static_cast(actual[i])); } EXPECT_EQ(expect_lod.size(), actual_lod.size()); for (size_t i = 0; i < expect_lod.size(); ++i) { @@ -78,26 +83,26 @@ TEST(SaveLoadCombineOp, CPU) { std::vector lod1 = {0, 1, 2, 3, 10}; int numel1 = 100; paddle::framework::LoD expect_lod1; - int* expect1 = CreateForSaveCombineOp(10, 10, lod1, "test_var1", place, - &scope, &expect_lod1); + int* expect1 = CreateForSaveCombineOp(10, 10, lod1, "test_var1", + place, &scope, &expect_lod1); std::vector lod2 = {0, 2, 5, 10}; int numel2 = 200; paddle::framework::LoD expect_lod2; - int* expect2 = CreateForSaveCombineOp(10, 20, lod2, "test_var2", place, - &scope, &expect_lod2); + int* expect2 = CreateForSaveCombineOp(10, 20, lod2, "test_var2", + place, &scope, &expect_lod2); std::vector lod3 = {0, 2, 3, 20}; int numel3 = 4000; paddle::framework::LoD expect_lod3; - int* expect3 = CreateForSaveCombineOp(20, 200, lod3, "test_var3", place, - &scope, &expect_lod3); + int* expect3 = CreateForSaveCombineOp(20, 200, lod3, "test_var3", + place, &scope, &expect_lod3); std::vector lod4 = {0, 1, 20}; int numel4 = 1000; paddle::framework::LoD expect_lod4; - int* expect4 = CreateForSaveCombineOp(20, 50, lod4, "test_var4", place, - &scope, &expect_lod4); + int* expect4 = CreateForSaveCombineOp(20, 50, lod4, "test_var4", + place, &scope, &expect_lod4); // Set attributes std::string filename = "check_tensor.ls"; @@ -123,15 +128,176 @@ TEST(SaveLoadCombineOp, CPU) { 
load_combine_op->Run(scope, place); paddle::framework::LoD actual_lod1, actual_lod2, actual_lod3, actual_lod4; - int* actual1 = GetValuesAfterLoadCombineOp(target1, scope, &actual_lod1); - int* actual2 = GetValuesAfterLoadCombineOp(target2, scope, &actual_lod2); - int* actual3 = GetValuesAfterLoadCombineOp(target3, scope, &actual_lod3); - int* actual4 = GetValuesAfterLoadCombineOp(target4, scope, &actual_lod4); - - CheckValues(expect1, actual1, expect_lod1, actual_lod1, numel1); - CheckValues(expect2, actual2, expect_lod2, actual_lod2, numel2); - CheckValues(expect3, actual3, expect_lod3, actual_lod3, numel3); - CheckValues(expect4, actual4, expect_lod4, actual_lod4, numel4); + int* actual1 = GetValuesAfterLoadCombineOp(target1, scope, &actual_lod1); + int* actual2 = GetValuesAfterLoadCombineOp(target2, scope, &actual_lod2); + int* actual3 = GetValuesAfterLoadCombineOp(target3, scope, &actual_lod3); + int* actual4 = GetValuesAfterLoadCombineOp(target4, scope, &actual_lod4); + + CheckValues(expect1, actual1, expect_lod1, actual_lod1, numel1); + CheckValues(expect2, actual2, expect_lod2, actual_lod2, numel2); + CheckValues(expect3, actual3, expect_lod3, actual_lod3, numel3); + CheckValues(expect4, actual4, expect_lod4, actual_lod4, numel4); +} + +// FP16 version of SaveLoadCombineOp Test, only altering the saving aspect +// to save as FP16. +TEST(SaveCombineFP16Op, CPU) { + paddle::framework::Scope scope; + paddle::platform::CPUPlace place; + + std::vector lod1 = {0, 1, 2, 3, 10}; + int numel1 = 100; + paddle::framework::LoD expect_lod1; + float* expect1 = CreateForSaveCombineOp( + 10, 10, lod1, "test_var1", place, &scope, &expect_lod1); + + std::vector lod2 = {0, 2, 5, 10}; + int numel2 = 200; + paddle::framework::LoD expect_lod2; + float* expect2 = CreateForSaveCombineOp( + 10, 20, lod2, "test_var2", place, &scope, &expect_lod2); + + std::vector lod3 = {0, 20}; + int numel3 = 4000; + paddle::framework::LoD expect_lod3; + float* expect3 = CreateForSaveCombineOp( + 20, 200, lod3, "test_var3", place, &scope, &expect_lod3); + + std::vector lod4 = {0, 1, 20}; + int numel4 = 1000; + paddle::framework::LoD expect_lod4; + float* expect4 = CreateForSaveCombineOp( + 20, 50, lod4, "test_var4", place, &scope, &expect_lod4); + + // Set attributes + std::string filename = "check_tensor_fp16_save.ls"; + paddle::framework::AttributeMap attrs; + attrs.insert({"file_path", std::string(filename)}); + attrs.insert({"save_as_fp16", true}); + + // Run the save_combine_op + auto save_combine_op = paddle::framework::OpRegistry::CreateOp( + "save_combine", + {{"X", {"test_var1", "test_var2", "test_var3", "test_var4"}}}, {}, attrs); + save_combine_op->Run(scope, place); + + // Set up output vars + auto target1 = GeneratePlaceholderBeforeLoad("out_var1", &scope); + auto target2 = GeneratePlaceholderBeforeLoad("out_var2", &scope); + auto target3 = GeneratePlaceholderBeforeLoad("out_var3", &scope); + auto target4 = GeneratePlaceholderBeforeLoad("out_var4", &scope); + + // Run the load_combine_op + auto load_combine_op = paddle::framework::OpRegistry::CreateOp( + "load_combine", {}, + {{"Out", {"out_var1", "out_var2", "out_var3", "out_var4"}}}, attrs); + load_combine_op->Run(scope, place); + + paddle::framework::LoD actual_lod1, actual_lod2, actual_lod3, actual_lod4; + paddle::platform::float16* actual1 = + GetValuesAfterLoadCombineOp(target1, scope, + &actual_lod1); + paddle::platform::float16* actual2 = + GetValuesAfterLoadCombineOp(target2, scope, + &actual_lod2); + paddle::platform::float16* actual3 = + 
GetValuesAfterLoadCombineOp(target3, scope, + &actual_lod3); + paddle::platform::float16* actual4 = + GetValuesAfterLoadCombineOp(target4, scope, + &actual_lod4); + + CheckValues(expect1, actual1, expect_lod1, + actual_lod1, numel1); + CheckValues(expect2, actual2, expect_lod2, + actual_lod2, numel2); + CheckValues(expect3, actual3, expect_lod3, + actual_lod3, numel3); + CheckValues(expect4, actual4, expect_lod4, + actual_lod4, numel4); +} + +// FP16 version of SaveLoadCombineOp Test, only altering the loading aspect +// to load tensors with FP16 precision. +TEST(LoadCombineFP16Op, CPU) { + paddle::framework::Scope scope; + paddle::platform::CPUPlace place; + + std::vector lod1 = {0, 1, 2, 3, 10}; + int numel1 = 100; + paddle::framework::LoD expect_lod1; + float* expect1 = CreateForSaveCombineOp( + 10, 10, lod1, "test_var1", place, &scope, &expect_lod1); + + std::vector lod2 = {0, 2, 5, 10}; + int numel2 = 200; + paddle::framework::LoD expect_lod2; + float* expect2 = CreateForSaveCombineOp( + 10, 20, lod2, "test_var2", place, &scope, &expect_lod2); + + std::vector lod3 = {0, 20}; + int numel3 = 4000; + paddle::framework::LoD expect_lod3; + float* expect3 = CreateForSaveCombineOp( + 20, 200, lod3, "test_var3", place, &scope, &expect_lod3); + + std::vector lod4 = {0, 1, 20}; + int numel4 = 1000; + paddle::framework::LoD expect_lod4; + float* expect4 = CreateForSaveCombineOp( + 20, 50, lod4, "test_var4", place, &scope, &expect_lod4); + + // Set attributes + std::string filename = "check_tensor_fp16_load.ls"; + paddle::framework::AttributeMap attrs; + attrs.insert({"file_path", std::string(filename)}); + + // Run the save_combine_op + auto save_combine_op = paddle::framework::OpRegistry::CreateOp( + "save_combine", + {{"X", {"test_var1", "test_var2", "test_var3", "test_var4"}}}, {}, attrs); + save_combine_op->Run(scope, place); + + // Set up output vars + auto load_var1 = scope.Var("out_var1"); + auto load_var2 = scope.Var("out_var2"); + auto load_var3 = scope.Var("out_var3"); + auto load_var4 = scope.Var("out_var4"); + + attrs.insert({"load_as_fp16", true}); + // Run the load_combine_op + auto load_combine_op = paddle::framework::OpRegistry::CreateOp( + "load_combine", {}, + {{"Out", {"out_var1", "out_var2", "out_var3", "out_var4"}}}, attrs); + load_combine_op->Run(scope, place); + + auto* target1 = load_var1->GetMutable(); + auto* target2 = load_var2->GetMutable(); + auto* target3 = load_var3->GetMutable(); + auto* target4 = load_var4->GetMutable(); + + paddle::framework::LoD actual_lod1, actual_lod2, actual_lod3, actual_lod4; + paddle::platform::float16* actual1 = + GetValuesAfterLoadCombineOp(target1, scope, + &actual_lod1); + paddle::platform::float16* actual2 = + GetValuesAfterLoadCombineOp(target2, scope, + &actual_lod2); + paddle::platform::float16* actual3 = + GetValuesAfterLoadCombineOp(target3, scope, + &actual_lod3); + paddle::platform::float16* actual4 = + GetValuesAfterLoadCombineOp(target4, scope, + &actual_lod4); + + CheckValues(expect1, actual1, expect_lod1, + actual_lod1, numel1); + CheckValues(expect2, actual2, expect_lod2, + actual_lod2, numel2); + CheckValues(expect3, actual3, expect_lod3, + actual_lod3, numel3); + CheckValues(expect4, actual4, expect_lod4, + actual_lod4, numel4); } // Test with original SaveLoadTest @@ -141,7 +307,7 @@ TEST(SaveLoadTestWithCombineOp, CPU) { auto var = scope.Var("test_var"); auto tensor = var->GetMutable(); - tensor->Resize({3, 10}); + tensor->Resize({3, 4000}); paddle::framework::LoD expect_lod; expect_lod.resize(1); 
expect_lod[0].push_back(0); diff --git a/paddle/fluid/operators/save_load_op_test.cc b/paddle/fluid/operators/save_load_op_test.cc index 74385ee475..ccaea0eef2 100644 --- a/paddle/fluid/operators/save_load_op_test.cc +++ b/paddle/fluid/operators/save_load_op_test.cc @@ -63,14 +63,21 @@ TEST(SaveLoadOp, CPU) { } } -TEST(SaveLoadFP16Op, CPU) { +TEST(SaveFP16Op, CPU) { paddle::framework::Scope scope; paddle::platform::CPUPlace place; auto var = scope.Var("test_var"); auto tensor = var->GetMutable(); tensor->Resize({3, 10}); + paddle::framework::LoD expect_lod; + expect_lod.resize(1); + expect_lod[0].push_back(0); + expect_lod[0].push_back(1); + expect_lod[0].push_back(2); + expect_lod[0].push_back(3); + tensor->set_lod(expect_lod); float* expect = tensor->mutable_data(place); for (int64_t i = 0; i < tensor->numel(); ++i) { expect[i] = static_cast(paddle::platform::float16(i)); @@ -93,4 +100,61 @@ TEST(SaveLoadFP16Op, CPU) { for (int64_t i = 0; i < tensor->numel(); ++i) { EXPECT_EQ(expect[i], static_cast(actual[i])); } + auto& actual_lod = target->lod(); + EXPECT_EQ(expect_lod.size(), actual_lod.size()); + for (size_t i = 0; i < expect_lod.size(); ++i) { + for (size_t j = 0; j < expect_lod[i].size(); ++j) { + EXPECT_EQ(expect_lod[i][j], actual_lod[i][j]); + } + } +} + +TEST(LoadFP16Op, CPU) { + paddle::framework::Scope scope; + paddle::platform::CPUPlace place; + + auto var = scope.Var("test_var"); + auto tensor = var->GetMutable(); + tensor->Resize({3, 10}); + + paddle::framework::LoD expect_lod; + expect_lod.resize(1); + expect_lod[0].push_back(0); + expect_lod[0].push_back(1); + expect_lod[0].push_back(2); + expect_lod[0].push_back(3); + + tensor->set_lod(expect_lod); + float* expect = tensor->mutable_data(place); + for (int64_t i = 0; i < tensor->numel(); ++i) { + expect[i] = static_cast(paddle::platform::float16(i)); + } + + paddle::framework::AttributeMap attrs; + attrs.insert({"file_path", std::string("tensor.save")}); + attrs.insert({"load_as_fp16", true}); + + auto save_op = paddle::framework::OpRegistry::CreateOp( + "save", {{"X", {"test_var"}}}, {}, attrs); + save_op->Run(scope, place); + + auto load_var = scope.Var("out_var"); + load_var->GetMutable(); + auto load_op = paddle::framework::OpRegistry::CreateOp( + "load", {}, {{"Out", {"out_var"}}}, attrs); + load_op->Run(scope, place); + + auto target = load_var->Get(); + paddle::platform::float16* actual = target.data(); + for (int64_t i = 0; i < tensor->numel(); ++i) { + EXPECT_EQ(expect[i], static_cast(actual[i])); + } + + auto& actual_lod = target.lod(); + EXPECT_EQ(expect_lod.size(), actual_lod.size()); + for (size_t i = 0; i < expect_lod.size(); ++i) { + for (size_t j = 0; j < expect_lod[i].size(); ++j) { + EXPECT_EQ(expect_lod[i][j], actual_lod[i][j]); + } + } } diff --git a/paddle/fluid/operators/save_op.cc b/paddle/fluid/operators/save_op.cc index dcc1b9ec20..201a51130d 100644 --- a/paddle/fluid/operators/save_op.cc +++ b/paddle/fluid/operators/save_op.cc @@ -22,11 +22,17 @@ limitations under the License. */ #include "paddle/fluid/framework/framework.pb.h" #include "paddle/fluid/framework/lod_tensor.h" #include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/framework/selected_rows.h" +#include "paddle/fluid/framework/variable.h" #include "paddle/fluid/platform/device_context.h" namespace paddle { namespace operators { +// define LOOKUP_TABLE_PATH for checkpoint notify to save lookup table variables +// to directory specified. 
+constexpr char LOOKUP_TABLE_PATH[] = "kLookupTablePath"; + // TODO(yuyang18): If the functions below are needed by other files, move them // to paddle::filesystem namespace. constexpr char kSEP = '/'; @@ -67,9 +73,27 @@ class SaveOp : public framework::OperatorBase { private: void RunImpl(const framework::Scope &scope, const platform::Place &place) const override { + auto iname = Input("X"); + auto *var = scope.FindVar(iname); + PADDLE_ENFORCE(var != nullptr, "Cannot find variable %s for save_op", + iname); + + if (var->IsType()) { + SaveLodTensor(place, var); + } else if (var->IsType()) { + SaveSelectedRows(scope, place, var); + } else { + PADDLE_ENFORCE( + false, + "SaveOp only support LoDTensor and SelectedRows, %s has wrong type", + iname); + } + } + + void SaveLodTensor(const platform::Place &place, + framework::Variable *var) const { auto filename = Attr("file_path"); auto overwrite = Attr("overwrite"); - auto save_as_fp16 = Attr("save_as_fp16"); if (FileExists(filename) && !overwrite) { PADDLE_THROW("%s is existed, cannot save to it when overwrite=false", @@ -78,26 +102,19 @@ class SaveOp : public framework::OperatorBase { MkDirRecursively(DirName(filename).c_str()); - // FIXME(yuyang18): We save variable to local file now, but we should change - // it to save an output stream. - std::ofstream fout(filename); - PADDLE_ENFORCE(static_cast(fout), "Cannot open %s to write", - filename); - - auto iname = Input("X"); - auto *var = scope.FindVar(iname); - PADDLE_ENFORCE(var != nullptr, "Cannot find variable %s for save_op", - iname); - - PADDLE_ENFORCE(var->IsType(), - "SaveOp only support LoDTensor, %s has wrong type", iname); - auto &tensor = var->Get(); // get device context from pool platform::DeviceContextPool &pool = platform::DeviceContextPool::Instance(); auto &dev_ctx = *pool.Get(place); + // FIXME(yuyang18): We save variable to local file now, but we should change + // it to save an output stream. + std::ofstream fout(filename); + PADDLE_ENFORCE(static_cast(fout), "Cannot open %s to write", + filename); + + auto save_as_fp16 = Attr("save_as_fp16"); auto in_dtype = framework::ToDataType(tensor.type()); auto out_dtype = save_as_fp16 ? framework::proto::VarType::FP16 : in_dtype; @@ -112,18 +129,43 @@ class SaveOp : public framework::OperatorBase { } else { framework::SerializeToStream(fout, tensor, dev_ctx); } + fout.close(); + } + + void SaveSelectedRows(const framework::Scope &scope, + const platform::Place &place, + framework::Variable *var) const { + auto *lt_var = scope.FindVar(LOOKUP_TABLE_PATH)->GetMutable(); + PADDLE_ENFORCE( + lt_var != nullptr, + "Can not find variable kLookupTablePath for SaveSelectedRows"); + std::string filename = lt_var->data(); + VLOG(4) << "SaveSelectedRows get File name: " << filename; + + auto &selectedRows = var->Get(); + + // get device context from pool + platform::DeviceContextPool &pool = platform::DeviceContextPool::Instance(); + auto &dev_ctx = *pool.Get(place); + + // FIXME(yuyang18): We save variable to local file now, but we should change + // it to save an output stream. 
+ std::ofstream fout(filename); + PADDLE_ENFORCE(static_cast(fout), "Cannot open %s to write", + filename); + framework::SerializeToStream(fout, selectedRows, dev_ctx); + fout.close(); } }; class SaveOpProtoMaker : public framework::OpProtoAndCheckerMaker { public: - SaveOpProtoMaker(OpProto *proto, OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { - AddInput("X", "(Tensor ) Input tensor to be saved"); + void Make() override { + AddInput("X", "(Tensor ) Input LoDTensor and SelectedRows to be saved"); AddComment(R"DOC( Save operator -This operator will serialize and write a tensor variable to file on disk. +This operator will serialize and write LoDTensor / SelectedRows variable to file on disk. )DOC"); AddAttr("overwrite", "(boolean, default true)" @@ -143,9 +185,26 @@ This operator will serialize and write a tensor variable to file on disk. } }; +class SaveOpVarTypeInference : public framework::VarTypeInference { + public: + void operator()(const framework::OpDesc &op_desc, + framework::BlockDesc *block) const override { + auto out_var_name = op_desc.Output(LOOKUP_TABLE_PATH).front(); + auto &out_var = block->FindRecursiveOrCreateVar(out_var_name); + auto var_type = framework::proto::VarType::RAW; + out_var.SetType(var_type); + } +}; + +class SaveOpShapeInference : public framework::InferShapeBase { + public: + void operator()(framework::InferShapeContext *ctx) const override {} +}; } // namespace operators } // namespace paddle namespace ops = paddle::operators; -REGISTER_OPERATOR(save, ops::SaveOp, ops::SaveOpProtoMaker); +REGISTER_OPERATOR(save, ops::SaveOp, paddle::framework::EmptyGradOpMaker, + ops::SaveOpProtoMaker, ops::SaveOpVarTypeInference, + ops::SaveOpShapeInference); diff --git a/paddle/fluid/operators/scale_op.cc b/paddle/fluid/operators/scale_op.cc index 7dcf33c989..7f8822e400 100644 --- a/paddle/fluid/operators/scale_op.cc +++ b/paddle/fluid/operators/scale_op.cc @@ -37,18 +37,17 @@ class ScaleOp : public framework::OperatorWithKernel { class ScaleOpMaker : public framework::OpProtoAndCheckerMaker { public: - ScaleOpMaker(OpProto *proto, OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("X", "(Tensor) Input tensor of scale operator."); AddOutput("Out", "(Tensor) Output tensor of scale operator."); AddComment(R"DOC( -Scale operator +**Scale operator** + +Multiply the input tensor with a float scalar to scale the input tensor. 
$$Out = scale*X$$ )DOC"); - AddAttr("scale", - "(float, default 1.0)" - "The scaling factor of the scale operator.") + AddAttr("scale", "The scaling factor of the scale operator.") .SetDefault(1.0); } }; diff --git a/paddle/fluid/operators/scatter_op.cc b/paddle/fluid/operators/scatter_op.cc index 95b12455ea..bf5e0d8644 100644 --- a/paddle/fluid/operators/scatter_op.cc +++ b/paddle/fluid/operators/scatter_op.cc @@ -78,8 +78,7 @@ class ScatterGradOp : public framework::OperatorWithKernel { class ScatterOpMaker : public framework::OpProtoAndCheckerMaker { public: - ScatterOpMaker(OpProto* proto, OpAttrChecker* op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("X", "The source input of scatter op"); AddInput("Ids", "The index input of scatter op where X will be updated"); AddInput("Updates", "The updated value of updates op"); diff --git a/paddle/fluid/operators/select_op.cc b/paddle/fluid/operators/select_op.cc index 876d8acf0d..e71841d4d1 100644 --- a/paddle/fluid/operators/select_op.cc +++ b/paddle/fluid/operators/select_op.cc @@ -380,8 +380,7 @@ class SelectOp : public framework::OperatorBase { class SelectOpMaker : public framework::OpProtoAndCheckerMaker { public: - SelectOpMaker(OpProto *proto, OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput(kX, "A set of variables, which are required by operators inside the " "cases of Select Op") diff --git a/paddle/fluid/operators/send_barrier_op.cc b/paddle/fluid/operators/send_barrier_op.cc index 12b844daaa..1866a86048 100644 --- a/paddle/fluid/operators/send_barrier_op.cc +++ b/paddle/fluid/operators/send_barrier_op.cc @@ -19,8 +19,9 @@ limitations under the License. */ #include "paddle/fluid/framework/framework.pb.h" #include "paddle/fluid/framework/lod_tensor.h" #include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/operators/detail/macros.h" -#include "paddle/fluid/operators/detail/grpc_client.h" +#include "paddle/fluid/platform/profiler.h" namespace paddle { namespace operators { @@ -36,32 +37,28 @@ class SendBarrierOp : public framework::OperatorBase { void RunImpl(const framework::Scope& scope, const platform::Place& place) const override { std::vector eps = Attr>("endpoints"); + bool sync_mode = Attr("sync_mode"); - auto client_var_name = Output("RPCClient"); - PADDLE_ENFORCE_NOT_NULL(scope.FindVar(client_var_name), - "Can not find variable '%s' in the scope.", - client_var_name); - auto* client_var = scope.FindVar(client_var_name); - detail::RPCClient* rpc_client = client_var->GetMutable(); + distributed::RPCClient* rpc_client = + distributed::RPCClient::GetInstance(); - // need to wait before sending send_barrier message - PADDLE_ENFORCE(rpc_client->Wait()); + VLOG(3) << "SendBarrierOp sync_mode:" << sync_mode; - for (auto& ep : eps) { - VLOG(3) << "send barrier, ep: " << ep; - rpc_client->AsyncSendBatchBarrier(ep); + // need to wait before sending send_barrier message + PADDLE_ENFORCE(rpc_client->Wait(), "internal error in RPCClient"); + if (sync_mode) { + for (auto& ep : eps) { + VLOG(3) << "send barrier, ep: " << ep; + rpc_client->AsyncSendBatchBarrier(ep); + } + PADDLE_ENFORCE(rpc_client->Wait(), "internal error in RPCClient"); } - PADDLE_ENFORCE(rpc_client->Wait()); } }; class SendBarrierOpMaker : public framework::OpProtoAndCheckerMaker { public: - SendBarrierOpMaker(OpProto* proto, OpAttrChecker* op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { - AddOutput("RPCClient", - "(RPCClient) The RPC client 
object which is"
-              "initialized at most once.");
+  void Make() {
     AddComment(R"DOC(
 SendBarrier operator
@@ -73,17 +70,7 @@ the Parameter Server would know all variables have been sent.
         "(string vector, default 127.0.0.1:6164)"
         "Server endpoints to send variables to.")
         .SetDefault({"127.0.0.1:6164"});
-  }
-};
-
-class SendBarrierOpVarTypeInference : public framework::VarTypeInference {
- public:
-  void operator()(const framework::OpDesc& op_desc,
-                  framework::BlockDesc* block) const override {
-    auto out_var_name = op_desc.Output("RPCClient").front();
-    auto& out_var = block->FindRecursiveOrCreateVar(out_var_name);
-    auto var_type = framework::proto::VarType::RAW;
-    out_var.SetType(var_type);
+    AddAttr<bool>("sync_mode", "work in sync_mode or not").SetDefault(true);
   }
 };
 
@@ -99,5 +86,4 @@
 namespace ops = paddle::operators;
 
 REGISTER_OPERATOR(send_barrier, ops::SendBarrierOp,
                   paddle::framework::EmptyGradOpMaker, ops::SendBarrierOpMaker,
-                  ops::SendBarrierOpVarTypeInference,
                   ops::SendBarrierOpShapeInference);
diff --git a/paddle/fluid/operators/send_op.cc b/paddle/fluid/operators/send_op.cc
index e4386b640a..3cd42f2d05 100644
--- a/paddle/fluid/operators/send_op.cc
+++ b/paddle/fluid/operators/send_op.cc
@@ -16,10 +16,9 @@ limitations under the License. */
 
 #include
 #include "paddle/fluid/framework/data_type.h"
-#include "paddle/fluid/framework/framework.pb.h"
 #include "paddle/fluid/framework/lod_tensor.h"
 #include "paddle/fluid/framework/op_registry.h"
-#include "paddle/fluid/operators/detail/grpc_client.h"
+#include "paddle/fluid/operators/detail/macros.h"
 #include "paddle/fluid/operators/send_recv_util.h"
 #include "paddle/fluid/platform/profiler.h"
 
@@ -36,98 +35,51 @@ class SendOp : public framework::OperatorBase {
   void RunImpl(const framework::Scope& scope,
                const platform::Place& place) const override {
     auto ins = Inputs("X");
-    auto outs = Outputs("Out");
-    std::vector<std::string> epmap = Attr<std::vector<std::string>>("epmap");
-    std::vector<std::string> endpoints =
-        Attr<std::vector<std::string>>("endpoints");
-    bool sync_mode = Attr<bool>("sync_mode");
+    std::vector<std::string> epmap = Attr<std::vector<std::string>>("epmap");
+    int sync_send = Attr<int>("sync_mode");
 
     platform::DeviceContextPool& pool = platform::DeviceContextPool::Instance();
     auto& ctx = *pool.Get(place);
 
-    // For profiling
-    platform::RecordEvent record_event(Type(), &ctx);
-
-    auto client_var_name = Output("RPCClient");
-    PADDLE_ENFORCE_NOT_NULL(scope.FindVar(client_var_name),
-                            "Can not find variable '%s' in the scope.",
-                            client_var_name);
-    auto* client_var = scope.FindVar(client_var_name);
-    detail::RPCClient* rpc_client = client_var->GetMutable<detail::RPCClient>();
+    distributed::RPCClient* rpc_client =
+        distributed::RPCClient::GetInstance();
 
     for (size_t i = 0; i < ins.size(); i++) {
       if (NeedSend(scope, ins[i])) {
         VLOG(3) << "sending " << ins[i] << " to " << epmap[i];
-        rpc_client->AsyncSendVariable(epmap[i], ctx, scope, ins[i]);
+        // TODO(Yancey1989): we need to use an IO threadpool which has
+        // a larger number of threads than the computing threadpool.
+ rpc_client->AsyncSendVar(epmap[i], ctx, scope, ins[i]); } else { VLOG(3) << "don't send no-initialied variable: " << ins[i]; } } - PADDLE_ENFORCE(rpc_client->Wait()); - - if (sync_mode) { - for (auto& ep : endpoints) { - VLOG(3) << "batch barrier, ep: " << ep; - rpc_client->AsyncSendBatchBarrier(ep); - } - PADDLE_ENFORCE(rpc_client->Wait()); - } - - if (outs.size() > 0) { - for (size_t i = 0; i < outs.size(); i++) { - VLOG(2) << "getting " << outs[i] << " from " << epmap[i]; - rpc_client->AsyncGetVariable(epmap[i], ctx, scope, outs[i]); - } - PADDLE_ENFORCE(rpc_client->Wait()); - // tell pservers that current trainer have called fetch - for (auto& ep : endpoints) { - VLOG(2) << "send fetch barrier, ep: " << ep; - rpc_client->AsyncSendFetchBarrier(ep); - } - PADDLE_ENFORCE(rpc_client->Wait()); + if (sync_send) { + PADDLE_ENFORCE(rpc_client->Wait(), "internal error in RPCClient"); } } }; class SendOpMaker : public framework::OpProtoAndCheckerMaker { public: - SendOpMaker(OpProto* proto, OpAttrChecker* op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { - AddInput("X", "(Tensor) Input tensor to be sent").AsDuplicable(); - AddOutput("Out", "(Tensor) Output tensor to be received from server") + void Make() { + AddInput("X", "(Tensor, SelectedRows) Input variables to be sent") .AsDuplicable(); - AddOutput("RPCClient", - "(RPCClient) The RPC client object which is" - "initialized at most once."); AddComment(R"DOC( Send operator -This operator will send tensor to recv_op at the parameter server. +This operator will send variables to listen_and_serve op at the parameter server. )DOC"); - // TODO(typhoonzero): remove this attr generate de-duplicated vector from - // epmap when initializing. - AddAttr>("endpoints", - "(string vector, default 127.0.0.1:6164)" - "Server endpoints to send variables to.") - .SetDefault({}); + AddAttr("sync_mode", + "(int, default 0)" + "sync send or async send.") + .SetDefault(0); AddAttr>("epmap", "(string vector, default 127.0.0.1:6164)" "Server endpoints in the order of input " "variables for mapping") - .SetDefault({}); - AddAttr("sync_mode", "work in sync_mode or not").SetDefault(true); - } -}; - -class SendOpVarTypeInference : public framework::VarTypeInference { - public: - void operator()(const framework::OpDesc& op_desc, - framework::BlockDesc* block) const override { - auto out_var_name = op_desc.Output("RPCClient").front(); - auto& out_var = block->FindRecursiveOrCreateVar(out_var_name); - auto var_type = framework::proto::VarType::RAW; - out_var.SetType(var_type); + .SetDefault({"127.0.0.1:6164"}); } }; @@ -142,5 +94,4 @@ class SendOpShapeInference : public framework::InferShapeBase { namespace ops = paddle::operators; REGISTER_OPERATOR(send, ops::SendOp, paddle::framework::EmptyGradOpMaker, - ops::SendOpMaker, ops::SendOpVarTypeInference, - ops::SendOpShapeInference); + ops::SendOpMaker, ops::SendOpShapeInference); diff --git a/paddle/fluid/operators/send_recv_op_test.cc b/paddle/fluid/operators/send_recv_op_test.cc index eb51f301bf..aee6180add 100644 --- a/paddle/fluid/operators/send_recv_op_test.cc +++ b/paddle/fluid/operators/send_recv_op_test.cc @@ -92,12 +92,16 @@ void InitSelectedRowsInScope(const p::CPUPlace &place, f::Scope *scope) { void AddOp(const std::string &type, const f::VariableNameMap &inputs, const f::VariableNameMap &outputs, f::AttributeMap attrs, - f::BlockDesc *block) { + f::BlockDesc *block, bool is_sparse) { // insert output for (auto kv : outputs) { for (auto v : kv.second) { auto var = block->Var(v); 
var->SetDataType(f::proto::VarType::FP32); + var->SetPersistable(true); + if (is_sparse) { + var->SetType(f::proto::VarType::SELECTED_ROWS); + } } } @@ -125,16 +129,20 @@ void StartServerNet(bool is_sparse, std::atomic *initialized) { // sub program run in listen_and_serv_op, for simple test we use sum f::ProgramDesc program; const auto &root_block = program.Block(0); + std::vector optimize_blocks; auto *optimize_block = program.AppendBlock(root_block); + optimize_blocks.push_back(optimize_block); + auto *prefetch_block = program.AppendBlock(root_block); // X for server side tensors, RX for received tensors, must be of same shape. - AddOp("sum", {{"X", {"x0", "x1"}}}, {{"Out", {"Out"}}}, {}, optimize_block); + AddOp("sum", {{"X", {"x0", "x1"}}}, {{"Out", {"Out"}}}, {}, optimize_block, + is_sparse); f::AttributeMap attrs; attrs.insert({"endpoint", std::string("127.0.0.1:0")}); attrs.insert({"Fanin", 1}); attrs.insert({"ParamList", std::vector({"Out"})}); attrs.insert({"GradList", std::vector({"x1"})}); - attrs.insert({"OptimizeBlock", optimize_block}); + attrs.insert({"optimize_blocks", optimize_blocks}); attrs.insert({"PrefetchBlock", prefetch_block}); attrs.insert({"grad_to_block_id", std::vector({""})}); attrs.insert({"sync_mode", true}); @@ -151,6 +159,7 @@ TEST(SendRecvOp, CPUDense) { std::thread server_thread(StartServerNet, false, &initialized); while (!initialized) { } + static_cast(listen_and_serv_op.get()) ->WaitServerReady(); @@ -170,9 +179,10 @@ TEST(SendRecvOp, CPUDense) { std::string endpoint = paddle::string::Sprintf("127.0.0.1:%d", selected_port); attrs.insert({"endpoints", std::vector({endpoint})}); attrs.insert({"epmap", std::vector({endpoint})}); - auto send_op = f::OpRegistry::CreateOp( - "send", {{"X", {"x1"}}}, - {{"Out", {"Out"}}, {"RPCClient", {"RPC_CLIENT_VAR"}}}, attrs); + const f::VariableNameMap &inputs = {{"X", {"x1"}}}; + const f::VariableNameMap &outputs = {{"Out", {"Out"}}}; + + auto send_op = f::OpRegistry::CreateOp("send", inputs, outputs, attrs); send_op->Run(scope, place); auto in_var = scope.Var("x1"); @@ -215,9 +225,8 @@ TEST(SendRecvOp, CPUSparse) { std::string endpoint = paddle::string::Sprintf("127.0.0.1:%d", selected_port); attrs.insert({"endpoints", std::vector({endpoint})}); attrs.insert({"epmap", std::vector({endpoint})}); - auto send_op = f::OpRegistry::CreateOp( - "send", {{"X", {"x1"}}}, - {{"Out", {"Out"}}, {"RPCClient", {"RPC_CLIENT_VAR"}}}, attrs); + auto send_op = f::OpRegistry::CreateOp("send", {{"X", {"x1"}}}, + {{"Out", {"Out"}}}, attrs); send_op->Run(scope, place); auto x0 = scope.Var("x0")->GetMutable(); diff --git a/paddle/fluid/operators/send_recv_util.h b/paddle/fluid/operators/send_recv_util.h index 113513eb6b..dc26c53c64 100644 --- a/paddle/fluid/operators/send_recv_util.h +++ b/paddle/fluid/operators/send_recv_util.h @@ -14,12 +14,19 @@ limitations under the License. */ #pragma once #include +#include "paddle/fluid/framework/ir/node.h" namespace paddle { namespace operators { inline bool NeedSend(const framework::Scope& scope, const std::string& varname) { + // dummy variable is only used in parallel executor to represent + // some dependency relationship, we don't need to send/recv it. + // TODO(paddle-dev): Why would parallel executor logic leaked into here? 
+ if (varname.find(framework::ir::Node::kControlDepVarName) != + std::string::npos) + return false; auto* var = scope.FindVar(varname); PADDLE_ENFORCE_NOT_NULL(var, "Can not find variable '%s' in the send side.", varname); diff --git a/paddle/fluid/operators/send_vars_op.cc b/paddle/fluid/operators/send_vars_op.cc deleted file mode 100644 index 56b3713d6a..0000000000 --- a/paddle/fluid/operators/send_vars_op.cc +++ /dev/null @@ -1,117 +0,0 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. */ - -#include // NOLINT -#include - -#include "paddle/fluid/framework/data_type.h" -#include "paddle/fluid/framework/lod_tensor.h" -#include "paddle/fluid/framework/op_registry.h" -#include "paddle/fluid/operators/detail/grpc_client.h" -#include "paddle/fluid/operators/send_recv_util.h" - -namespace paddle { -namespace operators { - -class SendVarsOp : public framework::OperatorBase { - public: - SendVarsOp(const std::string& type, const framework::VariableNameMap& inputs, - const framework::VariableNameMap& outputs, - const framework::AttributeMap& attrs) - : OperatorBase(type, inputs, outputs, attrs) {} - - void RunImpl(const framework::Scope& scope, - const platform::Place& place) const override { - auto ins = Inputs("X"); - - std::vector epmap = Attr>("epmap"); - int sync_send = Attr("sync_send"); - - platform::DeviceContextPool& pool = platform::DeviceContextPool::Instance(); - auto& ctx = *pool.Get(place); - - auto client_var_name = Output("RPCClient"); - PADDLE_ENFORCE_NOT_NULL(scope.FindVar(client_var_name), - "Can not find variable '%s' in the scope.", - client_var_name); - auto* client_var = scope.FindVar(client_var_name); - detail::RPCClient* rpc_client = client_var->GetMutable(); - - for (size_t i = 0; i < ins.size(); i++) { - if (NeedSend(scope, ins[i])) { - VLOG(3) << "sending " << ins[i] << " to " << epmap[i]; - // TODO(Yancey1989): we need to use an IO threadpool which has - // a larger number of threads than the computing threadpool. - rpc_client->AsyncSendVariable(epmap[i], ctx, scope, ins[i]); - } else { - VLOG(3) << "don't send no-initialied variable: " << ins[i]; - } - } - if (sync_send) { - rpc_client->Wait(); - } - } -}; - -class SendVarsOpMaker : public framework::OpProtoAndCheckerMaker { - public: - SendVarsOpMaker(OpProto* proto, OpAttrChecker* op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { - AddInput("X", "(Tensor, SelectedRows) Input variables to be sent") - .AsDuplicable(); - AddOutput("RPCClient", - "(RPCClient) The RPC client object which will be" - "initialized at most once."); - AddComment(R"DOC( -Send operator - -This operator will send variables to listen_and_serve op at the parameter server. 
-)DOC"); - AddAttr("sync_send", - "(int, default 0)" - "sync send or async send.") - .SetDefault(0); - AddAttr>("epmap", - "(string vector, default 127.0.0.1:6164)" - "Server endpoints in the order of input " - "variables for mapping") - .SetDefault({"127.0.0.1:6164"}); - } -}; - -class SendVarsOpVarTypeInference : public framework::VarTypeInference { - public: - void operator()(const framework::OpDesc& op_desc, - framework::BlockDesc* block) const override { - auto out_var_name = op_desc.Output("RPCClient").front(); - auto& out_var = block->FindRecursiveOrCreateVar(out_var_name); - auto var_type = framework::proto::VarType::RAW; - out_var.SetType(var_type); - } -}; - -class SendVarsOpShapeInference : public framework::InferShapeBase { - public: - void operator()(framework::InferShapeContext* ctx) const override {} -}; - -} // namespace operators -} // namespace paddle - -namespace ops = paddle::operators; - -REGISTER_OPERATOR(send_vars, ops::SendVarsOp, - paddle::framework::EmptyGradOpMaker, ops::SendVarsOpMaker, - ops::SendVarsOpVarTypeInference, - ops::SendVarsOpShapeInference); diff --git a/paddle/fluid/operators/sequence_concat_op.cc b/paddle/fluid/operators/sequence_concat_op.cc index 3c21903e3a..077b9a5f7d 100644 --- a/paddle/fluid/operators/sequence_concat_op.cc +++ b/paddle/fluid/operators/sequence_concat_op.cc @@ -43,8 +43,7 @@ class SequenceConcatOp : public framework::OperatorWithKernel { class SequenceConcatOpMaker : public framework::OpProtoAndCheckerMaker { public: - SequenceConcatOpMaker(OpProto* proto, OpAttrChecker* op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("X", "(LodTensorArray) Input is a vector of LoDTensor, " "each of which is a variable-length sequence or nested sequence.") diff --git a/paddle/fluid/operators/sequence_conv_op.cc b/paddle/fluid/operators/sequence_conv_op.cc index 94f4b49b00..ec6cb24350 100644 --- a/paddle/fluid/operators/sequence_conv_op.cc +++ b/paddle/fluid/operators/sequence_conv_op.cc @@ -102,8 +102,7 @@ class SequenceConvGradOp : public framework::OperatorWithKernel { class SequenceConvOpMaker : public framework::OpProtoAndCheckerMaker { public: - SequenceConvOpMaker(OpProto* proto, OpAttrChecker* op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput( "X", "(LoDTensor) the input(X) is a LodTensor, which supports " diff --git a/paddle/fluid/operators/sequence_erase_op.cc b/paddle/fluid/operators/sequence_erase_op.cc index 73c0e89512..1c86486157 100644 --- a/paddle/fluid/operators/sequence_erase_op.cc +++ b/paddle/fluid/operators/sequence_erase_op.cc @@ -37,8 +37,7 @@ class SequenceEraseOp : public framework::OperatorWithKernel { class SequenceEraseOpMaker : public framework::OpProtoAndCheckerMaker { public: - SequenceEraseOpMaker(OpProto* proto, OpAttrChecker* op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("X", "(2-D LoDTensor with the 2nd dim. 
equal to 1) " "Input LoDTensor of SequenceEraseOp."); diff --git a/paddle/fluid/operators/sequence_expand_op.cc b/paddle/fluid/operators/sequence_expand_op.cc index 84a35d7172..944c7f85e5 100644 --- a/paddle/fluid/operators/sequence_expand_op.cc +++ b/paddle/fluid/operators/sequence_expand_op.cc @@ -94,8 +94,7 @@ class SequenceExpandOp : public framework::OperatorWithKernel { class SequenceExpandOpMaker : public framework::OpProtoAndCheckerMaker { public: - SequenceExpandOpMaker(OpProto* proto, OpAttrChecker* op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("X", "(LoDTensor, default LoDTensor) A 2-D LoDTensor whose lod " "level is at most 1."); diff --git a/paddle/fluid/operators/sequence_expand_op.h b/paddle/fluid/operators/sequence_expand_op.h index d62c387c3e..39301e1ac0 100644 --- a/paddle/fluid/operators/sequence_expand_op.h +++ b/paddle/fluid/operators/sequence_expand_op.h @@ -151,9 +151,6 @@ struct SequenceExpandGradFunctor { const framework::Vector& x_lod, /*expand source lod*/ const framework::Vector& ref_lod, /*expand referenced lod*/ LoDTensor* dx) { - math::SetConstant set_zero; - set_zero(context, dx, static_cast(0)); - int dout_offset = 0; for (size_t i = 1; i < ref_lod.size(); ++i) { int repeat_num = ref_lod[i] - ref_lod[i - 1]; @@ -187,6 +184,10 @@ class SequenceExpandGradKernel : public framework::OpKernel { g_x->mutable_data(context.GetPlace()); g_x->set_lod(x->lod()); + auto& dev_ctx = context.template device_context(); + math::SetConstant set_zero; + set_zero(dev_ctx, g_x, static_cast(0)); + auto& y_lod = y->lod(); if (ref_level == -1) ref_level = y_lod.size() - 1; // just copy the gradient diff --git a/paddle/fluid/operators/sequence_pool_op.cc b/paddle/fluid/operators/sequence_pool_op.cc index 933c8c2623..5c6fd13d42 100644 --- a/paddle/fluid/operators/sequence_pool_op.cc +++ b/paddle/fluid/operators/sequence_pool_op.cc @@ -38,8 +38,7 @@ class SequencePoolOp : public framework::OperatorWithKernel { class SequencePoolOpMaker : public framework::OpProtoAndCheckerMaker { public: - SequencePoolOpMaker(OpProto* proto, OpAttrChecker* op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("X", "(LoDTensor) The variable-length input of SequencePoolOp"); AddOutput("Out", "(Tensor) The output of SequencePoolOp does not contain LoD " diff --git a/paddle/fluid/operators/sequence_reshape_op.cc b/paddle/fluid/operators/sequence_reshape_op.cc index a2999650b8..ef5e6f3210 100644 --- a/paddle/fluid/operators/sequence_reshape_op.cc +++ b/paddle/fluid/operators/sequence_reshape_op.cc @@ -42,8 +42,7 @@ class SequenceReshapeOp : public framework::OperatorWithKernel { class SequenceReshapeOpMaker : public framework::OpProtoAndCheckerMaker { public: - SequenceReshapeOpMaker(OpProto* proto, OpAttrChecker* op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("X", "(LoDTensor, default LoDTensor) A 2-D LoDTensor with shape " "being [N, M]."); diff --git a/paddle/fluid/operators/sequence_slice_op.cc b/paddle/fluid/operators/sequence_slice_op.cc index 7cd620af07..df9243dc04 100644 --- a/paddle/fluid/operators/sequence_slice_op.cc +++ b/paddle/fluid/operators/sequence_slice_op.cc @@ -79,8 +79,7 @@ class SequenceSliceGradOp : public framework::OperatorWithKernel { class SequenceSliceOpMaker : public framework::OpProtoAndCheckerMaker { public: - SequenceSliceOpMaker(OpProto* proto, OpAttrChecker* op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override 
{ AddInput("X", "(LoDTensor), " "the input of SequenceSliceOp."); diff --git a/paddle/fluid/operators/sequence_softmax_op.cc b/paddle/fluid/operators/sequence_softmax_op.cc index a0d47c12ba..c44f8206eb 100644 --- a/paddle/fluid/operators/sequence_softmax_op.cc +++ b/paddle/fluid/operators/sequence_softmax_op.cc @@ -57,8 +57,7 @@ class SequenceSoftmaxOp : public framework::OperatorWithKernel { class SequenceSoftmaxOpMaker : public framework::OpProtoAndCheckerMaker { public: - SequenceSoftmaxOpMaker(OpProto* proto, OpAttrChecker* op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("X", "(LoDTensor) 1-D or 2-D input LoDTensor with the 2-nd dimension " "of length 1."); diff --git a/paddle/fluid/operators/sgd_op.cc b/paddle/fluid/operators/sgd_op.cc index bd04c60ffa..fef230e42d 100644 --- a/paddle/fluid/operators/sgd_op.cc +++ b/paddle/fluid/operators/sgd_op.cc @@ -68,14 +68,14 @@ class SGDOpInferVarType : public framework::VarTypeInference { class SGDOpMaker : public framework::OpProtoAndCheckerMaker { public: - SGDOpMaker(OpProto* proto, OpAttrChecker* op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("Param", "(Tensor or SelectedRows) Input parameter"); AddInput("LearningRate", "(Tensor) Learning rate of SGD"); AddInput("Grad", "(Tensor or SelectedRows) Input gradient"); AddOutput("ParamOut", "(Tensor or SelectedRows, same with Param) " - "Output parameter, should share the same memory with Param"); + "Output parameter, should share the same memory with Param") + .Reuse("Param"); AddComment(R"DOC( SGD operator diff --git a/paddle/fluid/operators/sgd_op.h b/paddle/fluid/operators/sgd_op.h index f3e88b0a0b..2685ce217e 100644 --- a/paddle/fluid/operators/sgd_op.h +++ b/paddle/fluid/operators/sgd_op.h @@ -96,8 +96,12 @@ class SGDOpKernel : public framework::OpKernel { return; } - size_t param_row_width = param.value().numel() / param.rows().size(); - size_t grad_row_width = grad.value().numel() / grad.rows().size(); + auto param_row_width = param.value().dims()[1]; + auto grad_row_width = grad.value().dims()[1]; + VLOG(4) << " param rows: " << param.rows().size() + << " param memory rows: " << param.value().dims()[0] + << " grad rows: " << grad.rows().size() + << " grad memory rows: " << grad.value().dims()[0]; PADDLE_ENFORCE_EQ(param_row_width, grad_row_width, "param_row should have the same size with grad_row"); @@ -110,7 +114,7 @@ class SGDOpKernel : public framework::OpKernel { int64_t id_index = param.Index(grad.rows()[i]); PADDLE_ENFORCE_GE(id_index, static_cast(0), "id should be in the table"); - for (size_t j = 0; j < grad_row_width; j++) { + for (int64_t j = 0; j < grad_row_width; j++) { out_data[id_index * grad_row_width + j] -= lr[0] * grad_data[i * grad_row_width + j]; } diff --git a/paddle/fluid/operators/shape_op.cc b/paddle/fluid/operators/shape_op.cc new file mode 100644 index 0000000000..b44d5f8980 --- /dev/null +++ b/paddle/fluid/operators/shape_op.cc @@ -0,0 +1,57 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. */ + +#include "paddle/fluid/operators/shape_op.h" +#include "paddle/fluid/framework/op_registry.h" + +namespace paddle { +namespace operators { + +class ShapeOp : public framework::OperatorWithKernel { + public: + using framework::OperatorWithKernel::OperatorWithKernel; + + void InferShape(framework::InferShapeContext *ctx) const override { + PADDLE_ENFORCE(ctx->HasInput("Input"), + "Input (Input) of get_shape op should not be null."); + PADDLE_ENFORCE(ctx->HasOutput("Out"), + "Output (Out) of get_shape op should not be null."); + auto in_dim = ctx->GetInputDim("Input"); + ctx->SetOutputDim("Out", {in_dim.size()}); + } +}; + +class ShapeOpMaker : public framework::OpProtoAndCheckerMaker { + public: + void Make() override { + AddInput("Input", "(Tensor), The input tensor."); + AddOutput("Out", + "(Tensor), The shape of input tensor, the data type of the shape" + " is int64_t, will be on the same device with the input Tensor."); + AddComment(R"DOC( +Shape Operator + +Get the shape of input tensor. Only support CPU input Tensor now. +)DOC"); + } +}; + +} // namespace operators +} // namespace paddle + +namespace ops = paddle::operators; +REGISTER_OPERATOR(shape, ops::ShapeOp, ops::ShapeOpMaker, + paddle::framework::EmptyGradOpMaker); +REGISTER_OP_CPU_KERNEL(shape, ops::ShapeKernel, ops::ShapeKernel, + ops::ShapeKernel, ops::ShapeKernel); diff --git a/paddle/fluid/operators/shape_op.cu b/paddle/fluid/operators/shape_op.cu new file mode 100644 index 0000000000..7736a2a1e1 --- /dev/null +++ b/paddle/fluid/operators/shape_op.cu @@ -0,0 +1,20 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include "paddle/fluid/operators/shape_op.h" + +REGISTER_OP_CUDA_KERNEL(shape, paddle::operators::ShapeKernel, + paddle::operators::ShapeKernel, + paddle::operators::ShapeKernel, + paddle::operators::ShapeKernel); diff --git a/paddle/fluid/operators/shape_op.h b/paddle/fluid/operators/shape_op.h new file mode 100644 index 0000000000..3be86b66a5 --- /dev/null +++ b/paddle/fluid/operators/shape_op.h @@ -0,0 +1,38 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ + +#pragma once +#include +#include "paddle/fluid/framework/op_registry.h" + +namespace paddle { +namespace operators { + +using Tensor = framework::Tensor; + +template +class ShapeKernel : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext& ctx) const override { + auto* in_t = ctx.Input("Input"); + auto* out_t = ctx.Output("Out"); + auto out_data = out_t->mutable_data(platform::CPUPlace()); + auto in_dims = in_t->dims(); + for (int i = 0; i < in_dims.size(); ++i) { + out_data[i] = in_dims[i]; + } + } +}; +} // namespace operators +} // namespace paddle diff --git a/paddle/fluid/operators/shrink_rnn_memory_op.cc b/paddle/fluid/operators/shrink_rnn_memory_op.cc index a1871a8e7f..8146c5f561 100644 --- a/paddle/fluid/operators/shrink_rnn_memory_op.cc +++ b/paddle/fluid/operators/shrink_rnn_memory_op.cc @@ -69,8 +69,7 @@ class ShrinkRNNMemoryOp : public ArrayOp { class ShrinkRNNMemoryOpProtoMaker : public framework::OpProtoAndCheckerMaker { public: - ShrinkRNNMemoryOpProtoMaker(OpProto *proto, OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("X", "(LoDTensor) The RNN step memory to be shrinked."); AddInput("RankTable", "(LoDRankTable) The lod_rank_table of dynamic RNN."); AddInput("I", diff --git a/paddle/fluid/operators/sigmoid_cross_entropy_with_logits_op.cc b/paddle/fluid/operators/sigmoid_cross_entropy_with_logits_op.cc index 5db77d0493..c3b0fe3209 100644 --- a/paddle/fluid/operators/sigmoid_cross_entropy_with_logits_op.cc +++ b/paddle/fluid/operators/sigmoid_cross_entropy_with_logits_op.cc @@ -86,9 +86,7 @@ class SigmoidCrossEntropyWithLogitsGradOp class SigmoidCrossEntropyWithLogitsOpMaker : public framework::OpProtoAndCheckerMaker { public: - SigmoidCrossEntropyWithLogitsOpMaker(OpProto* proto, - OpAttrChecker* op_checker) - : framework::OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("X", "(Tensor, default Tensor), a 2-D tensor with shape N x D, " "where N is the batch size and D is the number of classes. " @@ -115,14 +113,14 @@ The logistic loss is given as follows: $$loss = -Labels * \log(\sigma(X)) - (1 - Labels) * \log(1 - \sigma(X))$$ -We know that $$\sigma(X) = (1 / (1 + \exp(-X)))$$. By substituting this we get: +We know that $$\sigma(X) = \\frac{1}{1 + \exp(-X)}$$. By substituting this we get: $$loss = X - X * Labels + \log(1 + \exp(-X))$$ For stability and to prevent overflow of $$\exp(-X)$$ when X < 0, we reformulate the loss as follows: - $$loss = \max(X, 0) - X * Labels + \log(1 + \exp(-|X|))$$ + $$loss = \max(X, 0) - X * Labels + \log(1 + \exp(-\|X\|))$$ Both the input `X` and `Labels` can carry the LoD (Level of Details) information. However the output only shares the LoD with input `X`. 
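As a quick aside on the loss reformulation documented above: a minimal standalone C++ sketch of the numerically stable form (not part of this patch; the helper name `StableSigmoidCrossEntropy` is made up for illustration). For X < 0, exp(-X) can overflow, which is why the DOC rewrites the loss as max(X, 0) - X * Labels + log(1 + exp(-|X|)):

#include <algorithm>
#include <cmath>

// Stable elementwise logistic loss on a raw logit x with label in {0, 1}.
// Algebraically equal to x - x * label + log(1 + exp(-x)), and therefore to
// -label * log(sigmoid(x)) - (1 - label) * log(1 - sigmoid(x)), but safe
// for large |x| because exp() is only ever applied to a non-positive value.
inline double StableSigmoidCrossEntropy(double x, double label) {
  return std::max(x, 0.0) - x * label + std::log1p(std::exp(-std::fabs(x)));
}

Using std::log1p instead of log(1 + ...) additionally preserves precision when exp(-|x|) is tiny.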
diff --git a/paddle/fluid/operators/sign_op.cc b/paddle/fluid/operators/sign_op.cc index 8f8b7abd03..f3985dcc02 100644 --- a/paddle/fluid/operators/sign_op.cc +++ b/paddle/fluid/operators/sign_op.cc @@ -34,8 +34,7 @@ class SignOp : public framework::OperatorWithKernel { template class SignOpMaker : public framework::OpProtoAndCheckerMaker { public: - SignOpMaker(OpProto *proto, OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("X", "(Tensor) Input tensor of sign operator."); AddOutput("Out", "(Tensor) Output tensor of sign operator."); AddComment(R"DOC( diff --git a/paddle/fluid/operators/slice_op.cc b/paddle/fluid/operators/slice_op.cc new file mode 100644 index 0000000000..4bd23d5941 --- /dev/null +++ b/paddle/fluid/operators/slice_op.cc @@ -0,0 +1,133 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include "paddle/fluid/operators/slice_op.h" +#include +#include + +namespace paddle { +namespace operators { + +using Tensor = framework::Tensor; + +class SliceOp : public framework::OperatorWithKernel { + public: + using framework::OperatorWithKernel::OperatorWithKernel; + + void InferShape(framework::InferShapeContext *ctx) const override { + PADDLE_ENFORCE(ctx->HasInput("Input"), + "Input (Input) of slice op should not be null."); + PADDLE_ENFORCE(ctx->HasOutput("Out"), + "Output (Out) of slice op should not be null."); + + auto in_dims = ctx->GetInputDim("Input"); + PADDLE_ENFORCE(in_dims.size() < 7, + "The rank of input should be less than 7."); + framework::DDim out_dims(in_dims); + auto axes = ctx->Attrs().Get>("axes"); + auto starts = ctx->Attrs().Get>("starts"); + auto ends = ctx->Attrs().Get>("ends"); + + PADDLE_ENFORCE_EQ(starts.size(), ends.size()); + PADDLE_ENFORCE_EQ(starts.size(), axes.size()); + int dim_value, start, end; + for (size_t i = 0; i < axes.size(); ++i) { + dim_value = out_dims[axes[i]]; + start = starts[i] < 0 ? (starts[i] + dim_value) : starts[i]; + end = ends[i] < 0 ? (ends[i] + dim_value) : ends[i]; + start = std::max(start, 0); + end = std::max(end, 0); + start = std::min(start, dim_value); + end = std::min(end, dim_value); + start = std::min(start, end); + out_dims[axes[i]] = end - start; + } + ctx->SetOutputDim("Out", out_dims); + } + + protected: + framework::OpKernelType GetExpectedKernelType( + const framework::ExecutionContext &ctx) const override { + return framework::OpKernelType( + framework::ToDataType(ctx.Input("Input")->type()), + ctx.GetPlace()); + } +}; + +class SliceOpMaker : public framework::OpProtoAndCheckerMaker { + public: + void Make() override { + AddInput("Input", "Tensor of data to extract slices from."); + AddOutput("Out", "Sliced data tensor."); + + AddAttr>( + "axes", + "(list) Axes that `starts` and `ends` apply to. It's optional." 
+ "If not present, will be treated as [0, 1, ..., len(`starts`) - 1]."); + AddAttr>( + "starts", + "(list) Starting indices of corresponding axis in `axes`"); + AddAttr>( + "ends", + "(list) Starting indices of corresponding axis in `axes`."); + + AddComment(R"DOC( +Slice Operator. + +Produces a slice of the input tensor along multiple axes. Similar to numpy: +https://docs.scipy.org/doc/numpy/reference/arrays.indexing.html +Slice uses `axes`, `starts` and `ends` attributes to specify the start and +end dimension for each axis in the list of axes, it uses this information +to slice the input data tensor. If a negative value is passed for any of +the start or end indices, it represents number of elements before the end +of that dimension. If the value passed to start or end is larger than +the n (the number of elements in this dimension), it represents n. +For slicing to the end of a dimension with unknown size, it is recommended +to pass in INT_MAX. If axes are omitted, they are set to [0, ..., ndim-1]. +Following examples will explain how slice works: + + .. code-block:: text + + Cast1: + Given: + data = [ [1, 2, 3, 4], [5, 6, 7, 8], ] + axes = [0, 1] + starts = [1, 0] + ends = [2, 3] + Then: + result = [ [5, 6, 7], ] + + Cast2: + Given: + data = [ [1, 2, 3, 4], [5, 6, 7, 8], ] + starts = [0, 1] + ends = [-1, 1000] + Then: + result = [ [2, 3, 4], ] +)DOC"); + } +}; + +} // namespace operators +} // namespace paddle + +namespace ops = paddle::operators; +REGISTER_OPERATOR(slice, ops::SliceOp, ops::SliceOpMaker, + paddle::framework::EmptyGradOpMaker); + +REGISTER_OP_CPU_KERNEL( + slice, ops::SliceKernel, + ops::SliceKernel, + ops::SliceKernel, + ops::SliceKernel); diff --git a/paddle/fluid/operators/slice_op.cu b/paddle/fluid/operators/slice_op.cu new file mode 100644 index 0000000000..8c1767c70b --- /dev/null +++ b/paddle/fluid/operators/slice_op.cu @@ -0,0 +1,22 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include "paddle/fluid/operators/slice_op.h" + +namespace ops = paddle::operators; +REGISTER_OP_CUDA_KERNEL( + slice, ops::SliceKernel, + ops::SliceKernel, + ops::SliceKernel, + ops::SliceKernel); diff --git a/paddle/fluid/operators/slice_op.h b/paddle/fluid/operators/slice_op.h new file mode 100644 index 0000000000..ba231aee17 --- /dev/null +++ b/paddle/fluid/operators/slice_op.h @@ -0,0 +1,88 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+
+#pragma once
+#include <algorithm>
+#include <vector>
+#include "paddle/fluid/framework/op_registry.h"
+
+namespace paddle {
+namespace operators {
+
+template <typename DeviceContext, typename T>
+class SliceKernel : public framework::OpKernel<T> {
+ public:
+  void Compute(const framework::ExecutionContext& ctx) const override {
+    int rank = ctx.Input<framework::Tensor>("Input")->dims().size();
+    switch (rank) {
+      case 1:
+        SliceCompute<1>(ctx);
+        break;
+      case 2:
+        SliceCompute<2>(ctx);
+        break;
+      case 3:
+        SliceCompute<3>(ctx);
+        break;
+      case 4:
+        SliceCompute<4>(ctx);
+        break;
+      case 5:
+        SliceCompute<5>(ctx);
+        break;
+      case 6:
+        SliceCompute<6>(ctx);
+        break;
+    }
+  }
+
+ private:
+  template <size_t D>
+  void SliceCompute(const framework::ExecutionContext& context) const {
+    auto& place =
+        *context.template device_context<DeviceContext>().eigen_device();
+    auto in = context.Input<framework::Tensor>("Input");
+    auto out = context.Output<framework::Tensor>("Out");
+    out->mutable_data<T>(context.GetPlace());
+    auto out_dims = out->dims();
+    auto in_dims = in->dims();
+    auto axes = context.Attr<std::vector<int>>("axes");
+    auto starts = context.Attr<std::vector<int>>("starts");
+
+    // offsets[d] is where the slice begins on axis d and extents[d] is how
+    // many elements it keeps; by default the whole axis is kept.
+    auto offsets = Eigen::array<int, D>();
+    auto extents = Eigen::array<int, D>();
+    for (size_t i = 0; i < D; ++i) {
+      offsets[i] = 0;
+      extents[i] = out_dims[i];
+    }
+    int start;
+    for (size_t i = 0; i < axes.size(); ++i) {
+      start = starts[i];
+      if (start < 0) {
+        start = (start + in_dims[axes[i]]);
+      }
+      start = std::max(start, 0);
+      offsets[axes[i]] = start;
+    }
+    auto in_t =
+        framework::EigenTensor<T, D, Eigen::RowMajor, Eigen::DenseIndex>::From(
+            *in);
+    auto out_t =
+        framework::EigenTensor<T, D, Eigen::RowMajor, Eigen::DenseIndex>::From(
+            *out);
+    out_t.device(place) = in_t.slice(offsets, extents);
+  }
+};
+}  // namespace operators
+}  // namespace paddle
diff --git a/paddle/fluid/operators/smooth_l1_loss_op.cc b/paddle/fluid/operators/smooth_l1_loss_op.cc
index 322581fdef..622420c1c3 100644
--- a/paddle/fluid/operators/smooth_l1_loss_op.cc
+++ b/paddle/fluid/operators/smooth_l1_loss_op.cc
@@ -46,8 +46,7 @@ class SmoothL1LossOp : public framework::OperatorWithKernel {
 class SmoothL1LossOpMaker : public framework::OpProtoAndCheckerMaker {
  public:
-  SmoothL1LossOpMaker(OpProto* proto, OpAttrChecker* op_checker)
-      : OpProtoAndCheckerMaker(proto, op_checker) {
+  void Make() override {
     AddInput("X",
              "(Tensor, default Tensor<float>) A tensor with rank at least 2.
" "The input value of smooth l1 loss op with shape " @@ -106,7 +105,7 @@ class SmoothL1LossGradOp : public framework::OperatorWithKernel { using framework::OperatorWithKernel::OperatorWithKernel; void InferShape(framework::InferShapeContext* ctx) const override { - auto in_dims = ctx->GetInputDim("X"); + auto in_dims = ctx->GetInputDim("Diff"); auto out_dims = ctx->GetInputDim(framework::GradVarName("Out")); PADDLE_ENFORCE_GE(out_dims.size(), 2, @@ -128,12 +127,33 @@ class SmoothL1LossGradOp : public framework::OperatorWithKernel { } }; +class SmoothL1LossGradMaker : public framework::SingleGradOpDescMaker { + public: + using framework::SingleGradOpDescMaker::SingleGradOpDescMaker; + + protected: + std::unique_ptr Apply() const override { + auto* op = new framework::OpDesc(); + op->SetType("smooth_l1_loss_grad"); + op->SetInput("InsideWeight", Input("InsideWeight")); + op->SetInput("OutsideWeight", Input("OutsideWeight")); + op->SetInput("Diff", Output("Diff")); + op->SetInput(framework::GradVarName("Out"), OutputGrad("Out")); + + op->SetAttrMap(Attrs()); + + op->SetOutput(framework::GradVarName("X"), InputGrad("X")); + op->SetOutput(framework::GradVarName("Y"), InputGrad("Y")); + return std::unique_ptr(op); + } +}; + } // namespace operators } // namespace paddle namespace ops = paddle::operators; REGISTER_OPERATOR(smooth_l1_loss, ops::SmoothL1LossOp, ops::SmoothL1LossOpMaker, - paddle::framework::DefaultGradOpDescMaker); + ops::SmoothL1LossGradMaker); REGISTER_OPERATOR(smooth_l1_loss_grad, ops::SmoothL1LossGradOp); REGISTER_OP_CPU_KERNEL( smooth_l1_loss, diff --git a/paddle/fluid/operators/softmax_cudnn_op.cu.cc b/paddle/fluid/operators/softmax_cudnn_op.cu.cc index 5596fa0648..2bdb23e999 100644 --- a/paddle/fluid/operators/softmax_cudnn_op.cu.cc +++ b/paddle/fluid/operators/softmax_cudnn_op.cu.cc @@ -30,8 +30,16 @@ class SoftmaxCUDNNKernel : public framework::OpKernel { // allocate memory on device. Out->mutable_data(context.GetPlace()); + auto dims = X->dims(); + auto flattened_dims = framework::flatten_to_2d(dims, dims.size() - 1); + framework::LoDTensor flattened_x; + framework::LoDTensor flattened_out; + flattened_x.ShareDataWith(*X).Resize(flattened_dims); + flattened_out.ShareDataWith(*Out).Resize(flattened_dims); + math::SoftmaxCUDNNFunctor()( - context.template device_context(), X, Out); + context.template device_context(), + &flattened_x, &flattened_out); } }; @@ -46,9 +54,18 @@ class SoftmaxGradCUDNNKernel : public framework::OpKernel { // allocate memory on device. 
    dX->mutable_data<T>(context.GetPlace());
+    auto dims = Out->dims();
+    auto flattened_dims = framework::flatten_to_2d(dims, dims.size() - 1);
+    framework::LoDTensor flattened_out;
+    framework::LoDTensor flattened_d_out;
+    framework::LoDTensor flattened_d_x;
+    flattened_out.ShareDataWith(*Out).Resize(flattened_dims);
+    flattened_d_out.ShareDataWith(*dOut).Resize(flattened_dims);
+    flattened_d_x.ShareDataWith(*dX).Resize(flattened_dims);
+
     math::SoftmaxGradCUDNNFunctor<T>()(
-        context.template device_context<platform::CUDADeviceContext>(), Out,
-        dOut, dX);
+        context.template device_context<platform::CUDADeviceContext>(),
+        &flattened_out, &flattened_d_out, &flattened_d_x);
   }
 };
diff --git a/paddle/fluid/operators/softmax_mkldnn_op.cc b/paddle/fluid/operators/softmax_mkldnn_op.cc
index 71b541d98f..01819f53e3 100644
--- a/paddle/fluid/operators/softmax_mkldnn_op.cc
+++ b/paddle/fluid/operators/softmax_mkldnn_op.cc
@@ -26,9 +26,82 @@ using paddle::platform::MKLDNNMemDesc;
 using mkldnn::memory;  // Note: paddle has also "memory" namespace
 using mkldnn::primitive;
-using mkldnn::softmax_forward;
 using mkldnn::prop_kind;
+using mkldnn::softmax_backward;
+using mkldnn::softmax_forward;
 using mkldnn::stream;
+using platform::to_void_cast;
+
+class SoftmaxMKLDNNHandler : public platform::MKLDNNHandler {
+ public:
+  SoftmaxMKLDNNHandler(
+      std::shared_ptr<softmax_forward::primitive_desc> softmax_pd,
+      const platform::MKLDNNDeviceContext& dev_ctx, mkldnn::engine engine,
+      const std::string& base_key)
+      : platform::MKLDNNHandler(dev_ctx, engine, base_key),
+        softmax_pd_(softmax_pd) {}
+
+  SoftmaxMKLDNNHandler(
+      std::shared_ptr<softmax_forward::primitive_desc> softmax_pd,
+      std::shared_ptr<softmax_backward::primitive_desc> softmax_bwd_pd,
+      const platform::MKLDNNDeviceContext& dev_ctx, mkldnn::engine engine,
+      const std::string& base_key)
+      : platform::MKLDNNHandler(dev_ctx, engine, base_key),
+        softmax_pd_(softmax_pd),
+        softmax_bwd_pd_(softmax_bwd_pd) {
+    // If we are in the Grad operator then update the key with a BWD suffix to
+    // distinguish it from the FWD memory primitives
+    key_ += "-BWD";
+  }
+
+  std::shared_ptr<softmax_forward> AcquireSoftmax(
+      std::shared_ptr<memory> dst_memory_p,
+      std::shared_ptr<memory> src_memory_p) {
+    /*Generate key*/
+    auto prim_key = key_ + "@softmax_p";
+
+    auto softmax_p =
+        std::static_pointer_cast<softmax_forward>(dev_ctx_.GetBlob(prim_key));
+    PADDLE_ENFORCE((softmax_p != nullptr) || (is_reusing_ == false),
+                   "Fail to find softmax primitive in device context");
+    if (softmax_p == nullptr) {
+      softmax_p = std::make_shared<softmax_forward>(
+          *(softmax_pd_.get()),
+          *(static_cast<memory*>(src_memory_p.get())),
+          *(static_cast<memory*>(dst_memory_p.get())));
+      dev_ctx_.SetBlob(prim_key, softmax_p);
+    } else {
+      is_reusing_ = true;
+    }
+
+    return softmax_p;
+  }
+
+  std::shared_ptr<softmax_backward> AcquireSoftmaxBackward(
+      std::shared_ptr<memory> dst_memory_p,
+      std::shared_ptr<memory> diff_dst_memory_p,
+      std::shared_ptr<memory> diff_src_memory_p) {
+    auto prim_key = key_ + "@softmax_bwd_p";
+    auto softmax_bwd_p =
+        std::static_pointer_cast<softmax_backward>(dev_ctx_.GetBlob(prim_key));
+    PADDLE_ENFORCE((softmax_bwd_p != nullptr) || (is_reusing_ == false),
+                   "Fail to find softmax backward primitive in device context");
+    if (softmax_bwd_p == nullptr) {
+      softmax_bwd_p = std::make_shared<softmax_backward>(
+          *softmax_bwd_pd_, *(dst_memory_p.get()), *(diff_dst_memory_p.get()),
+          *(diff_src_memory_p.get()));
+      dev_ctx_.SetBlob(prim_key, softmax_bwd_p);
+    } else {
+      is_reusing_ = true;
+    }
+
+    return softmax_bwd_p;
+  }
+
+ private:
+  std::shared_ptr<softmax_forward::primitive_desc> softmax_pd_;
+  std::shared_ptr<softmax_backward::primitive_desc> softmax_bwd_pd_;
+};
 template <typename T>
 class SoftmaxMKLDNNKernel : public paddle::framework::OpKernel<T> {
@@ -40,38 +113,54 @@ class SoftmaxMKLDNNKernel : public paddle::framework::OpKernel<T> {
     auto mkldnn_engine = dev_ctx.GetEngine();
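+    // The steps below: (1) flatten the input/output tensors to 2-D views that
+    // share the original memory, (2) create or fetch the cached MKL-DNN
+    // memories and softmax primitive through SoftmaxMKLDNNHandler (keyed by
+    // shape and output name), (3) run softmax over the innermost (C) axis.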
    const Tensor* input = ctx.Input<Tensor>("X");
     Tensor* output = ctx.Output<Tensor>("Out");
-    PADDLE_ENFORCE(input->dims().size() == 2UL,
-                   "The input of softmax op must be a 2D matrix.");
-    const T* input_data = input->data<T>();
-    // allocate memory for output
-    T* output_data = output->mutable_data<T>(ctx.GetPlace());
-    std::vector<int> src_tz = paddle::framework::vectorize2int(input->dims());
-    std::vector<int> dst_tz = paddle::framework::vectorize2int(output->dims());
-    // MKL-DNN does support softmax over selected axis. Having 2D Tensor,
-    // we will make normalization after final eg. axis: 1
-    PADDLE_ENFORCE(((src_tz[0] == dst_tz[0]) && (src_tz[1] == dst_tz[1])),
-                   "Softmax input and output dimensions should match");
+    PADDLE_ENFORCE_EQ(
+        input->dims(), output->dims(),
+        "The shape of softmax's input and output must be identical.");
+
+    // make sure 'output' holds memory, which will be shared by
+    // 'flattened_output' later.
+    output->mutable_data<T>(ctx.GetPlace());
+
+    // flatten the input and output to 2-D matrices
+    auto dims = input->dims();  // input and output share the same shape
+    auto flattened_dims = framework::flatten_to_2d(dims, dims.size() - 1);
+    framework::Tensor flattened_input;
+    framework::Tensor flattened_output;
+    flattened_input.ShareDataWith(*input).Resize(flattened_dims);
+    flattened_output.ShareDataWith(*output).Resize(flattened_dims);
+
+    const T* input_data = flattened_input.data<T>();
+    T* output_data = flattened_output.mutable_data<T>(ctx.GetPlace());
+
+    std::vector<int> src_tz = paddle::framework::vectorize2int(flattened_dims);
+    std::vector<int> dst_tz = src_tz;
     // Same memory descriptor to be used for input and output
     memory::dims softmax_tz = {src_tz[0], src_tz[1]};
-    // Currently only supports NC data format
-    // TODO(jczaja-intel): support more formats
-    auto softmax_md =
-        MKLDNNMemDesc({softmax_tz}, memory::f32, memory::format::nc);
+    // Generate keys for storing/retrieving primitives for this operator
+    const std::string key =
+        platform::MKLDNNHandler::GetHash(softmax_tz, ctx.op().Output("Out"));
+    const std::string key_softmax_pd = key + "@softmax_pd";
+
+    // Currently only the NC data format is supported
+    auto softmax_md = MKLDNNMemDesc(
+        {softmax_tz}, platform::MKLDNNGetDataType<T>(), memory::format::nc);
     // Normalization is made after the innermost dimension eg. C out of NC
    auto softmax_desc =
         softmax_forward::desc(prop_kind::forward_scoring, softmax_md, 1 /*dim: C*/);
-    // create memory primitives
-    auto softmax_src_memory =
-        memory({softmax_md, mkldnn_engine},
-               static_cast<void*>(const_cast<T*>(input_data)));
-    auto softmax_dst_memory =
-        memory({softmax_md, mkldnn_engine},
-               static_cast<void*>(const_cast<T*>(output_data)));
-    auto softmax_prim_desc =
-        softmax_forward::primitive_desc(softmax_desc, mkldnn_engine);
-    auto softmax = softmax_forward(softmax_prim_desc, softmax_src_memory,
-                                   softmax_dst_memory);
-    std::vector<primitive> pipeline{softmax};
+    auto softmax_pd = std::make_shared<softmax_forward::primitive_desc>(
+        softmax_desc, mkldnn_engine);
+    dev_ctx.SetBlob(key_softmax_pd, softmax_pd);
+
+    SoftmaxMKLDNNHandler handler(softmax_pd, dev_ctx, mkldnn_engine, key);
+    auto softmax_src_memory_p =
+        handler.AcquireSrcMemory(softmax_md, to_void_cast<T>(input_data));
+    auto softmax_dst_memory_p =
+        handler.AcquireDstMemory(softmax_md, to_void_cast<T>(output_data));
+    auto softmax_p =
+        handler.AcquireSoftmax(softmax_dst_memory_p, softmax_src_memory_p);
+
+    std::vector<primitive> pipeline{
+        *(static_cast<softmax_forward*>(softmax_p.get()))};
     stream(stream::kind::eager).submit(pipeline).wait();
     const bool is_test = ctx.Attr<bool>("is_test");
@@ -85,6 +174,88 @@ class SoftmaxMKLDNNKernel : public paddle::framework::OpKernel<T> {
   }
 };
+template <typename T>
+class SoftmaxMKLDNNGradKernel : public paddle::framework::OpKernel<T> {
+ public:
+  void Compute(const paddle::framework::ExecutionContext& ctx) const override {
+    PADDLE_ENFORCE(paddle::platform::is_cpu_place(ctx.GetPlace()),
+                   "It must use CPUPlace.");
+
+    auto& dev_ctx = ctx.template device_context<platform::MKLDNNDeviceContext>();
+    auto mkldnn_engine = dev_ctx.GetEngine();
+    const Tensor* output = ctx.Input<Tensor>("Out");
+    auto* dout = ctx.template Input<Tensor>(framework::GradVarName("Out"));
+    auto* dx =
+        ctx.template Output<Tensor>(framework::GradVarName("X"));
+
+    PADDLE_ENFORCE_EQ(
+        dout->dims(), dx->dims(),
+        "The shape of softmax_grad's input and output must be identical.");
+
+    // make sure 'dx' holds memory, which will be shared by 'flattened_dx'
+    // later.
+    dx->template mutable_data<T>(ctx.GetPlace());
+
+    auto dims = dout->dims();  // input and output share the same shape
+    auto flattened_dims = framework::flatten_to_2d(dims, dims.size() - 1);
+    framework::Tensor flattened_output;
+    framework::Tensor flattened_dout;
+    framework::Tensor flattened_dx;
+    flattened_output.ShareDataWith(*output).Resize(flattened_dims);
+    flattened_dout.ShareDataWith(*dout).Resize(flattened_dims);
+    flattened_dx.ShareDataWith(*dx).Resize(flattened_dims);
+
+    const T* dst_data = flattened_output.data<T>();
+    const T* diff_dst_ptr = flattened_dout.template data<T>();
+    T* diff_src_ptr = flattened_dx.template mutable_data<T>(ctx.GetPlace());
+
+    std::vector<int> dst_tz = paddle::framework::vectorize2int(flattened_dims);
+    std::vector<int> src_tz(dst_tz);
+
+    // Same memory descriptor to be used for input and output
+    memory::dims softmax_tz = {src_tz[0], src_tz[1]};
+    // Currently only supports NC data format
+    // retrieve the softmax primitive desc from the device context
+    const std::string key =
+        platform::MKLDNNHandler::GetHash(softmax_tz, ctx.op().Input("Out"));
+    const std::string key_softmax_pd = key + "@softmax_pd";
+
+    auto softmax_pd =
+        std::static_pointer_cast<softmax_forward::primitive_desc>(
+            dev_ctx.GetBlob(key_softmax_pd));
+    PADDLE_ENFORCE(softmax_pd != nullptr,
+                   "Fail to find softmax_pd in device context");
+
+    // TODO(jczaja): Add layouts support when there is a need to do so
+    // Two dimensional softmax does support NC format
+    auto data_softmax_md = MKLDNNMemDesc(
+        {softmax_tz}, platform::MKLDNNGetDataType<T>(), memory::format::nc);
+    auto diff_softmax_md = MKLDNNMemDesc(
+        {softmax_tz}, platform::MKLDNNGetDataType<T>(), memory::format::nc);
+    // Normalization is made after innermost dimension eg. C out of NC
+    auto softmax_bwd_desc =
+        softmax_backward::desc(diff_softmax_md, data_softmax_md, 1 /* dim: C*/);
+    auto softmax_bwd_pd =
+        std::make_shared<softmax_backward::primitive_desc>(
+            softmax_bwd_desc, mkldnn_engine, *softmax_pd);
+
+    SoftmaxMKLDNNHandler handler(softmax_pd, softmax_bwd_pd, dev_ctx,
+                                 mkldnn_engine, key);
+    auto dst_memory_p =
+        handler.AcquireDstMemory(data_softmax_md, to_void_cast<T>(dst_data));
+    auto diff_dst_memory_p = handler.AcquireDiffDstMemory(
+        diff_softmax_md, to_void_cast<T>(diff_dst_ptr));
+    auto diff_src_memory_p = handler.AcquireDiffSrcMemory(
+        diff_softmax_md, to_void_cast<T>(diff_src_ptr));
+
+    // Get the primitive from the device context
+    auto softmax_bwd_p = handler.AcquireSoftmaxBackward(
+        dst_memory_p, diff_dst_memory_p, diff_src_memory_p);
+
+    std::vector<primitive> pipeline{*softmax_bwd_p};
+    stream(stream::kind::eager).submit(pipeline).wait();
+  }
+};
 }  // namespace operators
 }  // namespace paddle
@@ -92,3 +263,5 @@ namespace ops = paddle::operators;
 REGISTER_OP_KERNEL(softmax, MKLDNN, ::paddle::platform::CPUPlace,
                    ops::SoftmaxMKLDNNKernel<float>);
+REGISTER_OP_KERNEL(softmax_grad, MKLDNN, ::paddle::platform::CPUPlace,
+                   ops::SoftmaxMKLDNNGradKernel<float>);
diff --git a/paddle/fluid/operators/softmax_op.cc b/paddle/fluid/operators/softmax_op.cc
index aa7b192e32..bb08123882 100644
--- a/paddle/fluid/operators/softmax_op.cc
+++ b/paddle/fluid/operators/softmax_op.cc
@@ -37,10 +37,7 @@ class SoftmaxOp : public framework::OperatorWithKernel {
     PADDLE_ENFORCE(ctx->HasOutput("Out"),
                    "Output(Out) of SoftmaxOp should not be null.");
-    auto x_dims = ctx->GetInputDim("X");
-    PADDLE_ENFORCE(x_dims.size() == 2UL,
-                   "The input of softmax op must be a matrix.");
-    ctx->SetOutputDim("Out", x_dims);
+    ctx->SetOutputDim("Out", ctx->GetInputDim("X"));
     ctx->ShareLoD("X", /*->*/ "Out");
   }
@@ -49,6 +46,9 @@ class SoftmaxOp : public framework::OperatorWithKernel {
      const framework::ExecutionContext& ctx) const override {
     // choose cudnn kernel if the runtime supported.
     framework::LibraryType library_{framework::LibraryType::kPlain};
+    std::string data_format = ctx.Attr<std::string>("data_format");
+    framework::DataLayout layout_ = framework::StringToDataLayout(data_format);
+
 #ifdef PADDLE_WITH_CUDA
     if (platform::CanCUDNNBeUsed(ctx)) {
       library_ = framework::LibraryType::kCUDNN;
@@ -58,6 +58,7 @@ class SoftmaxOp : public framework::OperatorWithKernel {
     if (library_ == framework::LibraryType::kPlain &&
         platform::CanMKLDNNBeUsed(ctx)) {
       library_ = framework::LibraryType::kMKLDNN;
+      layout_ = framework::DataLayout::kMKLDNN;
     }
 #endif
@@ -68,21 +69,19 @@ class SoftmaxOp : public framework::OperatorWithKernel {
                      "float16 can only be used on GPU place");
     }
-    std::string data_format = ctx.Attr<std::string>("data_format");
-    return framework::OpKernelType(input_data_type, ctx.GetPlace(),
-                                   framework::StringToDataLayout(data_format),
+    return framework::OpKernelType(input_data_type, ctx.GetPlace(), layout_,
                                    library_);
   }
 };
 class SoftmaxOpMaker : public framework::OpProtoAndCheckerMaker {
  public:
-  SoftmaxOpMaker(OpProto* proto, OpAttrChecker* op_checker)
-      : OpProtoAndCheckerMaker(proto, op_checker) {
+  void Make() override {
     AddInput("X",
-             "The input tensor of softmax. "
-             "2-D with shape [batch_size, input_feature_dimensions].");
-    AddOutput("Out", "The normalized values with the same shape as X.");
+             "The input tensor of softmax, "
+             "whose last dimension is the input_feature_dimensions.");
+    AddOutput("Out", "The normalized values with the same shape as X.")
+        .Reuse("X");
     AddAttr<bool>(
         "use_cudnn",
         "(bool, default false) Only used in cudnn kernel, need install cudnn")
@@ -103,20 +102,23 @@ class SoftmaxOpMaker : public framework::OpProtoAndCheckerMaker {
 AddComment(R"DOC(
 Softmax Operator.
-The input of the softmax operator is a 2-D tensor with shape N x K (N is the
-batch_size, K is the dimension of input feature). The output tensor has the
-same shape as the input tensor.
+The input of the softmax operator is a tensor of any rank. The output tensor
+has the same shape as the input.
-For each row of the input tensor, the softmax operator squashes the
-K-dimensional vector of arbitrary real values to a K-dimensional vector of real
-values in the range [0, 1] that add up to 1.
+The input tensor will first be logically flattened to a 2-D matrix. The matrix's
+second dimension (row length) is the same as the last dimension of the input
+tensor, and the first dimension (column length) is the product of all other
+dimensions of the input tensor. For each row of the matrix, the softmax operator
+squashes the K-dimensional (K is the width of the matrix, which is also the size
+of the input tensor's last dimension) vector of arbitrary real values to a
+K-dimensional vector of real values in the range [0, 1] that add up to 1.
 It computes the exponential of the given dimension and the sum of exponential
 values of all the other dimensions in the K-dimensional vector input. Then the
 ratio of the exponential of the given dimension and the sum of exponential
 values of all the other dimensions is the output of the softmax operator.
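+For example, an input tensor with shape [2, 3, 4] is viewed as a 6 x 4 matrix,
+the softmax is computed independently over each of the 6 rows of length 4, and
+the result keeps the original shape [2, 3, 4].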
-For each row $i$ and each column $j$ in Input(X), we have:
+For each row $i$ and each column $j$ in the matrix, we have:
     $$Out[i, j] = \frac{\exp(X[i, j])}{\sum_j \exp(X[i, j])}$$
 )DOC");
@@ -135,7 +137,8 @@ class SoftmaxOpGrad : public framework::OperatorWithKernel {
                       ctx->GetInputDim(framework::GradVarName("Out")),
                       "Input(Out) and its gradients should have a same shape.");
-    ctx->SetOutputDim(framework::GradVarName("X"), ctx->GetInputDim("X"));
+    ctx->SetOutputDim(framework::GradVarName("X"),
+                      ctx->GetInputDim(framework::GradVarName("Out")));
   }
  protected:
@@ -143,25 +146,58 @@ class SoftmaxOpGrad : public framework::OperatorWithKernel {
       const framework::ExecutionContext& ctx) const override {
     // choose cudnn kernel if the runtime supported.
     framework::LibraryType library_{framework::LibraryType::kPlain};
+    std::string data_format = ctx.Attr<std::string>("data_format");
+    framework::DataLayout layout_ = framework::StringToDataLayout(data_format);
+
 #ifdef PADDLE_WITH_CUDA
     if (platform::CanCUDNNBeUsed(ctx)) {
       library_ = framework::LibraryType::kCUDNN;
     }
 #endif
-    std::string data_format = ctx.Attr<std::string>("data_format");
-    return framework::OpKernelType(
-        framework::ToDataType(ctx.Input<Tensor>("X")->type()), ctx.GetPlace(),
-        framework::StringToDataLayout(data_format), library_);
+#ifdef PADDLE_WITH_MKLDNN
+    if (library_ == framework::LibraryType::kPlain &&
+        platform::CanMKLDNNBeUsed(ctx)) {
+      library_ = framework::LibraryType::kMKLDNN;
+      layout_ = framework::DataLayout::kMKLDNN;
+    }
+#endif
+    auto input_data_type = framework::ToDataType(
+        ctx.Input<Tensor>(framework::GradVarName("Out"))->type());
+    if (input_data_type == framework::proto::VarType::FP16) {
+      PADDLE_ENFORCE(platform::is_gpu_place(ctx.GetPlace()),
+                     "float16 can only be used on GPU place");
+    }
+
+    return framework::OpKernelType(input_data_type, ctx.GetPlace(), layout_,
+                                   library_);
   }
 };
+class SoftmaxOpGradMaker : public framework::SingleGradOpDescMaker {
+ public:
+  using framework::SingleGradOpDescMaker::SingleGradOpDescMaker;
+
+ protected:
+  std::unique_ptr<framework::OpDesc> Apply() const override {
+    auto* op = new framework::OpDesc();
+    op->SetType("softmax_grad");
+
+    op->SetInput("Out", Output("Out"));
+    op->SetInput(framework::GradVarName("Out"), OutputGrad("Out"));
+
+    op->SetAttrMap(Attrs());
+
+    op->SetOutput(framework::GradVarName("X"), InputGrad("X"));
+    return std::unique_ptr<framework::OpDesc>(op);
+  }
+};
 }  // namespace operators
 }  // namespace paddle
 namespace ops = paddle::operators;
 REGISTER_OPERATOR(softmax, ops::SoftmaxOp, ops::SoftmaxOpMaker,
-                  paddle::framework::DefaultGradOpDescMaker<true>);
+                  ops::SoftmaxOpGradMaker);
 REGISTER_OPERATOR(softmax_grad, ops::SoftmaxOpGrad);
 REGISTER_OP_CPU_KERNEL(
     softmax, ops::SoftmaxKernel<paddle::platform::CPUDeviceContext, float>,
diff --git a/paddle/fluid/operators/softmax_op.h b/paddle/fluid/operators/softmax_op.h
index 600da45a0b..1205bd0587 100644
--- a/paddle/fluid/operators/softmax_op.h
+++ b/paddle/fluid/operators/softmax_op.h
@@ -31,8 +31,16 @@ class SoftmaxKernel : public framework::OpKernel<T> {
     // allocate memory on device.
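+    // X and Out are flattened below to 2-D views over the last dimension;
+    // ShareDataWith means the views alias the original tensors, so no copy
+    // is made.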
Out->mutable_data(context.GetPlace()); + auto dims = X->dims(); + auto flattened_dims = framework::flatten_to_2d(dims, dims.size() - 1); + framework::LoDTensor flattened_x; + framework::LoDTensor flattened_out; + flattened_x.ShareDataWith(*X).Resize(flattened_dims); + flattened_out.ShareDataWith(*Out).Resize(flattened_dims); + math::SoftmaxFunctor()( - context.template device_context(), X, Out); + context.template device_context(), &flattened_x, + &flattened_out); } }; @@ -47,8 +55,18 @@ class SoftmaxGradKernel : public framework::OpKernel { // allocate memory on device. dX->mutable_data(context.GetPlace()); + auto dims = Out->dims(); + auto flattened_dims = framework::flatten_to_2d(dims, dims.size() - 1); + framework::LoDTensor flattened_out; + framework::LoDTensor flattened_d_out; + framework::LoDTensor flattened_d_x; + flattened_out.ShareDataWith(*Out).Resize(flattened_dims); + flattened_d_out.ShareDataWith(*dOut).Resize(flattened_dims); + flattened_d_x.ShareDataWith(*dX).Resize(flattened_dims); + math::SoftmaxGradFunctor()( - context.template device_context(), Out, dOut, dX); + context.template device_context(), &flattened_out, + &flattened_d_out, &flattened_d_x); } }; diff --git a/paddle/fluid/operators/softmax_with_cross_entropy_op.cc b/paddle/fluid/operators/softmax_with_cross_entropy_op.cc index 857e573357..53cb716a97 100644 --- a/paddle/fluid/operators/softmax_with_cross_entropy_op.cc +++ b/paddle/fluid/operators/softmax_with_cross_entropy_op.cc @@ -20,8 +20,7 @@ namespace operators { class SoftmaxWithCrossEntropyOpMaker : public framework::OpProtoAndCheckerMaker { public: - SoftmaxWithCrossEntropyOpMaker(OpProto* proto, OpAttrChecker* op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("Logits", "(Tensor, default: Tensor), The unscaled log probabilities " "which is a 2-D tensor with shape [N x K]. N is the batch_size, " diff --git a/paddle/fluid/operators/softmax_with_cross_entropy_op.cu b/paddle/fluid/operators/softmax_with_cross_entropy_op.cu index 8f7840cee1..a559b01ed3 100644 --- a/paddle/fluid/operators/softmax_with_cross_entropy_op.cu +++ b/paddle/fluid/operators/softmax_with_cross_entropy_op.cu @@ -1,4 +1,4 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,6 +14,8 @@ limitations under the License. 
*/
 #define EIGEN_USE_GPU
+#include <cub/cub.cuh>
+#include "paddle/fluid/operators/math/cross_entropy.h"
 #include "paddle/fluid/operators/softmax_with_cross_entropy_op.h"
 namespace paddle {
 namespace operators {
@@ -53,8 +55,196 @@ __global__ void SoftCrossEntropyGradientKernel(T* logit_grad,
     logit_grad[ids] = loss_grad[row_ids] * (logit_grad[ids] - labels[ids]);
   }
 }
+
 }  // namespace
+static __device__ __forceinline__ float real_exp(float x) { return expf(x); }
+static __device__ __forceinline__ double real_exp(double x) { return exp(x); }
+static __device__ __forceinline__ float real_log(float x) {
+  return math::TolerableValue<float>()(logf(x));
+}
+static __device__ __forceinline__ double real_log(double x) {
+  return math::TolerableValue<double>()(log(x));
+}
+
+/** In the following codes, 3 CUDA kernels are implemented to calculate softmax
+ * and loss **/
+/*
+  Supposing the x is `logits` and y is `labels`, the equations are as
+follows:
+
+  cross\_entropy_i = \sum_{j}[- y_i_j * log({e^{x_i_j}/\sum_{j}e^{x_i_j}})]
+        = \sum_{j}[- y_i_j * log({e^{x_i_j - max_i}/\sum_{j}e^{x_i_j-max_i}})]
+        = \sum_{j}[-y_i_j * (x_i_j - max_i - log\sum_{j}e^{x_i_j - max_i})]
+        = \sum_{j}[-y_i_j * (x_i_j - max_i - logDiffMaxSum_i)]
+        = \sum_{j}(-y_i_j * tmp_i_j)
+
+  softmax_i_j = e^{tmp_i_j}
+
+where:
+  max_i = \max_{j}{x_i_j}
+  logDiffMaxSum_i = log\sum_{j}e^{x_i_j - max_i}
+  tmp_i_j = x_i_j - max_i - logDiffMaxSum_i
+
+Therefore, the calculation can be separated into 3 steps:
+Step 1: row-wise operation to calculate max_i
+Step 2: row-wise operation to calculate logDiffMaxSum_i
+Step 3: calculate tmp_i_j, and finally get softmax_i_j and cross\_entropy_i
+
+To save memory, we can share memory among max_i, logDiffMaxSum_i and
+cross\_entropy_i.
+In this way, the 3 steps should be changed to:
+Step 1 (RowReductionForMax): row-wise operation to calculate max_i
+Step 2 (RowReductionForDiffMaxSum): calculate the intermediate result
+softmax'_i_j = x_i_j - max_i, and a row-wise operation to calculate
+logDiffMaxSum_i
+Step 3 (RowReductionForSoftmaxAndCrossEntropy): calculate tmp_i_j = softmax'_i_j
+- logDiffMaxSum_i, and finally get softmax_i_j and cross\_entropy_i
+*/
+
+// There are 3 kinds of reduce algorithms in cub:
+// BLOCK_REDUCE_RAKING_COMMUTATIVE_ONLY
+// BLOCK_REDUCE_RAKING
+// BLOCK_REDUCE_WARP_REDUCTIONS (default)
+template <typename T, int BlockDim>
+using BlockReduce = cub::BlockReduce<T, BlockDim>;
+
+template <typename T, int BlockDim>
+using BlockReduceTempStorage =
+    typename BlockReduce<T, BlockDim>::TempStorage;
+
+// Make sure that BlockDim <= feature_size
+// This kernel is used to calculate the max element of each row
+template <typename T, int BlockDim>
+__global__ void RowReductionForMax(const T* logits_data, T* max_data,
+                                   int feature_size) {
+  __shared__ BlockReduceTempStorage<T, BlockDim> temp_storage;
+
+  auto beg_idx = feature_size * blockIdx.x + threadIdx.x;
+  auto end_idx = feature_size * (blockIdx.x + 1);
+
+  T cur_max = logits_data[beg_idx];
+  beg_idx += BlockDim;
+  while (beg_idx < end_idx) {
+    if (cur_max < logits_data[beg_idx]) {
+      cur_max = logits_data[beg_idx];
+    }
+    beg_idx += BlockDim;
+  }
+
+  cur_max = BlockReduce<T, BlockDim>(temp_storage).Reduce(cur_max, cub::Max());
+
+  if (threadIdx.x == 0) {
+    max_data[blockIdx.x] = cur_max < -64 ?
-64 : cur_max; + } +} + +// Make sure that BlockDim <= feature_size +template +__global__ void RowReductionForDiffMaxSum(const T* logits_data, T* max_data, + T* softmax, int feature_size) { + __shared__ BlockReduceTempStorage temp_storage; + + auto beg_idx = feature_size * blockIdx.x + threadIdx.x; + auto end_idx = feature_size * (blockIdx.x + 1); + + auto block_max = max_data[blockIdx.x]; + + softmax[beg_idx] = logits_data[beg_idx] - block_max; + T diff_max_sum = real_exp(softmax[beg_idx]); + beg_idx += BlockDim; + while (beg_idx < end_idx) { + softmax[beg_idx] = logits_data[beg_idx] - block_max; + diff_max_sum += real_exp(softmax[beg_idx]); + beg_idx += BlockDim; + } + + diff_max_sum = + BlockReduce(temp_storage).Reduce(diff_max_sum, cub::Sum()); + if (threadIdx.x == 0) max_data[blockIdx.x] = real_log(diff_max_sum); +} + +// Make sure that BlockDim <= feature_size +template +__global__ void RowReductionForSoftmaxAndCrossEntropy(const T* logits_data, + const T* labels_data, + T* loss_data, T* softmax, + int feature_size) { + __shared__ BlockReduceTempStorage temp_storage; + + auto beg_idx = feature_size * blockIdx.x + threadIdx.x; + auto end_idx = feature_size * (blockIdx.x + 1); + + // log_diff_max_sum shares memory with loss + auto block_log_diff_max_sum = loss_data[blockIdx.x]; + auto tmp = softmax[beg_idx] - block_log_diff_max_sum; + softmax[beg_idx] = real_exp(tmp); + auto loss = -labels_data[beg_idx] * tmp; + beg_idx += BlockDim; + while (beg_idx < end_idx) { + tmp = softmax[beg_idx] - block_log_diff_max_sum; + softmax[beg_idx] = real_exp(tmp); + loss -= (labels_data[beg_idx] * tmp); + beg_idx += BlockDim; + } + + loss = BlockReduce(temp_storage).Reduce(loss, cub::Sum()); + if (threadIdx.x == 0) loss_data[blockIdx.x] = loss; +} + +template +__global__ void SetSoftmaxToOneWhenFeatureSizeIsOne(T* out, int batch_size) { + auto idx = threadIdx.x + blockIdx.x * blockDim.x; + if (idx < batch_size) out[idx] = static_cast(1); +} + +template +static void SoftmaxWithCrossEntropyFusedKernel(const T* logits_data, + const T* labels_data, + T* softmax_data, T* loss_data, + int batch_size, int feature_size, + cudaStream_t stream) { + constexpr int kMaxBlockDim = 512; + int block_dim = feature_size >= kMaxBlockDim + ? 
kMaxBlockDim + : (1 << static_cast(std::log2(feature_size))); + +#define CALL_SOFTMAX_WITH_CROSS_ENTROPY_FUSED_KERNEL(BlockDim) \ + case BlockDim: \ + RowReductionForMax<<>>( \ + logits_data, loss_data, feature_size); \ + RowReductionForDiffMaxSum<<>>( \ + logits_data, loss_data, softmax_data, feature_size); \ + RowReductionForSoftmaxAndCrossEntropy< \ + T, BlockDim><<>>( \ + logits_data, labels_data, loss_data, softmax_data, feature_size); \ + break + + switch (block_dim) { + CALL_SOFTMAX_WITH_CROSS_ENTROPY_FUSED_KERNEL(512); + CALL_SOFTMAX_WITH_CROSS_ENTROPY_FUSED_KERNEL(256); + CALL_SOFTMAX_WITH_CROSS_ENTROPY_FUSED_KERNEL(128); + CALL_SOFTMAX_WITH_CROSS_ENTROPY_FUSED_KERNEL(64); + CALL_SOFTMAX_WITH_CROSS_ENTROPY_FUSED_KERNEL(32); + CALL_SOFTMAX_WITH_CROSS_ENTROPY_FUSED_KERNEL(16); + CALL_SOFTMAX_WITH_CROSS_ENTROPY_FUSED_KERNEL(8); + CALL_SOFTMAX_WITH_CROSS_ENTROPY_FUSED_KERNEL(4); + CALL_SOFTMAX_WITH_CROSS_ENTROPY_FUSED_KERNEL(2); + case 1: + SetSoftmaxToOneWhenFeatureSizeIsOne<<<(batch_size + kMaxBlockDim - 1) / + kMaxBlockDim, + kMaxBlockDim, 0, stream>>>( + softmax_data, batch_size); + cudaMemsetAsync(loss_data, 0, batch_size, stream); + break; + default: + PADDLE_THROW("BlockDim must be 2^n in softmax_with_cross_entropy_op"); + break; + } + +#undef CALL_SOFTMAX_WITH_CROSS_ENTROPY_FUSED_KERNEL +} + template class SoftmaxWithCrossEntropyCUDAKernel : public framework::OpKernel { public: @@ -66,14 +256,24 @@ class SoftmaxWithCrossEntropyCUDAKernel : public framework::OpKernel { Tensor* softmax = context.Output("Softmax"); Tensor* loss = context.Output("Loss"); - softmax->mutable_data(context.GetPlace()); - loss->mutable_data(context.GetPlace()); - - math::SoftmaxFunctor()( - context.cuda_device_context(), logits, softmax); - math::CrossEntropyFunctor()( - context.cuda_device_context(), loss, softmax, labels, - context.Attr("soft_label")); + auto* softmax_data = softmax->mutable_data(context.GetPlace()); + auto* loss_data = loss->mutable_data(context.GetPlace()); + + auto soft_label = context.Attr("soft_label"); + if (soft_label) { + int batch_size = logits->dims()[0]; + int feature_size = logits->dims()[1]; + auto* logits_data = logits->data(); + auto* labels_data = labels->data(); + SoftmaxWithCrossEntropyFusedKernel( + logits_data, labels_data, softmax_data, loss_data, batch_size, + feature_size, context.cuda_device_context().stream()); + } else { + math::SoftmaxCUDNNFunctor()(context.cuda_device_context(), logits, + softmax); + math::CrossEntropyFunctor()( + context.cuda_device_context(), loss, softmax, labels, false); + } } }; diff --git a/paddle/fluid/operators/split_byref_op.cc b/paddle/fluid/operators/split_byref_op.cc index 7413ce3e9c..bc998e1abb 100644 --- a/paddle/fluid/operators/split_byref_op.cc +++ b/paddle/fluid/operators/split_byref_op.cc @@ -64,8 +64,7 @@ class SplitByrefOp : public framework::OperatorWithKernel { class SplitByrefOpMaker : public framework::OpProtoAndCheckerMaker { public: - SplitByrefOpMaker(OpProto *proto, OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("X", "(Tensor) Input tensor of the split operator."); AddOutput("Out", "(Tensor) Output tensors of the split operator.") .AsDuplicable(); diff --git a/paddle/fluid/operators/split_ids_op.cc b/paddle/fluid/operators/split_ids_op.cc index a53cbc8ac5..c867c46873 100644 --- a/paddle/fluid/operators/split_ids_op.cc +++ b/paddle/fluid/operators/split_ids_op.cc @@ -19,8 +19,7 @@ namespace operators { class SplitIdsOpMaker : public 
framework::OpProtoAndCheckerMaker { public: - SplitIdsOpMaker(OpProto *proto, OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("Ids", "(LoDTensor) the input ids with shape{batch_num, 1}"); AddOutput("Out", "(LoDTensor) The outputs of the input Ids.") .AsDuplicable(); diff --git a/paddle/fluid/operators/split_ids_op.h b/paddle/fluid/operators/split_ids_op.h index d263426e07..c4af5a65fc 100644 --- a/paddle/fluid/operators/split_ids_op.h +++ b/paddle/fluid/operators/split_ids_op.h @@ -14,6 +14,7 @@ limitations under the License. */ #pragma once +#include #include #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/operators/math/selected_rows_functor.h" @@ -67,10 +68,15 @@ class SplitIdsOpKernel : public framework::OpKernel { const auto &ids_rows = ids_selected_rows->rows(); auto outs = ctx.MultiOutput("Out"); const size_t shard_num = outs.size(); + for (auto &out : outs) { + out->mutable_rows()->clear(); + } // get rows for outputs - for (auto &id : ids_rows) { - size_t shard_id = static_cast(id) % shard_num; - outs[shard_id]->mutable_rows()->push_back(id); + std::unordered_map id_to_index; + for (size_t i = 0; i < ids_rows.size(); ++i) { + id_to_index[ids_rows[i]] = i; + size_t shard_id = static_cast(ids_rows[i]) % shard_num; + outs[shard_id]->mutable_rows()->push_back(ids_rows[i]); } int64_t row_width = ids_dims[1]; @@ -80,7 +86,8 @@ class SplitIdsOpKernel : public framework::OpKernel { {static_cast(out->rows().size()), row_width}); T *output = out->mutable_value()->mutable_data(ddim, place); for (int64_t i = 0; i < ddim[0]; ++i) { - memcpy(output + i * row_width, ids + out->rows()[i] * row_width, + memcpy(output + i * row_width, + ids + id_to_index[out->rows()[i]] * row_width, row_width * sizeof(T)); } } diff --git a/paddle/fluid/operators/split_lod_tensor_op.cc b/paddle/fluid/operators/split_lod_tensor_op.cc index 3222cce239..767449cde9 100644 --- a/paddle/fluid/operators/split_lod_tensor_op.cc +++ b/paddle/fluid/operators/split_lod_tensor_op.cc @@ -125,8 +125,7 @@ class SplitLoDTensorOp : public framework::OperatorBase { class SplitLoDTensorOpProtoMaker : public framework::OpProtoAndCheckerMaker { public: - SplitLoDTensorOpProtoMaker(OpProto *proto, OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("X", "The input LoDTensor"); AddInput("Mask", "A bool column vector which mask the input"); AddOutput("OutTrue", "True branch of input LoDTensor"); diff --git a/paddle/fluid/operators/split_op.cc b/paddle/fluid/operators/split_op.cc index a4398df36b..d661b276bc 100644 --- a/paddle/fluid/operators/split_op.cc +++ b/paddle/fluid/operators/split_op.cc @@ -70,8 +70,7 @@ class SplitOp : public framework::OperatorWithKernel { class SplitOpMaker : public framework::OpProtoAndCheckerMaker { public: - SplitOpMaker(OpProto *proto, OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("X", "(Tensor) Input tensor of the split operator."); AddOutput("Out", "(Tensor) Output tensors of the split operator.") .AsDuplicable(); @@ -116,4 +115,7 @@ USE_CPU_ONLY_OP(concat); REGISTER_OPERATOR(split, ops::SplitOp, ops::SplitOpMaker, ops::SplitGradMaker); REGISTER_OP_CPU_KERNEL(split, - ops::SplitOpKernel); + ops::SplitOpKernel, + ops::SplitOpKernel, + ops::SplitOpKernel, + ops::SplitOpKernel); diff --git a/paddle/fluid/operators/split_op.cu.cc b/paddle/fluid/operators/split_op.cu.cc index efa378af85..18e0904681 100644 --- 
a/paddle/fluid/operators/split_op.cu.cc +++ b/paddle/fluid/operators/split_op.cu.cc @@ -15,4 +15,7 @@ limitations under the License. */ #include "paddle/fluid/operators/split_op.h" namespace ops = paddle::operators; REGISTER_OP_CUDA_KERNEL( - split, ops::SplitOpKernel); + split, ops::SplitOpKernel, + ops::SplitOpKernel, + ops::SplitOpKernel, + ops::SplitOpKernel); diff --git a/paddle/fluid/operators/split_selected_rows_op.cc b/paddle/fluid/operators/split_selected_rows_op.cc index e1ce3d0c1b..76615a9405 100644 --- a/paddle/fluid/operators/split_selected_rows_op.cc +++ b/paddle/fluid/operators/split_selected_rows_op.cc @@ -19,8 +19,7 @@ namespace operators { class SplitSelectedRowsOpMaker : public framework::OpProtoAndCheckerMaker { public: - SplitSelectedRowsOpMaker(OpProto *proto, OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("X", "The input SelectedRows."); AddOutput("Out", "The outputs of the input SelectedRows.").AsDuplicable(); AddAttr>("height_sections", diff --git a/paddle/fluid/operators/spp_op.cc b/paddle/fluid/operators/spp_op.cc index 1cada95501..a2a96b72f0 100644 --- a/paddle/fluid/operators/spp_op.cc +++ b/paddle/fluid/operators/spp_op.cc @@ -20,8 +20,7 @@ namespace operators { class SppOpMaker : public framework::OpProtoAndCheckerMaker { public: - SppOpMaker(OpProto* proto, OpAttrChecker* op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput( "X", "(Tensor) The input tensor of spp operator. " diff --git a/paddle/fluid/operators/squared_l2_distance_op.cc b/paddle/fluid/operators/squared_l2_distance_op.cc index c32f575b54..42532a294b 100644 --- a/paddle/fluid/operators/squared_l2_distance_op.cc +++ b/paddle/fluid/operators/squared_l2_distance_op.cc @@ -56,8 +56,7 @@ class SquaredL2DistanceOp : public framework::OperatorWithKernel { class SquaredL2DistanceOpMaker : public framework::OpProtoAndCheckerMaker { public: - SquaredL2DistanceOpMaker(OpProto* proto, OpAttrChecker* op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("X", "(Tensor) Input of SquaredL2DistanceOp."); AddInput("Y", "(Tensor) Target of SquaredL2DistanceOp."); AddOutput("sub_result", diff --git a/paddle/fluid/operators/squared_l2_norm_op.cc b/paddle/fluid/operators/squared_l2_norm_op.cc index 4ce51259da..7bd82e0ce4 100644 --- a/paddle/fluid/operators/squared_l2_norm_op.cc +++ b/paddle/fluid/operators/squared_l2_norm_op.cc @@ -48,8 +48,7 @@ class SquaredL2NormGradOp : public framework::OperatorWithKernel { class SquaredL2NormOpMaker : public framework::OpProtoAndCheckerMaker { public: - SquaredL2NormOpMaker(OpProto* proto, OpAttrChecker* op_checker) - : framework::OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("X", "(Tensor) The input of squared_l2_norm op."); AddOutput("Out", "(Scalar) The output of squared_l2_norm op."); AddComment(R"DOC( diff --git a/paddle/fluid/operators/squeeze_op.cc b/paddle/fluid/operators/squeeze_op.cc new file mode 100644 index 0000000000..6c507baf3a --- /dev/null +++ b/paddle/fluid/operators/squeeze_op.cc @@ -0,0 +1,202 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+#include <string>
+#include <vector>
+#include "paddle/fluid/framework/op_registry.h"
+
+namespace paddle {
+namespace operators {
+
+class SqueezeOpInferShape : public framework::InferShapeBase {
+ public:
+  void operator()(framework::InferShapeContext *ctx) const override {
+    PADDLE_ENFORCE(ctx->HasInput("X"),
+                   "Input(X) of SqueezeOp should not be null.");
+    PADDLE_ENFORCE(ctx->HasOutput("Out"),
+                   "Output(Out) of SqueezeOp should not be null.");
+
+    const auto &x_dims = ctx->GetInputDim("X");
+    // Check input tensor dims (<= 6), an Eigen limit.
+    PADDLE_ENFORCE(x_dims.size() <= 6,
+                   "Invalid dimensions, the rank of Input(X) "
+                   "should be in the range of [1, 6] (Eigen limit).");
+
+    const auto &axes = ctx->Attrs().Get<std::vector<int>>("axes");
+    for (int a : axes) {
+      PADDLE_ENFORCE_LT(a, x_dims.size(),
+                        "The squeeze axis should be less than input "
+                        "tensor's rank.");
+    }
+
+    auto out_dims = GetOutputShape(axes, x_dims);
+    ctx->SetOutputDim("Out", out_dims);
+    if (x_dims[0] == out_dims[0]) {
+      // Only pass LoD when the first dimension of output and Input(X)
+      // are the same.
+      ctx->ShareLoD("X", "Out");
+    }
+  }
+
+  static framework::DDim GetOutputShape(const std::vector<int> squeeze_dims,
+                                        const framework::DDim &in_dims) {
+    size_t num_squeeze_dims = squeeze_dims.size();
+    int cnt_squeezed_dims = 0;
+    bool should_squeeze[9] = {false};
+
+    // Determines the number of dimensions of the output tensor after squeeze.
+    // Mark and count the dimensions that need to be squeezed
+    if (num_squeeze_dims == 0) {
+      for (int idx = 0; idx < in_dims.size(); ++idx) {
+        if (in_dims[idx] == 1) {
+          should_squeeze[idx] = true;
+          ++cnt_squeezed_dims;
+        }
+      }
+    } else {
+      for (size_t idx = 0; idx < num_squeeze_dims; ++idx) {
+        int current = squeeze_dims[idx] < 0 ? squeeze_dims[idx] + in_dims.size()
+                                            : squeeze_dims[idx];
+        // Check the current index; the upper limit has been checked in line 36.
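+        // A negative axis was wrapped by adding in_dims.size() above, so a
+        // value that is still negative was out of range on entry.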
+ PADDLE_ENFORCE(current >= 0, + "Invalid axis, the negative axis is out of range."); + PADDLE_ENFORCE(in_dims[current] == 1, + "Invalid axis index, the axis that will be squeezed " + "should be equal to 1."); + + if (!(should_squeeze[current])) { + ++cnt_squeezed_dims; + } + should_squeeze[current] = true; + } + } + + // Make output dimensions + std::vector output_shape(in_dims.size() - cnt_squeezed_dims, 0); + for (int in_idx = 0, out_idx = 0; in_idx < in_dims.size(); ++in_idx) { + if (!should_squeeze[in_idx]) { + output_shape[out_idx++] = in_dims[in_idx]; + } + } + + return framework::make_ddim(output_shape); + } +}; + +class SqueezeOp : public framework::OperatorBase { + public: + using OperatorBase::OperatorBase; + + private: + void RunImpl(const framework::Scope &scope, + const platform::Place &place) const override { + auto &axes = Attr>("axes"); + auto x_dims = scope.FindVar(Input("X"))->Get().dims(); + auto out_dims = SqueezeOpInferShape::GetOutputShape(axes, x_dims); + + framework::AttributeMap attrs; + attrs["shape"] = framework::vectorize2int(out_dims); + attrs["inplace"] = Attr("inplace"); + // Invoke Reshape Op + auto reshape_op = framework::OpRegistry::CreateOp( + "reshape", {{"X", {Input("X")}}, {"Shape", {}}}, + {{"Out", {Output("Out")}}}, attrs); + reshape_op->Run(scope, place); + } +}; + +class SqueezeOpMaker : public framework::OpProtoAndCheckerMaker { + public: + void Make() override { + AddInput("X", "(Tensor). The input tensor of squeeze operator."); + AddOutput("Out", "(Tensor). The output tensor of squeeze operator."); + AddAttr>("axes", + "(std::vector). List of integers," + " indicating the dimensions to squeeze.") + .SetDefault({}); + AddAttr("inplace", + "(default: false) Squeeze the source tensor's shape without " + "memory copy. When Attr(inplace) is set true, the output " + "tensor shares memory with Input(X), otherwise, a new output " + "tensor is created, and its data are copied from Input(x).") + .SetDefault(false); + AddComment(R"DOC( + Squeeze Operator. + + Remove single-dimensional entries from the shape of a tensor. + Takes a parameter axes with a list of axes to squeeze. + If axes is not provided, all the single dimensions will be removed from the shape. + If an axis is selected with shape entry not equal to one, an error is raised. 
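+  A negative axis value is interpreted as axis + rank(X).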
+ + Examples: + Case 1: + Given + X.shape = (1, 3, 1, 5) + and + axes = [0] + we get: + Out.shape = (3, 1, 5) + + Case 2: + Given + X.shape = (1, 3, 1, 5) + and + axes = [] + we get: + Out.shape = (3, 5) + )DOC"); + } +}; + +class SqueezeGradInferShape : public framework::InferShapeBase { + public: + void operator()(framework::InferShapeContext *context) const override { + context->SetOutputDim(framework::GradVarName("X"), + context->GetInputDim("X")); + context->ShareLoD("X", framework::GradVarName("X")); + } +}; + +class SqueezeGradOp : public framework::OperatorBase { + public: + using OperatorBase::OperatorBase; + + private: + void RunImpl(const framework::Scope &scope, + const platform::Place &place) const override { + auto dx_name = Output(framework::GradVarName("X")); + auto dout_name = Input(framework::GradVarName("Out")); + auto x_dims = scope.FindVar(Input("X"))->Get().dims(); + framework::AttributeMap attrs; + attrs["shape"] = framework::vectorize2int(x_dims); + attrs["inplace"] = Attr("inplace"); + + auto reshape_op = framework::OpRegistry::CreateOp( + "reshape", {{"X", {dout_name}}, {"Shape", {}}}, {{"Out", {dx_name}}}, + attrs); + reshape_op->Run(scope, place); + } +}; + +} // namespace operators +} // namespace paddle + +// Tell linker to use reshape op +USE_OP(reshape); + +namespace ops = paddle::operators; +REGISTER_OPERATOR(squeeze, ops::SqueezeOp, ops::SqueezeOpMaker, + ops::SqueezeOpInferShape, + paddle::framework::DefaultGradOpDescMaker); +REGISTER_OPERATOR(squeeze_grad, ops::SqueezeGradOp, ops::SqueezeGradInferShape); diff --git a/paddle/fluid/operators/sum_mkldnn_op.cc b/paddle/fluid/operators/sum_mkldnn_op.cc new file mode 100644 index 0000000000..d2035777ee --- /dev/null +++ b/paddle/fluid/operators/sum_mkldnn_op.cc @@ -0,0 +1,240 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/*Licensed under the Apache License, Version 2.0(the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
*/ + +#include "mkldnn.hpp" +#include "paddle/fluid/framework/tensor.h" +#include "paddle/fluid/operators/math/selected_rows_functor.h" +#include "paddle/fluid/operators/sum_op.h" +#include "paddle/fluid/platform/device_context.h" +#include "paddle/fluid/platform/mkldnn_helper.h" + +namespace paddle { +namespace operators { + +using paddle::framework::Tensor; +using paddle::platform::MKLDNNDeviceContext; +using paddle::platform::CPUDeviceContext; +using framework::DataLayout; +using mkldnn::memory; +using mkldnn::primitive; +using mkldnn::stream; +using mkldnn::sum; +using mkldnn::reorder; +using platform::to_void_cast; + +template +class SumMKLDNNOpKernel : public paddle::framework::OpKernel { + public: + void Compute(const paddle::framework::ExecutionContext& ctx) const override { + PADDLE_ENFORCE(paddle::platform::is_cpu_place(ctx.GetPlace()), + "It must use CPUPlace."); + auto& dev_ctx = ctx.template device_context(); + const auto& mkldnn_engine = dev_ctx.GetEngine(); + auto in_vars = ctx.MultiInputVar("X"); + + const int N = in_vars.size(); + auto out_var = ctx.OutputVar("Out"); + bool in_place = out_var == in_vars[0]; + + if (out_var->IsType()) { + LoDTensor* output = ctx.Output("Out"); + T* output_data = output->mutable_data(ctx.GetPlace()); + + std::vector dst_tz = framework::vectorize2int(output->dims()); + auto src_tz = dst_tz; + memory::format output_format{memory::format::format_undef}; + std::vector scales; + std::vector srcs_mpd; + std::vector srcs_mem; + + PADDLE_ENFORCE(in_vars[0]->IsType(), + "Input[0] must be LoDTensors"); + auto& input0 = in_vars[0]->Get(); + PADDLE_ENFORCE(input0.layout() == DataLayout::kMKLDNN && + input0.format() != memory::format::format_undef, + "Wrong layout/format for inputs[0]"); + + memory::format input_format = input0.format(); + + if (src_tz.size() == 1 && (input_format == memory::format::nchw || + input_format == memory::format::nhwc)) { + input_format = memory::format::x; + } + if (src_tz.size() == 2 && (input_format == memory::format::nchw || + input_format == memory::format::nhwc)) { + input_format = memory::format::nc; + } + + for (int i = 0; i < N; i++) { + PADDLE_ENFORCE(in_vars[i]->IsType(), + "all inputs must be all LoDTensors"); + auto& input = in_vars[i]->Get(); + PADDLE_ENFORCE(input.layout() == DataLayout::kMKLDNN && + input.format() != memory::format::format_undef, + "Wrong layout/format for inputs"); + + if (input.numel() == 0) { + continue; + } + + const T* input_data = input.data(); + + auto src_md = + memory::desc(src_tz, memory::data_type::f32, input_format); + auto src_mpd = memory::primitive_desc(src_md, mkldnn_engine); + auto src_mem = memory(src_mpd, to_void_cast(input_data)); + srcs_mpd.push_back(src_mpd); + srcs_mem.push_back(src_mem); + scales.push_back(1.0); + } + + auto dst_md = + memory::desc(dst_tz, memory::data_type::f32, memory::format::any); + + auto sum_pd = sum::primitive_desc(dst_md, scales, srcs_mpd); + + std::shared_ptr dst_mem; + if (in_place) { + dst_mem.reset(new memory(sum_pd.dst_primitive_desc())); + } else { + dst_mem.reset(new memory(sum_pd.dst_primitive_desc(), output_data)); + } + std::vector inputs; + for (size_t i = 0; i < srcs_mem.size(); ++i) { + inputs.push_back(srcs_mem[i]); + } + + auto sum_prim = mkldnn::sum(sum_pd, inputs, *dst_mem); + output_format = (memory::format)platform::GetMKLDNNFormat(sum_pd); + + primitive reorder_prim; + std::shared_ptr target_mem; + if (in_place) { + output_format = input_format; + target_mem.reset(new memory( + {{{src_tz}, memory::data_type::f32, 
output_format}, mkldnn_engine}, + output_data)); + reorder_prim = reorder(*dst_mem, *target_mem); + } + + std::vector pipeline; + pipeline.push_back(sum_prim); + if (in_place) pipeline.push_back(reorder_prim); + stream(stream::kind::eager).submit(pipeline).wait(); + + output->set_layout(DataLayout::kMKLDNN); + output->set_format(output_format); + } else if (out_var->IsType()) { + // TODO(@mozga-intel) Add MKLDNN SelectedRows support + std::unique_ptr in0; + if (in_place) { + // If is in_place, we store the input[0] to in0 + auto& in_sel0 = in_vars[0]->Get(); + auto& rows = in_sel0.rows(); + in0.reset(new framework::SelectedRows(rows, in_sel0.height())); + in0->mutable_value()->ShareDataWith(in_sel0.value()); + } + + auto get_selected_row = [&](size_t i) -> const SelectedRows& { + if (i == 0 && in0) { + return *in0.get(); + } else { + return in_vars[i]->Get(); + } + }; + auto* out = ctx.Output("Out"); + out->mutable_rows()->clear(); + auto* out_value = out->mutable_value(); + + // Runtime InferShape + size_t first_dim = 0; + for (int i = 0; i < N; i++) { + auto& sel_row = get_selected_row(i); + first_dim += sel_row.rows().size(); + } + auto in_dim = + framework::vectorize(get_selected_row(N - 1).value().dims()); + in_dim[0] = static_cast(first_dim); + + out_value->Resize(framework::make_ddim(in_dim)); + + // if all the input sparse vars are empty, no need to + // merge these vars. + if (first_dim == 0UL) { + return; + } + out_value->mutable_data(ctx.GetPlace()); + math::SelectedRowsAddTo functor; + int64_t offset = 0; + for (int i = 0; i < N; i++) { + auto& sel_row = get_selected_row(i); + if (sel_row.rows().size() == 0) { + continue; + } + PADDLE_ENFORCE_EQ(out->height(), sel_row.height()); + functor(ctx.template device_context(), sel_row, + offset, out); + offset += sel_row.value().numel(); + } + } else if (out_var->IsType()) { + // TODO(@mozga-intel) Add MKLDNN LoDTensorArray support + auto& out_array = *out_var->GetMutable(); + for (size_t i = in_place ? 1 : 0; i < in_vars.size(); ++i) { + PADDLE_ENFORCE(in_vars[i]->IsType(), + "Only support all inputs are TensorArray"); + auto& in_array = in_vars[i]->Get(); + + for (size_t i = 0; i < in_array.size(); ++i) { + if (in_array[i].numel() != 0) { + if (i >= out_array.size()) { + out_array.resize(i + 1); + } + if (out_array[i].numel() == 0) { + framework::TensorCopy(in_array[i], in_array[i].place(), + ctx.device_context(), &out_array[i]); + out_array[i].set_lod(in_array[i].lod()); + } else { + PADDLE_ENFORCE(out_array[i].lod() == in_array[i].lod()); + auto in = EigenVector::Flatten(in_array[i]); + auto result = EigenVector::Flatten(out_array[i]); + result.device(*ctx.template device_context() + .eigen_device()) = result + in; + } + } + } + } + } else { + PADDLE_THROW("Unexpected branch, output variable type is %s", + out_var->Type().name()); + } + } +}; + +} // namespace operators +} // namespace paddle + +REGISTER_OP_KERNEL(sum, MKLDNN, ::paddle::platform::CPUPlace, + paddle::operators::SumMKLDNNOpKernel); diff --git a/paddle/fluid/operators/sum_op.cc b/paddle/fluid/operators/sum_op.cc index 108f26fafe..fe7c7039c7 100644 --- a/paddle/fluid/operators/sum_op.cc +++ b/paddle/fluid/operators/sum_op.cc @@ -18,6 +18,10 @@ limitations under the License. 
*/ #include "paddle/fluid/framework/var_type_inference.h" #include "paddle/fluid/operators/detail/safe_ref.h" +#ifdef PADDLE_WITH_MKLDNN +#include "paddle/fluid/platform/mkldnn_helper.h" +#endif + namespace paddle { namespace operators { using framework::Tensor; @@ -63,6 +67,18 @@ class SumOp : public framework::OperatorWithKernel { framework::OpKernelType GetExpectedKernelType( const framework::ExecutionContext& ctx) const override { auto x_vars = ctx.MultiInputVar("X"); + + framework::LibraryType library{framework::LibraryType::kPlain}; + framework::DataLayout layout{framework::DataLayout::kAnyLayout}; + +#ifdef PADDLE_WITH_MKLDNN + if (library == framework::LibraryType::kPlain && + platform::CanMKLDNNBeUsed(ctx)) { + library = framework::LibraryType::kMKLDNN; + layout = framework::DataLayout::kMKLDNN; + } +#endif + if (x_vars[0]->IsType()) { int dtype = -1; for (auto& x_var : x_vars) { @@ -80,26 +96,27 @@ class SumOp : public framework::OperatorWithKernel { "Sum operator should have at least one tensor"); return framework::OpKernelType( - static_cast(dtype), - ctx.device_context()); + static_cast(dtype), ctx.GetPlace(), + layout, library); } else if (x_vars[0]->IsType()) { for (auto& var : x_vars) { auto& value = var->Get().value(); if (value.IsInitialized()) { return framework::OpKernelType(framework::ToDataType(value.type()), - ctx.device_context()); + ctx.device_context(), layout, library); } } // if input sparse vars are not initialized, use an default kernel type. return framework::OpKernelType(framework::proto::VarType::FP32, - ctx.device_context()); + ctx.device_context(), layout, library); } else if (x_vars[0]->IsType()) { for (auto& x_var : x_vars) { auto& array = x_var->Get(); for (auto& each : array) { if (each.numel() != 0) { return framework::OpKernelType(framework::ToDataType(each.type()), - ctx.device_context()); + ctx.device_context(), layout, + library); } } } @@ -112,11 +129,13 @@ class SumOp : public framework::OperatorWithKernel { class SumOpMaker : public framework::OpProtoAndCheckerMaker { public: - SumOpMaker(OpProto* proto, OpAttrChecker* op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("X", "(vector) The input tensors of sum operator.") .AsDuplicable(); - AddOutput("Out", "(Tensor) The output tensor of sum operator."); + AddOutput("Out", "(Tensor) The output tensor of sum operator.").Reuse("X"); + AddAttr("use_mkldnn", + "(bool, default false) Only used in mkldnn kernel") + .SetDefault(false); AddComment(R"DOC( Sum operator. 
@@ -133,7 +152,6 @@ class SumOpVarTypeInference : public framework::VarTypeInference { framework::BlockDesc* block) const override { auto& inputs = op_desc.Input("X"); auto var_type = framework::proto::VarType::SELECTED_ROWS; - for (auto& name : op_desc.Input("X")) { VLOG(10) << name << " " << block->FindRecursiveOrCreateVar(name).GetType(); @@ -207,6 +225,7 @@ namespace ops = paddle::operators; REGISTER_OPERATOR(sum, ops::SumOp, ops::SumOpMaker, ops::SumGradMaker, ops::SumOpVarTypeInference); + REGISTER_OP_CPU_KERNEL( sum, ops::SumKernel, ops::SumKernel, diff --git a/paddle/fluid/operators/tensor_array_read_write_op.cc b/paddle/fluid/operators/tensor_array_read_write_op.cc index 2636812c42..a2d44284e9 100644 --- a/paddle/fluid/operators/tensor_array_read_write_op.cc +++ b/paddle/fluid/operators/tensor_array_read_write_op.cc @@ -38,15 +38,14 @@ class WriteToArrayOp : public ArrayOp { << " to " << offset + 1; out->resize(offset + 1); } + auto *out_tensor = &out->at(offset); + out_tensor->set_lod(x_tensor.lod()); if (x_tensor.memory_size() > 0) { - auto *out_tensor = &out->at(offset); - platform::DeviceContextPool &pool = platform::DeviceContextPool::Instance(); auto &dev_ctx = *pool.Get(place); TensorCopy(x_tensor, place, dev_ctx, out_tensor); - out_tensor->set_lod(x_tensor.lod()); } else { VLOG(10) << "WARNING: The input tensor 'x_tensor' holds no memory, so " "nothing has been written to output array[" @@ -57,8 +56,7 @@ class WriteToArrayOp : public ArrayOp { class WriteToArrayOpProtoMaker : public framework::OpProtoAndCheckerMaker { public: - WriteToArrayOpProtoMaker(OpProto *proto, OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("X", "(LoDTensor) the tensor will be written to tensor array"); AddInput( "I", @@ -148,8 +146,7 @@ class ReadFromArrayOp : public ArrayOp { class ReadFromArrayProtoMaker : public framework::OpProtoAndCheckerMaker { public: - ReadFromArrayProtoMaker(OpProto *proto, OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("X", "(TensorArray) the array will be read from."); AddInput("I", "(Tensor) the subscript index in tensor array. The number of " diff --git a/paddle/fluid/operators/tensorrt_engine_op.cc b/paddle/fluid/operators/tensorrt_engine_op.cc new file mode 100644 index 0000000000..ee3078876c --- /dev/null +++ b/paddle/fluid/operators/tensorrt_engine_op.cc @@ -0,0 +1,160 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
*/ + +#ifdef PADDLE_WITH_CUDA + +#include +#include + +#include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/inference/tensorrt/convert/op_converter.h" +#include "paddle/fluid/inference/tensorrt/engine.h" +#include "paddle/fluid/inference/utils/singleton.h" +#include "paddle/fluid/operators/tensorrt_engine_op.h" + +namespace paddle { + +DEFINE_int32(tensorrt_engine_batch_size, 1, "the batch_size of TensorRT"); + +namespace operators { + +using inference::Singleton; +using inference::tensorrt::TRT_EngineManager; + +using FluidDT = framework::proto::VarType_Type; +using TRT_DT = nvinfer1::DataType; + +namespace { + +TRT_DT FluidDataType2TRT(FluidDT type) { + switch (type) { + case FluidDT::VarType_Type_FP32: + return TRT_DT::kFLOAT; + case FluidDT::VarType_Type_INT32: + return TRT_DT::kINT32; + default: + return TRT_DT::kINT32; + } + PADDLE_THROW("unkown type"); + return TRT_DT::kINT32; +} + +nvinfer1::Dims Vec2TRT_Dims(const std::vector &shape) { + PADDLE_ENFORCE_GT(shape.size(), 1UL, + "TensorRT' tensor input requires at least 2 dimensions"); + PADDLE_ENFORCE_LE(shape.size(), 4UL, + "TensorRT' tensor input requires at most 4 dimensions"); + PADDLE_ENFORCE_EQ(shape.size(), 4UL); + return nvinfer1::DimsCHW(shape[1], shape[2], shape[3]); +} + +} // namespace + +template +void TensorRTEngineKernel::Prepare( + const framework::ExecutionContext &context) const { + VLOG(4) << "Prepare engine"; + // Get the ProgramDesc and pass to convert. + framework::proto::BlockDesc block_desc; + block_desc.ParseFromString(context.Attr("subgraph")); + int max_batch = context.Attr("max_batch"); + auto max_workspace = context.Attr("max_workspace"); + auto params = context.Attr>("parameters"); + std::unordered_set parameters; + for (const auto ¶m : params) { + parameters.insert(param); + } + + std::vector output_maps = + context.Attr>("output_name_mapping"); + + // TODO(Superjomn) replace this with a different stream + auto *engine = Singleton::Global().Create( + max_batch, max_workspace, nullptr /*engine hold its own stream*/, + context.Attr("engine_uniq_key")); + engine->InitNetwork(); + + framework::BlockDesc block(nullptr /*programdesc*/, &block_desc); + VLOG(4) << "parsed var size " << block.AllVars().size(); + // Add inputs + VLOG(4) << "declare inputs"; + for (auto &input : context.Inputs("Xs")) { + if (parameters.count(input)) continue; + VLOG(4) << "declare input " << input; + auto *var = block.FindVar(input); + // TensorRT engine need to create parameters. The parameter's description + // should be set in + PADDLE_ENFORCE(var, "no variable called %s", input); + PADDLE_ENFORCE_EQ(var->GetType(), FluidDT::VarType_Type_LOD_TENSOR, + "TensorRT engine only takes LoDTensor as input"); + auto shape = var->GetShape(); + // For the special batch_size placeholder -1, drop it and pass the real + // shape of data. + // TODO(Superjomn) fix this with batch broadcast, or it can't handle + // variational batch size. 
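+    // A minimal worked example (hypothetical shape): with the flag left at
+    // its default of 1, a declared input shape of {-1, 3, 224, 224} is
+    // rewritten below to {1, 3, 224, 224} before it is handed to
+    // engine->DeclareInput().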
+ if (shape[0] == -1) { + shape[0] = FLAGS_tensorrt_engine_batch_size; + } + engine->DeclareInput( + input, FluidDataType2TRT( + var->Proto()->type().lod_tensor().tensor().data_type()), + Vec2TRT_Dims(shape)); + } + + inference::Singleton::Global().ConvertBlock( + block_desc, parameters, context.scope(), engine); + + // Add outputs + for (auto &output : output_maps) { + engine->DeclareOutput(output); + } + + engine->FreezeNetwork(); +} + +class TensorRTEngineOpMaker : public framework::OpProtoAndCheckerMaker { + public: + void Make() override { + AddInput("Xs", "A list of inputs.").AsDuplicable(); + AddOutput("Ys", "A list of outputs").AsDuplicable(); + AddAttr("subgraph", "the subgraph."); + AddAttr("engine_uniq_key", "unique key for the TRT engine."); + AddAttr("max_batch", "the maximum batch size."); + AddAttr("max_workspace", "the maximum batch size."); + AddComment("TensorRT engine operator."); + } +}; + +class TensorRTEngineInferVarType : public framework::VarTypeInference { + public: + void operator()(const framework::OpDesc &op_desc, + framework::BlockDesc *block) const override {} +}; + +} // namespace operators +} // namespace paddle + +namespace ops = paddle::operators; + +REGISTER_OPERATOR(tensorrt_engine, ops::TensorRTEngineOp, + ops::TensorRTEngineOpMaker, ops::TensorRTEngineOpMaker); + +REGISTER_OP_CPU_KERNEL( + tensorrt_engine, + ops::TensorRTEngineKernel, + ops::TensorRTEngineKernel, + ops::TensorRTEngineKernel, + ops::TensorRTEngineKernel); + +#endif // PADDLE_WITH_CUDA diff --git a/paddle/fluid/operators/tensorrt_engine_op.h b/paddle/fluid/operators/tensorrt_engine_op.h new file mode 100644 index 0000000000..2cbe1213a2 --- /dev/null +++ b/paddle/fluid/operators/tensorrt_engine_op.h @@ -0,0 +1,138 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
*/ + +#pragma once + +#ifdef PADDLE_WITH_CUDA + +#include +#include + +#include "paddle/fluid/framework/operator.h" +#include "paddle/fluid/inference/analysis/helper.h" +#include "paddle/fluid/inference/tensorrt/engine.h" + +namespace paddle { + +DECLARE_int32(tensorrt_engine_batch_size); + +namespace operators { + +using inference::Singleton; +using inference::tensorrt::TRT_EngineManager; + +class TensorRTEngineOp : public framework::OperatorWithKernel { + public: + using framework::OperatorWithKernel::OperatorWithKernel; + + protected: + void InferShape(framework::InferShapeContext* ctx) const override {} + + framework::OpKernelType GetExpectedKernelType( + const framework::ExecutionContext& ctx) const override { + auto input0 = ctx.Inputs("Xs").front(); + framework::OpKernelType kt = framework::OpKernelType( + framework::ToDataType(ctx.scope() + .FindVar(input0) + ->GetMutable() + ->type()), + platform::CPUPlace()); + return kt; + } +}; + +template +class TensorRTEngineKernel : public framework::OpKernel { + public: + void Compute(const framework::ExecutionContext& context) const override { + auto engine_name = context.Attr("engine_uniq_key"); + if (!Singleton::Global().HasEngine(engine_name)) { + Prepare(context); + } + auto* engine = Singleton::Global().Get(engine_name); + auto input_names = context.op().Inputs("Xs"); + PADDLE_ENFORCE(!input_names.empty(), "should pass more than one inputs"); + PADDLE_ENFORCE_LE(FLAGS_tensorrt_engine_batch_size, + context.Attr("max_batch")); + + std::vector output_maps = + context.Attr>("output_name_mapping"); + + auto params = context.Attr>("parameters"); + std::unordered_set parameters; + for (const auto& param : params) { + parameters.insert(param); + } + // Convert input tensor from fluid to engine. + for (const auto& x : context.Inputs("Xs")) { + if (parameters.count(x)) continue; + // convert input and copy to TRT engine's buffer + auto& t = inference::analysis::GetFromScope( + context.scope(), x); + if (platform::is_cpu_place(t.place())) { + engine->SetInputFromCPU(x, static_cast(t.data()), + t.memory_size()); + } else { + engine->SetInputFromGPU(x, static_cast(t.data()), + t.memory_size()); + } + } + // Execute the engine. + PADDLE_ENFORCE_GT(FLAGS_tensorrt_engine_batch_size, 0); + engine->Execute(FLAGS_tensorrt_engine_batch_size); + + // Convert output tensor from engine to fluid + int output_index = 0; + for (const auto& y : context.Outputs("Ys")) { + // convert output and copy to fluid. + nvinfer1::ITensor* trt_t = engine->GetITensor(output_maps[output_index]); + auto dims = trt_t->getDimensions(); + // Use the output ITensor's dims to reshape the Fluid Tensor. + std::vector ddim(dims.d, dims.d + dims.nbDims); + + auto* fluid_v = context.scope().FindVar(y); + PADDLE_ENFORCE_NOT_NULL(fluid_v, "no output variable called %s", y); + auto* fluid_t = fluid_v->GetMutable(); + + fluid_t->Resize(framework::make_ddim(ddim)); + + // TODO(Superjomn) find some way to determine which device to output the + // tensor. + // if (platform::is_cpu_place(fluid_t->place())) { + // TODO(Superjomn) change this float to dtype size. 
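+      // A worked example (hypothetical dims, assuming AccuDims forms the
+      // product of the first nbDims entries): for a TRT output of dims
+      // {6, 1, 1} with FLAGS_tensorrt_engine_batch_size = 2, the copy below
+      // moves 6 * 2 = 12 elements, i.e. 12 * sizeof(float) = 48 bytes,
+      // back into the fluid tensor.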
+ auto size = inference::analysis::AccuDims(dims.d, dims.nbDims) * + FLAGS_tensorrt_engine_batch_size; + engine->GetOutputInCPU(output_maps[output_index], + fluid_t->mutable_data(platform::CPUPlace()), + size * sizeof(float)); + //} else { + // engine->GetOutputInGPU( + // y, fluid_t->mutable_data(platform::CUDAPlace()), + // size * sizeof(float)); + //} + output_index += 1; + } + + cudaStreamSynchronize(*engine->stream()); + } + + protected: + // Build the engine. + void Prepare(const framework::ExecutionContext& context) const; +}; + +} // namespace operators +} // namespace paddle + +#endif // PADDLE_WITH_CUDA diff --git a/paddle/fluid/operators/tensorrt_engine_op_test.cc b/paddle/fluid/operators/tensorrt_engine_op_test.cc new file mode 100644 index 0000000000..37657fa0b0 --- /dev/null +++ b/paddle/fluid/operators/tensorrt_engine_op_test.cc @@ -0,0 +1,218 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include +#include "paddle/fluid/framework/block_desc.h" +#include "paddle/fluid/framework/lod_tensor.h" +#include "paddle/fluid/framework/op_desc.h" +#include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/framework/program_desc.h" +#include "paddle/fluid/framework/scope.h" +#include "paddle/fluid/inference/analysis/helper.h" +#include "paddle/fluid/inference/tensorrt/convert/op_converter.h" +#include "paddle/fluid/inference/tensorrt/convert/ut_helper.h" + +USE_CPU_ONLY_OP(tensorrt_engine); + +namespace paddle { +namespace operators { + +namespace { +void CreateCPUTensor(framework::Scope* scope, const std::string& name, + const std::vector& shape) { + auto* var = scope->Var(name); + auto* tensor = var->GetMutable(); + auto dims = framework::make_ddim(shape); + tensor->Resize(dims); + platform::CPUPlace place; + platform::CPUDeviceContext ctx(place); + inference::tensorrt::RandomizeTensor(tensor, place, ctx); +} + +void AddTensorToBlockDesc(framework::proto::BlockDesc* block, + const std::string& name, + const std::vector& shape) { + using framework::proto::VarType; + auto* var = block->add_vars(); + framework::VarDesc desc(name); + desc.SetType(VarType::LOD_TENSOR); + desc.SetDataType(VarType::FP32); + desc.SetShape(shape); + *var = *desc.Proto(); +} + +} // namespace + +using inference::analysis::SetAttr; + +TEST(TensorRTEngineOp, manual) { + framework::ProgramDesc program; + auto* block_ = program.Proto()->add_blocks(); + block_->set_idx(0); + block_->set_parent_idx(-1); + + LOG(INFO) << "create block desc"; + framework::BlockDesc block_desc(&program, block_); + LOG(INFO) << "create fc op"; + auto* fc0 = block_desc.AppendOp(); + fc0->SetType("fc"); + fc0->SetInput("X", std::vector({"x"})); // 4 x 1 x 1 + fc0->SetInput("Y", std::vector({"y"})); // 4 x 6 + fc0->SetOutput("Out", std::vector({"z"})); // 6 x 1 x 1 + + LOG(INFO) << "create fc op"; + auto* fc1 = block_desc.AppendOp(); + fc1->SetType("fc"); + fc1->SetInput("X", std::vector({"z"})); + fc1->SetInput("Y", std::vector({"y0"})); // 6 x 8 + fc1->SetOutput("Out", 
std::vector({"z0"})); // 8 x 1 x 1 + + // Set inputs' variable shape in BlockDesc + // the batch size is 2, so the dims of 'x' is {2, 4, 1, 1} + AddTensorToBlockDesc(block_, "x", std::vector({2, 4, 1, 1})); + AddTensorToBlockDesc(block_, "y", std::vector({4, 6})); + AddTensorToBlockDesc(block_, "y0", std::vector({6, 8})); + AddTensorToBlockDesc(block_, "z", std::vector({2, 6})); + + // It is wired, need to copy manually. + *block_->add_ops() = *fc0->Proto(); + *block_->add_ops() = *fc1->Proto(); + + ASSERT_EQ(block_->ops_size(), 2); + + LOG(INFO) << "create tensorrt desc"; + framework::OpDesc engine_op_desc(nullptr); + engine_op_desc.SetType("tensorrt_engine"); + engine_op_desc.SetInput("Xs", std::vector({"x"})); + engine_op_desc.SetOutput("Ys", std::vector({"z0"})); + SetAttr(engine_op_desc.Proto(), "subgraph", + block_->SerializeAsString()); + SetAttr(engine_op_desc.Proto(), "max_batch", 100); + SetAttr(engine_op_desc.Proto(), "max_workspace", 1 << 10); + SetAttr(engine_op_desc.Proto(), "engine_uniq_key", "a_engine"); + SetAttr>(engine_op_desc.Proto(), "parameters", + std::vector({})); + SetAttr>(engine_op_desc.Proto(), + "output_name_mapping", + std::vector({"z0"})); + + LOG(INFO) << "create engine op"; + auto engine_op = framework::OpRegistry::CreateOp(*engine_op_desc.Proto()); + LOG(INFO) << "engine_op " << engine_op.get(); + + framework::Scope scope; + platform::CPUPlace place; + platform::CPUDeviceContext ctx(place); + // Prepare variables. + CreateCPUTensor(&scope, "x", std::vector({2, 4})); + CreateCPUTensor(&scope, "y", std::vector({4, 6})); + CreateCPUTensor(&scope, "z", std::vector({2, 6})); + + CreateCPUTensor(&scope, "y0", std::vector({6, 8})); + CreateCPUTensor(&scope, "z0", std::vector({2, 8})); + + // Execute them. + LOG(INFO) << "engine_op run"; + engine_op->Run(scope, place); +} + +void Execute(int batch_size, int input_dim, int output_dim, int nlayers = 1) { + framework::ProgramDesc program; + framework::Scope scope; + platform::CPUPlace place; + platform::CPUDeviceContext ctx(place); + + auto* block_ = program.Proto()->add_blocks(); + block_->set_idx(0); + block_->set_parent_idx(-1); + + using shape_t = std::vector; + + LOG(INFO) << "create block desc"; + framework::BlockDesc block_desc(&program, block_); + + auto AddFCLayer = [&](const std::string& x_name, const std::string& y_name, + const std::string& z_name, bool x_created, + const shape_t& x_shape, const shape_t& y_shape, + const shape_t& z_shape) { + LOG(INFO) << "create fc op"; + auto* fc = block_desc.AppendOp(); + fc->SetType("mul"); + fc->SetInput("X", std::vector({x_name})); + fc->SetInput("Y", std::vector({y_name})); + fc->SetOutput("Out", std::vector({z_name})); + + // Set inputs' variable shape in BlockDesc + if (!x_created) { + AddTensorToBlockDesc(block_, x_name, + std::vector({batch_size, input_dim, 1, 1})); + } + AddTensorToBlockDesc(block_, y_name, + std::vector({input_dim, output_dim})); + AddTensorToBlockDesc(block_, z_name, + std::vector({batch_size, output_dim})); + + // Prepare variables. + if (!x_created) { + CreateCPUTensor(&scope, x_name, std::vector(x_shape)); + } + CreateCPUTensor(&scope, y_name, std::vector(y_shape)); + CreateCPUTensor(&scope, z_name, std::vector(z_shape)); + + // It is wired, need to copy manually. 
+ *block_->add_ops() = *fc->Proto(); + }; + + // Test with 4 layer FC + AddFCLayer("x0", "y0", "z0", false, {batch_size, input_dim}, + {input_dim, output_dim}, {batch_size, output_dim}); + AddFCLayer("z0", "y1", "z1", true, {}, {output_dim, output_dim}, + {batch_size, output_dim}); + AddFCLayer("z1", "y2", "z2", true, {}, {output_dim, output_dim}, + {batch_size, output_dim}); + AddFCLayer("z2", "y3", "z3", true, {}, {output_dim, output_dim}, + {batch_size, output_dim}); + + LOG(INFO) << "create tensorrt desc"; + framework::OpDesc engine_op_desc(nullptr); + engine_op_desc.SetType("tensorrt_engine"); + engine_op_desc.SetInput("Xs", std::vector({"x0"})); + engine_op_desc.SetOutput("Ys", std::vector({"z3"})); + + SetAttr(engine_op_desc.Proto(), "subgraph", + block_->SerializeAsString()); + SetAttr(engine_op_desc.Proto(), "max_batch", batch_size); + SetAttr(engine_op_desc.Proto(), "max_workspace", 2 << 10); + SetAttr>( + engine_op_desc.Proto(), "parameters", + std::vector({"y0", "y1", "y2", "y3"})); + SetAttr(engine_op_desc.Proto(), "engine_uniq_key", "b_engine"); + + SetAttr>(engine_op_desc.Proto(), + "output_name_mapping", + std::vector({"z3"})); + + auto engine_op = framework::OpRegistry::CreateOp(*engine_op_desc.Proto()); + + // Execute them. + engine_op->Run(scope, place); +} + +// Test with a larger FC layer. +TEST(TensorRTEngineOp, fc) { Execute(40, 28, 28); } + +} // namespace operators +} // namespace paddle + +USE_TRT_CONVERTER(fc) diff --git a/paddle/fluid/operators/test_send_nccl_id.cc b/paddle/fluid/operators/test_send_nccl_id.cc new file mode 100644 index 0000000000..e2b7b6b8e4 --- /dev/null +++ b/paddle/fluid/operators/test_send_nccl_id.cc @@ -0,0 +1,106 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ + +#include +#include +#include // NOLINT + +#include "gtest/gtest.h" +#include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/framework/operator.h" +#include "paddle/fluid/framework/program_desc.h" +#include "paddle/fluid/operators/detail/macros.h" +#include "paddle/fluid/operators/distributed/request_handler_impl.h" +#include "paddle/fluid/operators/listen_and_serv_op.h" +#include "paddle/fluid/operators/math/math_function.h" +#include "paddle/fluid/operators/math/selected_rows_functor.h" +#include "paddle/fluid/platform/nccl_helper.h" +#include "paddle/fluid/string/printf.h" + +#ifdef PADDLE_WITH_GRPC +#include "paddle/fluid/operators/send_recv_util.h" +#endif + +USE_NO_KERNEL_OP(listen_and_serv); + +namespace f = paddle::framework; +namespace p = paddle::platform; +namespace m = paddle::operators::math; +namespace distributed = paddle::operators::distributed; +namespace string = paddle::string; + +std::unique_ptr g_rpc_service; +std::unique_ptr g_req_handler; + +void StartServer() { + f::Scope scope; + p::CPUPlace place; + scope.Var(NCCL_ID_VARNAME); + p::DeviceContextPool& pool = p::DeviceContextPool::Instance(); + auto& dev_ctx = *pool.Get(p::CPUPlace()); + + f::ProgramDesc empty_program; + f::Executor executor(dev_ctx.GetPlace()); + g_req_handler->SetScope(&scope); + g_req_handler->SetDevCtx(&dev_ctx); + g_req_handler->SetProgram(&empty_program); + g_req_handler->SetExecutor(&executor); + + g_rpc_service->RegisterRPC(distributed::kRequestSend, g_req_handler.get()); + g_req_handler->SetRPCServer(g_rpc_service.get()); + + std::thread server_thread( + std::bind(&distributed::RPCServer::StartServer, g_rpc_service.get())); + + g_rpc_service->SetCond(distributed::kRequestSend); + g_rpc_service->WaitBarrier(distributed::kRequestSend); + + LOG(INFO) << "got nccl id and stop server..."; + g_rpc_service->ShutDown(); + server_thread.join(); +} + +TEST(SendNcclId, RPCServer) { + g_req_handler.reset(new distributed::RequestSendHandler(true)); + g_rpc_service.reset(new RPCSERVER_T("127.0.0.1:0", 1)); + + std::thread server_thread(StartServer); + g_rpc_service->WaitServerReady(); + + f::Scope scope; + p::CPUPlace place; + p::DeviceContextPool& pool = p::DeviceContextPool::Instance(); + auto& dev_ctx = *pool.Get(p::CPUPlace()); + + auto var = scope.Var(NCCL_ID_VARNAME); + auto id = var->GetMutable(); + p::dynload::ncclGetUniqueId(id); + + int port = g_rpc_service->GetSelectedPort(); + + std::string ep = string::Sprintf("127.0.0.1:%d", port); + + distributed::RPCClient* client = + distributed::RPCClient::GetInstance(); + + LOG(INFO) << "connect to server" << ep; + client->AsyncSendVar(ep, dev_ctx, scope, NCCL_ID_VARNAME); + client->Wait(); + client->AsyncSendBatchBarrier(ep); + client->Wait(); + + server_thread.join(); + g_rpc_service.reset(nullptr); + g_req_handler.reset(nullptr); +} diff --git a/paddle/fluid/operators/top_k_op.cc b/paddle/fluid/operators/top_k_op.cc index 942a5de3f9..4a8ac441cf 100644 --- a/paddle/fluid/operators/top_k_op.cc +++ b/paddle/fluid/operators/top_k_op.cc @@ -48,10 +48,9 @@ class TopkOp : public framework::OperatorWithKernel { class TopkOpMaker : public framework::OpProtoAndCheckerMaker { public: - TopkOpMaker(OpProto *proto, OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("X", "(Tensor) The input of Topk op"); - AddOutput("Out", "(Tensor) The output tensor of Topk op"); + AddOutput("Out", "(Tensor) The output tensor of Topk op").Reuse("X"); AddOutput("Indices", "(Tensor) The indices of Topk 
elements of input"); AddComment(R"DOC( Top K operator diff --git a/paddle/fluid/operators/top_k_op.h b/paddle/fluid/operators/top_k_op.h index d44eeae8e6..054dd48199 100644 --- a/paddle/fluid/operators/top_k_op.h +++ b/paddle/fluid/operators/top_k_op.h @@ -55,8 +55,12 @@ class TopkKernel : public framework::OpKernel { // NOTE: eigen shape doesn't affect paddle tensor. eg_input.reshape(flat2dims); +#ifdef PADDLE_WITH_MKLML +#pragma omp parallel for +#endif for (size_t i = 0; i < row; i++) { std::vector> vec; + vec.reserve(col); for (size_t j = 0; j < col; j++) { vec.push_back(std::pair(eg_input(i, j), j)); } diff --git a/paddle/fluid/operators/transpose_op.cc b/paddle/fluid/operators/transpose_op.cc index 3555cb68ca..60556a564c 100644 --- a/paddle/fluid/operators/transpose_op.cc +++ b/paddle/fluid/operators/transpose_op.cc @@ -56,8 +56,7 @@ class TransposeOp : public framework::OperatorWithKernel { class TransposeOpMaker : public framework::OpProtoAndCheckerMaker { public: - TransposeOpMaker(OpProto* proto, OpAttrChecker* op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput( "X", "(Tensor) The input tensor, tensors with rank up to 6 are supported."); diff --git a/paddle/fluid/operators/uniform_random_batch_size_like_op.cc b/paddle/fluid/operators/uniform_random_batch_size_like_op.cc index 00f00bb403..75d6181749 100644 --- a/paddle/fluid/operators/uniform_random_batch_size_like_op.cc +++ b/paddle/fluid/operators/uniform_random_batch_size_like_op.cc @@ -32,14 +32,13 @@ class UniformRandomBatchSizeLikeOp : public BatchSizeLikeOp { }; class UniformRandomBatchSizeLikeOpMaker : public BatchSizeLikeOpMaker { - public: - UniformRandomBatchSizeLikeOpMaker(OpProto *proto, OpAttrChecker *op_checker) - : BatchSizeLikeOpMaker(proto, op_checker) { + protected: + void Apply() override { AddComment(R"DOC( -Uniform random operator +UniformRandomBatchSizeLike operator. This operator initializes a tensor with the same batch_size as the Input tensor - with random values sampled from a uniform distribution. +with random values sampled from a uniform distribution. )DOC"); AddAttr("min", diff --git a/paddle/fluid/operators/uniform_random_op.cc b/paddle/fluid/operators/uniform_random_op.cc index 3b5cf68dd4..edd1baa4ac 100644 --- a/paddle/fluid/operators/uniform_random_op.cc +++ b/paddle/fluid/operators/uniform_random_op.cc @@ -85,34 +85,25 @@ class UniformRandomOp : public framework::OperatorWithKernel { class UniformRandomOpMaker : public framework::OpProtoAndCheckerMaker { public: - UniformRandomOpMaker(OpProto* proto, OpAttrChecker* op_checker) - : framework::OpProtoAndCheckerMaker(proto, op_checker) { - AddOutput("Out", "(Tensor) The output tensor of uniform random op"); + void Make() override { + AddOutput("Out", "The output tensor of uniform random op"); AddComment(R"DOC( -Uniform random operator. - This operator initializes a tensor with random values sampled from a -uniform distribution. +uniform distribution. The random result is in set [min, max]. )DOC"); - AddAttr>("shape", - "(vector) The shape of the output tensor"); - AddAttr("min", - "(float, default -1.0) " - "Minimum value of uniform random") + AddAttr>("shape", "The shape of the output tensor"); + AddAttr("min", "Minimum value of uniform random. [default -1.0].") .SetDefault(-1.0f); - AddAttr("max", - "(float, default 1.0) " - "Maximun value of uniform random") + AddAttr("max", "Maximun value of uniform random. 
[default 1.0].") .SetDefault(1.0f); AddAttr("seed", - "(int, default 0) " "Random seed used for generating samples. " "0 means use a seed generated by the system." "Note that if seed is not 0, this operator will always " - "generate the same random numbers every time.") + "generate the same random numbers every time. [default 0].") .SetDefault(0); - AddAttr("dtype", "(int, default 5(FP32)) Output tensor data type") + AddAttr("dtype", "Output tensor data type. [default 5(FP32)].") .SetDefault(framework::proto::VarType::FP32); } }; diff --git a/paddle/fluid/operators/unpool_op.cc b/paddle/fluid/operators/unpool_op.cc index b3cd87efa2..1d441b43b1 100644 --- a/paddle/fluid/operators/unpool_op.cc +++ b/paddle/fluid/operators/unpool_op.cc @@ -20,8 +20,7 @@ namespace operators { class Unpool2dOpMaker : public framework::OpProtoAndCheckerMaker { public: - Unpool2dOpMaker(OpProto* proto, OpAttrChecker* op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput( "X", "(Tensor) The input tensor of unpool operator. " diff --git a/paddle/fluid/operators/unsqueeze_op.cc b/paddle/fluid/operators/unsqueeze_op.cc new file mode 100644 index 0000000000..f2a15fdf57 --- /dev/null +++ b/paddle/fluid/operators/unsqueeze_op.cc @@ -0,0 +1,191 @@ +/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include +#include +#include "paddle/fluid/framework/op_registry.h" + +namespace paddle { +namespace operators { + +class UnsqueezeOpInferShape : public framework::InferShapeBase { + public: + void operator()(framework::InferShapeContext *ctx) const override { + PADDLE_ENFORCE(ctx->HasInput("X"), + "Input(X) of UnsqueezeOp should not be null."); + PADDLE_ENFORCE(ctx->HasOutput("Out"), + "Output(Out) of UnsqueezeOp should not be null."); + + const auto &axes = ctx->Attrs().Get>("axes"); + const auto &x_dims = ctx->GetInputDim("X"); + // Validity Check: input tensor dims (<6). + PADDLE_ENFORCE(x_dims.size() <= 6, + "Invalid dimensions, the rank of Input(X) " + "should be in the range of [1, 6] (Eigen limit)"); + auto out_dims = GetOutputShape(axes, x_dims); + ctx->SetOutputDim("Out", out_dims); + if (x_dims[0] == out_dims[0]) { + // Only pass LoD when the first dimension of output and Input(X) + // are the same. + ctx->ShareLoD("X", "Out"); + } + } + + static framework::DDim GetOutputShape(const std::vector unsqz_dims, + const framework::DDim &in_dims) { + int output_size = in_dims.size() + static_cast(unsqz_dims.size()); + int cur_output_size = in_dims.size(); + std::vector output_shape(output_size, 0); + + // Validity Check: rank range. + PADDLE_ENFORCE(output_size <= 6, + "The output tensor's rank should be less than 6."); + + for (int axis : unsqz_dims) { + int cur = axis < 0 ? 
axis + cur_output_size + 1 : axis;
+      // Validity Check: the axis bound
+      PADDLE_ENFORCE(
+          cur >= 0 && cur <= cur_output_size,
+          "The unsqueeze dims must be within range of current rank.");
+      // Move old axis, and insert new axis
+      for (int i = cur_output_size; i >= cur; --i) {
+        if (output_shape[i] == 1) {
+          // Move axis
+          output_shape[i + 1] = 1;
+          output_shape[i] = 0;
+        }
+      }
+      output_shape[cur] = 1;
+      // Add the output size.
+      cur_output_size++;
+    }
+
+    // Make output shape
+    for (int in_idx = 0, out_idx = 0; out_idx < output_size; ++out_idx) {
+      if (output_shape[out_idx] == 0) {
+        output_shape[out_idx] = in_dims[in_idx++];
+      }
+    }
+
+    return framework::make_ddim(output_shape);
+  }
+};
+
+class UnsqueezeOp : public framework::OperatorBase {
+ public:
+  using OperatorBase::OperatorBase;
+
+ private:
+  void RunImpl(const framework::Scope &scope,
+               const platform::Place &place) const override {
+    auto &axes = Attr<std::vector<int>>("axes");
+    auto x_dims = scope.FindVar(Input("X"))->Get<framework::LoDTensor>().dims();
+    auto out_dims = UnsqueezeOpInferShape::GetOutputShape(axes, x_dims);
+
+    framework::AttributeMap attrs;
+    attrs["shape"] = framework::vectorize2int(out_dims);
+    attrs["inplace"] = Attr<bool>("inplace");
+    // Invoke Reshape op.
+    auto reshape_op = framework::OpRegistry::CreateOp(
+        "reshape", {{"X", {Input("X")}}, {"Shape", {}}},
+        {{"Out", {Output("Out")}}}, attrs);
+    reshape_op->Run(scope, place);
+  }
+};
+
+class UnsqueezeOpMaker : public framework::OpProtoAndCheckerMaker {
+ public:
+  void Make() override {
+    AddInput("X", "(Tensor). The input tensor of unsqueeze operator.");
+    AddOutput("Out", "(Tensor). The output tensor of unsqueeze operator.");
+    AddAttr<std::vector<int>>("axes",
+                              "(std::vector<int>). List of integers,"
+                              " indicating the dimensions to be inserted")
+        .AddCustomChecker([](const std::vector<int> &axes) {
+          PADDLE_ENFORCE(!axes.empty(),
+                         "Invalid axes, the unsqueeze axes is empty.");
+          // Validity Check: axes dims (<6).
+          PADDLE_ENFORCE(static_cast<int>(axes.size()) < 6,
+                         "Invalid dimensions, dynamic dimensions should be "
+                         "within [1, 6] dimensions (Eigen limit).");
+          // Validity Check: the range of unsqueeze axis.
+          for (int axis : axes) {
+            PADDLE_ENFORCE(axis < 6,
+                           "Invalid dimensions, input axis should be"
+                           " within [1, 6] dimensions (Eigen limit).");
+          }
+        });
+    AddAttr<bool>(
+        "inplace",
+        "(default: false) Unsqueeze the source tensor's shape without "
+        "memory copy. When Attr(inplace) is set true, the output "
+        "tensor shares memory with Input(X), otherwise, a new output "
+        "tensor is created, and its data are copied from Input(x).")
+        .SetDefault(false);
+    AddComment(R"DOC(
+    Unsqueeze Operator.
+
+    Insert single-dimensional entries to the shape of a tensor.
+    Takes one required argument axes, a list of dimensions that will be inserted.
+    Dimension indices in axes are as seen in the output tensor.
+ + For example: + Given a tensor such that tensor with shape [3, 4, 5], + then Unsqueeze(tensor, axes=[0, 4]) has shape [1, 3, 4, 5, 1] + )DOC"); + } +}; + +class UnsqueezeGradInferShape : public framework::InferShapeBase { + public: + void operator()(framework::InferShapeContext *ctx) const override { + ctx->SetOutputDim(framework::GradVarName("X"), ctx->GetInputDim("X")); + ctx->ShareLoD("X", framework::GradVarName("X")); + } +}; + +class UnsqueezeGradOp : public framework::OperatorBase { + public: + using OperatorBase::OperatorBase; + + private: + void RunImpl(const framework::Scope &scope, + const platform::Place &place) const override { + auto dx_name = Output(framework::GradVarName("X")); + auto dout_name = Input(framework::GradVarName("Out")); + auto x_dims = scope.FindVar(Input("X"))->Get().dims(); + + framework::AttributeMap attrs; + attrs["shape"] = framework::vectorize2int(x_dims); + attrs["inplace"] = Attr("inplace"); + + auto reshape_op = framework::OpRegistry::CreateOp( + "reshape", {{"X", {dout_name}}, {"Shape", {}}}, {{"Out", {dx_name}}}, + attrs); + reshape_op->Run(scope, place); + } +}; + +} // namespace operators +} // namespace paddle + +// Tell linker to use reshape op. +USE_OP(reshape); + +namespace ops = paddle::operators; +REGISTER_OPERATOR(unsqueeze, ops::UnsqueezeOp, ops::UnsqueezeOpMaker, + ops::UnsqueezeOpInferShape, + paddle::framework::DefaultGradOpDescMaker); +REGISTER_OPERATOR(unsqueeze_grad, ops::UnsqueezeGradOp, + ops::UnsqueezeGradInferShape); diff --git a/paddle/fluid/operators/warpctc_op.cc b/paddle/fluid/operators/warpctc_op.cc index 6835a5dd62..e06c8c962f 100644 --- a/paddle/fluid/operators/warpctc_op.cc +++ b/paddle/fluid/operators/warpctc_op.cc @@ -53,8 +53,7 @@ class WarpCTCOp : public framework::OperatorWithKernel { class WarpCTCOpMaker : public framework::OpProtoAndCheckerMaker { public: - WarpCTCOpMaker(OpProto* proto, OpAttrChecker* op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput("Logits", "(LodTensor, default: LoDTensor), the unscaled " "probabilities of variable-length sequences, which is a 2-D " diff --git a/paddle/fluid/operators/warpctc_op.h b/paddle/fluid/operators/warpctc_op.h index 075eb010c5..cb56f42a8d 100644 --- a/paddle/fluid/operators/warpctc_op.h +++ b/paddle/fluid/operators/warpctc_op.h @@ -187,8 +187,7 @@ class WarpCTCKernel : public framework::OpKernel { // warpctc accesses labels in CPU memory Tensor warpctc_label; - TensorCopy(*label, platform::CPUPlace(), ctx.device_context(), - &warpctc_label); + TensorCopySync(*label, platform::CPUPlace(), &warpctc_label); const int* warpctc_label_data = warpctc_label.data(); // warpctc stores loss in CPU memory Tensor warpctc_loss; diff --git a/paddle/fluid/operators/while_op.cc b/paddle/fluid/operators/while_op.cc index 710cc9fc2e..733157ea05 100644 --- a/paddle/fluid/operators/while_op.cc +++ b/paddle/fluid/operators/while_op.cc @@ -17,6 +17,7 @@ limitations under the License. 
*/ #include "paddle/fluid/framework/lod_tensor_array.h" #include "paddle/fluid/framework/op_registry.h" #include "paddle/fluid/framework/operator.h" +#include "paddle/fluid/framework/var_type.h" #include "paddle/fluid/operators/detail/safe_ref.h" namespace paddle { @@ -68,8 +69,7 @@ class WhileOp : public framework::OperatorBase { class WhileOpMaker : public framework::OpProtoAndCheckerMaker { public: - WhileOpMaker(OpProto *proto, OpAttrChecker *op_checker) - : OpProtoAndCheckerMaker(proto, op_checker) { + void Make() override { AddInput(kX, "A set of variables, which are required by operators inside the " "block of While Op.") @@ -136,15 +136,14 @@ class WhileGradOp : public framework::OperatorBase { auto &og_inside = detail::Ref(cur_scope.Var(inside_og_name), "Cannot find inside gradient %s", inside_og_name); - if (og_outside.Type().hash_code() == - typeid(framework::LoDTensor).hash_code()) { + if (framework::IsType(og_outside.Type())) { auto &outside_tensor = og_outside.Get(); auto &inside_tensor = detail::Ref(og_inside.GetMutable()); inside_tensor.set_lod(outside_tensor.lod()); inside_tensor.ShareDataWith(outside_tensor); - } else if (og_outside.Type().hash_code() == - typeid(framework::LoDTensorArray).hash_code()) { + } else if (framework::IsType( + og_outside.Type())) { auto &outside_array = og_outside.Get(); auto &inside_array = detail::Ref(og_inside.GetMutable()); @@ -204,11 +203,11 @@ class WhileGradOp : public framework::OperatorBase { ->set_lod(inside_tensor.lod()); } } - auto new_inside_name = cur_scope.Rename(inside_grad_name); auto sum_op = framework::OpRegistry::CreateOp( "sum", {{"X", {pg_names[param_id], new_inside_name}}}, - {{"Out", {pg_names[param_id]}}}, framework::AttributeMap{}); + {{"Out", {pg_names[param_id]}}}, + framework::AttributeMap{{"use_mkldnn", {false}}}); sum_op->Run(cur_scope, dev_place); cur_scope.Rename(new_inside_name, inside_grad_name); } diff --git a/paddle/fluid/platform/CMakeLists.txt b/paddle/fluid/platform/CMakeLists.txt index 598fd4d419..f08c0e8e34 100644 --- a/paddle/fluid/platform/CMakeLists.txt +++ b/paddle/fluid/platform/CMakeLists.txt @@ -1,4 +1,4 @@ -proto_library(profiler_proto SRCS profiler.proto) +proto_library(profiler_proto SRCS profiler.proto DEPS framework_proto) py_proto_compile(profiler_py_proto SRCS profiler.proto) add_custom_target(profiler_py_proto_init ALL COMMAND ${CMAKE_COMMAND} -E touch __init__.py) @@ -18,7 +18,11 @@ else() endif() cc_test(enforce_test SRCS enforce_test.cc DEPS stringpiece enforce) -cc_library(cpu_info SRCS cpu_info.cc DEPS gflags glog enforce) +set(CPU_INFO_DEPS gflags glog enforce) +IF(WITH_XBYAK) + list(APPEND CPU_INFO_DEPS xbyak) +ENDIF() +cc_library(cpu_info SRCS cpu_info.cc DEPS ${CPU_INFO_DEPS}) cc_test(cpu_info_test SRCS cpu_info_test.cc DEPS cpu_info) nv_library(gpu_info SRCS gpu_info.cc DEPS gflags glog enforce) @@ -28,6 +32,9 @@ cc_test(place_test SRCS place_test.cc DEPS place glog gflags) add_subdirectory(dynload) +cc_library(cpu_helper SRCS cpu_helper.cc DEPS cblas enforce) +cc_test(cpu_helper_test SRCS cpu_helper_test.cc DEPS cpu_helper) + IF(WITH_GPU) set(GPU_CTX_DEPS dynload_cuda dynamic_loader) ELSE() @@ -42,16 +49,22 @@ ENDIF() # memcpy depends on device_context, here add deps individually for # avoiding cycle dependencies -cc_library(device_context SRCS device_context.cc DEPS malloc - place eigen3 ${GPU_CTX_DEPS} ${MKLDNN_CTX_DEPS}) +cc_library(device_context SRCS device_context.cc init.cc DEPS malloc + place eigen3 stringpiece cpu_helper framework_proto ${GPU_CTX_DEPS} 
${MKLDNN_CTX_DEPS}) nv_test(device_context_test SRCS device_context_test.cu DEPS device_context gpu_info) +cc_test(init_test SRCS init_test.cc DEPS device_context) + nv_test(cudnn_helper_test SRCS cudnn_helper_test.cc DEPS dynload_cuda) nv_test(transform_test SRCS transform_test.cu DEPS memory place device_context) -cc_library(device_tracer SRCS device_tracer.cc DEPS boost profiler_proto ${GPU_CTX_DEPS}) +cc_library(device_tracer SRCS device_tracer.cc DEPS boost profiler_proto framework_proto ${GPU_CTX_DEPS}) cc_library(profiler SRCS profiler.cc DEPS device_context device_tracer) cc_test(profiler_test SRCS profiler_test.cc DEPS profiler) -nv_test(float16_gpu_test SRCS float16_test.cu) -cc_test(float16_test SRCS float16_test.cc) +nv_test(float16_gpu_test SRCS float16_test.cu DEPS lod_tensor) +cc_test(float16_test SRCS float16_test.cc DEPS lod_tensor) + +IF(WITH_GPU) + nv_test(cuda_helper_test SRCS cuda_helper_test.cu) +ENDIF() diff --git a/paddle/fluid/platform/assert.h b/paddle/fluid/platform/assert.h index 123d3598f4..2ce9b31bb8 100644 --- a/paddle/fluid/platform/assert.h +++ b/paddle/fluid/platform/assert.h @@ -17,7 +17,7 @@ limitations under the License. */ #define STRINGIFY(x) #x #define TOSTRING(x) STRINGIFY(x) -#if defined(__APPLE__) && defined(__CUDA_ARCH__) && !defined(NDEBUG) +#if defined(__CUDA_ARCH__) #include #define PADDLE_ASSERT(e) \ do { \ @@ -38,6 +38,9 @@ limitations under the License. */ } while (0) #else #include -#define PADDLE_ASSERT(e) assert(e) +// For cuda, the assertions can affect performance and it is therefore +// recommended to disable them in production code +// https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#assertion +#define PADDLE_ASSERT(e) assert((e)) #define PADDLE_ASSERT_MSG(e, m) assert((e) && (m)) #endif diff --git a/paddle/fluid/platform/cpu_helper.cc b/paddle/fluid/platform/cpu_helper.cc new file mode 100644 index 0000000000..234a04b5c2 --- /dev/null +++ b/paddle/fluid/platform/cpu_helper.cc @@ -0,0 +1,44 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#include "paddle/fluid/platform/cpu_helper.h" +#include "paddle/fluid/platform/enforce.h" + +#ifdef PADDLE_WITH_MKLML +#include +#include "paddle/fluid/platform/dynload/mklml.h" +#endif + +#ifdef PADDLE_USE_OPENBLAS +#include +#endif + +namespace paddle { +namespace platform { + +void SetNumThreads(int num_threads) { +#ifdef PADDLE_USE_OPENBLAS + int real_num_threads = num_threads > 1 ? num_threads : 1; + openblas_set_num_threads(real_num_threads); +#elif defined(PADDLE_WITH_MKLML) + int real_num_threads = num_threads > 1 ? 
num_threads : 1;
+  platform::dynload::MKL_Set_Num_Threads(real_num_threads);
+  omp_set_num_threads(num_threads);
+#else
+  PADDLE_ENFORCE(false, "To be implemented.");
+#endif
+}
+
+}  // namespace platform
+}  // namespace paddle
diff --git a/paddle/fluid/platform/cpu_helper.h b/paddle/fluid/platform/cpu_helper.h
new file mode 100644
index 0000000000..78fc392b63
--- /dev/null
+++ b/paddle/fluid/platform/cpu_helper.h
@@ -0,0 +1,26 @@
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+#pragma once
+
+#include
+
+namespace paddle {
+namespace platform {
+
+//! Set the number of threads in use.
+void SetNumThreads(int num_threads);
+
+}  // namespace platform
+}  // namespace paddle
diff --git a/paddle/fluid/platform/cpu_helper_test.cc b/paddle/fluid/platform/cpu_helper_test.cc
new file mode 100644
index 0000000000..dc1b2b56cd
--- /dev/null
+++ b/paddle/fluid/platform/cpu_helper_test.cc
@@ -0,0 +1,22 @@
+/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+#include "paddle/fluid/platform/cpu_helper.h"
+
+#include "gtest/gtest.h"
+
+TEST(CpuHelper, SetNumThread) {
+  paddle::platform::SetNumThreads(1);
+  paddle::platform::SetNumThreads(4);
+}
diff --git a/paddle/fluid/platform/cpu_info.cc b/paddle/fluid/platform/cpu_info.cc
index 4fc9aae8e3..7d53a684d6 100644
--- a/paddle/fluid/platform/cpu_info.cc
+++ b/paddle/fluid/platform/cpu_info.cc
@@ -14,6 +14,11 @@ limitations under the License. */
 
 #include "paddle/fluid/platform/cpu_info.h"
 
+#ifdef PADDLE_WITH_XBYAK
+#include "xbyak/xbyak.h"
+#include "xbyak/xbyak_util.h"
+#endif
+
 #ifdef __APPLE__
 #include <sys/sysctl.h>
 #include <sys/types.h>
@@ -21,12 +26,23 @@ limitations under the License. */
 #include <unistd.h>
 #endif
 
+#include <algorithm>
 #include "gflags/gflags.h"
 
 DEFINE_double(fraction_of_cpu_memory_to_use, 1,
               "Default use 100% of CPU memory for PaddlePaddle,"
               "reserve the rest for page tables, etc");
 
+DEFINE_uint64(initial_cpu_memory_in_mb,
+#ifdef PADDLE_WITH_MKLDNN
+              /* Aligned with mozga-intel, MKLDNN needs at least 5000 MB
+               * to obtain the best performance */
+              5000,
+#else
+              500,
+#endif
+              "Initial CPU memory for PaddlePaddle, in MB.");
+
 DEFINE_double(
     fraction_of_cuda_pinned_memory_to_use, 0.5,
     "Default use 50% of CPU memory as the pinned_memory for PaddlePaddle,"
@@ -63,8 +79,11 @@ size_t CpuMinChunkSize() {
 }
 
 size_t CpuMaxChunkSize() {
-  // Allow to allocate the maximum chunk size is roughly 3% of CPU memory.
- return CpuMaxAllocSize() / 32; + // Allow to allocate the maximum chunk size is roughly 3% of CPU memory, + // or the initial_cpu_memory_in_mb. + return std::min( + static_cast(CpuMaxAllocSize() / 32), + static_cast(FLAGS_initial_cpu_memory_in_mb * 1 << 20)); } size_t CUDAPinnedMaxAllocSize() { @@ -84,5 +103,39 @@ size_t CUDAPinnedMaxChunkSize() { return CUDAPinnedMaxAllocSize() / 256; } +#ifdef PADDLE_WITH_XBYAK +namespace jit { + +static Xbyak::util::Cpu cpu; +bool MayIUse(const cpu_isa_t cpu_isa) { + using namespace Xbyak::util; // NOLINT + switch (cpu_isa) { + case sse42: + return cpu.has(Cpu::tSSE42); + case avx2: + return cpu.has(Cpu::tAVX2); + case avx512_common: + return cpu.has(Cpu::tAVX512F); + case avx512_core: + return true && cpu.has(Cpu::tAVX512F) && cpu.has(Cpu::tAVX512BW) && + cpu.has(Cpu::tAVX512VL) && cpu.has(Cpu::tAVX512DQ); + case avx512_core_vnni: + return true && cpu.has(Cpu::tAVX512F) && cpu.has(Cpu::tAVX512BW) && + cpu.has(Cpu::tAVX512VL) && cpu.has(Cpu::tAVX512DQ) && + cpu.has(Cpu::tAVX512_VNNI); + case avx512_mic: + return true && cpu.has(Cpu::tAVX512F) && cpu.has(Cpu::tAVX512CD) && + cpu.has(Cpu::tAVX512ER) && cpu.has(Cpu::tAVX512PF); + case avx512_mic_4ops: + return true && MayIUse(avx512_mic) && cpu.has(Cpu::tAVX512_4FMAPS) && + cpu.has(Cpu::tAVX512_4VNNIW); + case isa_any: + return true; + } + return false; +} + +} // namespace jit +#endif } // namespace platform } // namespace paddle diff --git a/paddle/fluid/platform/cpu_info.h b/paddle/fluid/platform/cpu_info.h index f06c2b67fe..f5f6766759 100644 --- a/paddle/fluid/platform/cpu_info.h +++ b/paddle/fluid/platform/cpu_info.h @@ -37,5 +37,25 @@ size_t CUDAPinnedMinChunkSize(); //! Get the maximum chunk size for buddy allocator. size_t CUDAPinnedMaxChunkSize(); +#ifdef PADDLE_WITH_XBYAK +namespace jit { + +typedef enum { + isa_any, + sse42, + avx2, + avx512_common, + avx512_core, + avx512_core_vnni, + avx512_mic, + avx512_mic_4ops, +} cpu_isa_t; // Instruction set architecture + +// May I use some instruction +inline bool MayIUse(const cpu_isa_t cpu_isa); + +} // namespace jit +#endif + } // namespace platform } // namespace paddle diff --git a/paddle/fluid/platform/cuda_device_function.h b/paddle/fluid/platform/cuda_device_function.h index ecec4178f2..23457ff5fe 100644 --- a/paddle/fluid/platform/cuda_device_function.h +++ b/paddle/fluid/platform/cuda_device_function.h @@ -14,6 +14,10 @@ limitations under the License. */ #pragma once #include +// NOTE(): support float16 to half in header file. 
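+// A minimal sketch of what this macro gates (assumed behavior): defining
+// PADDLE_CUDA_FP16 before including float16.h enables the float16 <-> half
+// conversions in device code, e.g.
+//   float16 val;
+//   half h = static_cast<half>(val);
+// which the CudaShuffleDownSync specialization below relies on.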
+#define PADDLE_CUDA_FP16 +#include +#include "paddle/fluid/platform/float16.h" namespace paddle { namespace platform { @@ -36,6 +40,18 @@ __forceinline__ __device__ T CudaShuffleDownSync(unsigned mask, T val, #endif } +// CUDA 9.0 have native compatible float16 shfl_down +#if CUDA_VERSION < 9000 +template <> +__forceinline__ __device__ float16 CudaShuffleDownSync(unsigned mask, + float16 val, int delta, + int width) { + half tmp = static_cast(val); + __shfl_down(tmp, static_cast(delta), width); + return float16(tmp); +} +#endif + template __forceinline__ __device__ T CudaShuffleSync(unsigned mask, T val, int src_line, int width = 32) { @@ -46,6 +62,11 @@ __forceinline__ __device__ T CudaShuffleSync(unsigned mask, T val, int src_line, #endif } +template +HOSTDEVICE T Infinity() { + return INFINITY; +} + template __device__ T reduceSum(T val, int tid, int len) { // NOTE(zcd): The warp size should be taken from the diff --git a/paddle/fluid/platform/cuda_helper_test.cu b/paddle/fluid/platform/cuda_helper_test.cu new file mode 100644 index 0000000000..ca5ca1caeb --- /dev/null +++ b/paddle/fluid/platform/cuda_helper_test.cu @@ -0,0 +1,153 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include +#include +#include + +#define PADDLE_CUDA_FP16 +#include "paddle/fluid/platform/cuda_device_function.h" +#include "paddle/fluid/platform/cuda_primitives.h" +#include "paddle/fluid/platform/float16.h" + +using paddle::platform::PADDLE_CUDA_NUM_THREADS; +using paddle::platform::float16; + +template +__global__ void AddKernel(const T* data_a, T* data_b, size_t num) { + for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < num; + i += blockDim.x * gridDim.x) { + paddle::platform::CudaAtomicAdd(&data_b[i], data_a[i]); + } +} + +template +struct AddFunctor { + T operator()(const T& a, const T& b) { return a + b; } +}; + +template +void TestCase(size_t num) { + T *in1, *in2, *out; + T *d_in1, *d_in2; + size_t size = sizeof(T) * num; + cudaMalloc(reinterpret_cast(&d_in1), size); + cudaMalloc(reinterpret_cast(&d_in2), size); + in1 = reinterpret_cast(malloc(size)); + in2 = reinterpret_cast(malloc(size)); + out = reinterpret_cast(malloc(size)); + std::minstd_rand engine; + std::uniform_real_distribution dist(0.0, 1.0); + for (size_t i = 0; i < num; ++i) { + in1[i] = static_cast(dist(engine)); + in2[i] = static_cast(dist(engine)); + } + cudaMemcpy(d_in1, in1, size, cudaMemcpyHostToDevice); + cudaMemcpy(d_in2, in2, size, cudaMemcpyHostToDevice); + AddKernel<<<1, PADDLE_CUDA_NUM_THREADS>>>(d_in1, d_in2, num); + cudaDeviceSynchronize(); + cudaMemcpy(out, d_in2, size, cudaMemcpyDeviceToHost); + cudaDeviceSynchronize(); + for (size_t i = 0; i < num; ++i) { + // NOTE(dzhwinter): the float16 add has small underflow/overflow + // so we use EXPECT_NEAR to check the result. 
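+    // (float16 has a 10-bit mantissa, so one ulp near 1.0 is
+    // 2^-10 ~= 0.00098; the 0.001 tolerance absorbs a single rounding step.)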
+ EXPECT_NEAR(static_cast(out[i]), + static_cast(AddFunctor()(in1[i], in2[i])), 0.001); + } + free(in1); + free(in2); + free(out); + cudaFree(d_in1); + cudaFree(d_in2); +} + +// cuda primitives +TEST(CudaAtomic, Add) { + TestCase(static_cast(10)); + TestCase(static_cast(1024 * 1024)); + + TestCase(static_cast(10)); + TestCase(static_cast(1024 * 1024)); +} + +TEST(CudaAtomic, float16) { + TestCase(static_cast(1)); + TestCase(static_cast(2)); + TestCase(static_cast(3)); + + TestCase(static_cast(10)); + TestCase(static_cast(1024 * 1024)); +} + +// unalignment of uint8 +void TestUnalign(size_t num, const int shift_bit) { + PADDLE_ENFORCE(num % 2 == 0, "must be a multiple of 2"); + float16 *in1, *in2, *out; + float16 *d_in1, *d_in2; + size_t size = sizeof(uint8_t) * (num + shift_bit); + size_t array_size = sizeof(float16) * (num / 2); + + cudaMalloc(reinterpret_cast(&d_in1), size); + cudaMalloc(reinterpret_cast(&d_in2), size); + in1 = reinterpret_cast(malloc(size)); + in2 = reinterpret_cast(malloc(size)); + out = reinterpret_cast(malloc(size)); + + // right shift 1, mimic the unalignment of address + float16* r_in1 = + reinterpret_cast(reinterpret_cast(in1) + shift_bit); + float16* r_in2 = + reinterpret_cast(reinterpret_cast(in2) + shift_bit); + + std::minstd_rand engine; + std::uniform_real_distribution dist(0.0, 1.0); + for (size_t i = 0; i < num / 2; ++i) { + r_in1[i] = static_cast(dist(engine)); + r_in2[i] = static_cast(dist(engine)); + } + cudaMemcpy(d_in1, r_in1, array_size, cudaMemcpyHostToDevice); + cudaMemcpy(d_in2, r_in2, array_size, cudaMemcpyHostToDevice); + AddKernel<<<1, PADDLE_CUDA_NUM_THREADS>>>(d_in1, d_in2, num / 2); + cudaDeviceSynchronize(); + cudaMemcpy(out, d_in2, array_size, cudaMemcpyDeviceToHost); + cudaDeviceSynchronize(); + for (size_t i = 0; i < num / 2; ++i) { + // NOTE(dzhwinter): the float16 add has small underflow/overflow + // so we use EXPECT_NEAR to check the result. + EXPECT_NEAR(static_cast(out[i]), + static_cast(AddFunctor()(r_in1[i], r_in2[i])), + 0.001); + } + free(in1); + free(in2); + free(out); + cudaFree(d_in1); + cudaFree(d_in2); +} + +TEST(CudaAtomic, float16Unalign) { + // same with float16 testcase + TestUnalign(static_cast(2), /*shift_bit*/ 2); + TestUnalign(static_cast(1024), /*shift_bit*/ 2); + TestUnalign(static_cast(1024 * 1024), /*shift_bit*/ 2); + + // shift the address. + TestUnalign(static_cast(2), /*shift_bit*/ 1); + TestUnalign(static_cast(1024), /*shift_bit*/ 1); + TestUnalign(static_cast(1024 * 1024), /*shift_bit*/ 1); + + TestUnalign(static_cast(2), /*shift_bit*/ 3); + TestUnalign(static_cast(1024), /*shift_bit*/ 3); + TestUnalign(static_cast(1024 * 1024), /*shift_bit*/ 3); +} diff --git a/paddle/fluid/platform/cuda_primitives.h b/paddle/fluid/platform/cuda_primitives.h index d535ed2f89..67ea64833d 100644 --- a/paddle/fluid/platform/cuda_primitives.h +++ b/paddle/fluid/platform/cuda_primitives.h @@ -14,12 +14,14 @@ limitations under the License. 
 */
 #pragma once
 #include <cuda.h>
+#include <stdio.h>
+#include "paddle/fluid/platform/float16.h"
 
 namespace paddle {
 namespace platform {
 
 #define CUDA_ATOMIC_WRAPPER(op, T) \
-  __device__ __forceinline__ T CudaAtomic##op(T* address, const T val)
+  __device__ __forceinline__ T CudaAtomic##op(T *address, const T val)
 
 #define USE_CUDA_ATOMIC(op, T) \
   CUDA_ATOMIC_WRAPPER(op, T) { return atomic##op(address, val); }
@@ -42,17 +44,17 @@ CUDA_ATOMIC_WRAPPER(Add, int64_t) {
   static_assert(sizeof(int64_t) == sizeof(long long int),  // NOLINT
                 "long long should be int64");
   return CudaAtomicAdd(
-      reinterpret_cast<unsigned long long int*>(address),  // NOLINT
-      static_cast<unsigned long long int>(val));           // NOLINT
+      reinterpret_cast<unsigned long long int *>(address),  // NOLINT
+      static_cast<unsigned long long int>(val));            // NOLINT
 }
 
 #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 600
 USE_CUDA_ATOMIC(Add, double);
 #else
 CUDA_ATOMIC_WRAPPER(Add, double) {
-  unsigned long long int* address_as_ull =                 // NOLINT
-      reinterpret_cast<unsigned long long int*>(address);  // NOLINT
-  unsigned long long int old = *address_as_ull, assumed;   // NOLINT
+  unsigned long long int *address_as_ull =                  // NOLINT
+      reinterpret_cast<unsigned long long int *>(address);  // NOLINT
+  unsigned long long int old = *address_as_ull, assumed;    // NOLINT
 
   do {
     assumed = old;
@@ -64,6 +66,67 @@ CUDA_ATOMIC_WRAPPER(Add, double) {
   return __longlong_as_double(old);
 }
+#endif
+
+#ifdef PADDLE_CUDA_FP16
+// NOTE(dzhwinter): CUDA does not provide an atomicCAS for half.
+// We therefore treat the half's address as part of a 32-bit (unsigned) word
+// and run atomicCAS on that word. Depending on whether the value is stored
+// in the high or the low 16 bits, a different sum and CAS are performed.
+// Since most warp threads will fail their atomicCAS, this implementation
+// should be avoided under high concurrency; it is slower than converting
+// the value to 32 bits and doing a full-word atomicCAS.
+
+// convert the value into float, do the add arithmetic,
+// then store the result back into a uint32.
+inline static __device__ uint32_t add_to_low_half(uint32_t val, float x) {
+  float16 low_half;
+  // the float16 in the lower 16 bits
+  low_half.x = static_cast<uint16_t>(val & 0xFFFFu);
+  low_half = static_cast<float16>(static_cast<float>(low_half) + x);
+  return (val & 0xFFFF0000u) | low_half.x;
+}
+
+inline static __device__ uint32_t add_to_high_half(uint32_t val, float x) {
+  float16 high_half;
+  // the float16 in the higher 16 bits
+  high_half.x = static_cast<uint16_t>(val >> 16);
+  high_half = static_cast<float16>(static_cast<float>(high_half) + x);
+  return (val & 0xFFFFu) | (static_cast<uint32_t>(high_half.x) << 16);
+}
+
+CUDA_ATOMIC_WRAPPER(Add, float16) {
+  // the packed float16 value may sit in either the lower or the higher
+  // 16 bits of the aligned 32-bit word.
+  uint32_t *address_as_ui = reinterpret_cast<uint32_t *>(
+      reinterpret_cast<char *>(address) -
+      (reinterpret_cast<uintptr_t>(address) & 0x02));
+  float val_f = static_cast<float>(val);
+  uint32_t old = *address_as_ui;
+  uint32_t sum;
+  uint32_t newval;
+  uint32_t assumed;
+  if (((uintptr_t)address & 0x02) == 0) {
+    // the float16 value stays in the lower 16 bits of the word.
+    do {
+      assumed = old;
+      old = atomicCAS(address_as_ui, assumed, add_to_low_half(assumed, val_f));
+    } while (old != assumed);
+    float16 ret;
+    ret.x = old & 0xFFFFu;
+    return ret;
+  } else {
+    // the float16 value stays in the higher 16 bits of the word.
+ do { + assumed = old; + old = atomicCAS(address_as_ui, assumed, add_to_high_half(assumed, val_f)); + } while (old != assumed); + float16 ret; + ret.x = old >> 16; + return ret; + } +} + #endif } // namespace platform } // namespace paddle diff --git a/paddle/fluid/platform/cudnn_helper.h b/paddle/fluid/platform/cudnn_helper.h index c0d399d078..bb8b14bb9f 100644 --- a/paddle/fluid/platform/cudnn_helper.h +++ b/paddle/fluid/platform/cudnn_helper.h @@ -22,6 +22,8 @@ limitations under the License. */ #include "paddle/fluid/platform/float16.h" #include "paddle/fluid/platform/macros.h" +DECLARE_bool(cudnn_deterministic); + namespace paddle { namespace platform { @@ -57,13 +59,12 @@ inline const char* cudnnGetErrorString(cudnnStatus_t status) { #define CUDNN_VERSION_MIN(major, minor, patch) \ (CUDNN_VERSION >= ((major)*1000 + (minor)*100 + (patch))) -#define CUDNN_ENFORCE(condition) \ - do { \ - cudnnStatus_t status = condition; \ - if (status != CUDNN_STATUS_SUCCESS) { \ - VLOG(1) << ::paddle::platform::cudnnGetErrorString(status); \ - PADDLE_THROW("cuDNN call failed"); \ - } \ +#define CUDNN_ENFORCE(condition) \ + do { \ + cudnnStatus_t status = condition; \ + if (UNLIKELY(status != CUDNN_STATUS_SUCCESS)) { \ + PADDLE_THROW(::paddle::platform::cudnnGetErrorString(status)); \ + } \ } while (false) enum class DataLayout { // Not use @@ -76,8 +77,44 @@ enum class DataLayout { // Not use enum class PoolingMode { kMaximum, kAverage, + kMaximumDeterministic, }; +#if CUDNN_VERSION < 6000 +#pragma message "CUDNN version under 6.0 is supported at best effort." +#pragma message "We strongly encourage you to move to 6.0 and above." +#pragma message "This message is intended to annoy you enough to update." +#pragma message \ + "please see https://docs.nvidia.com/deeplearning/sdk/cudnn-release-notes/" + +inline cudnnPoolingMode_t GetPoolingMode(const PoolingMode& mode) { + switch (mode) { + case PoolingMode::kMaximumDeterministic: + return CUDNN_POOLING_MAX; + case PoolingMode::kAverage: + return CUDNN_POOLING_AVERAGE_COUNT_EXCLUDE_PADDING; + case PoolingMode::kMaximum: + return CUDNN_POOLING_MAX; + default: + PADDLE_THROW("Unexpected pooling mode."); + } +} +#else + +inline cudnnPoolingMode_t GetPoolingMode(const PoolingMode& mode) { + switch (mode) { + case PoolingMode::kMaximumDeterministic: + return CUDNN_POOLING_MAX_DETERMINISTIC; + case PoolingMode::kAverage: + return CUDNN_POOLING_AVERAGE_COUNT_EXCLUDE_PADDING; + case PoolingMode::kMaximum: + return CUDNN_POOLING_MAX; + default: + PADDLE_THROW("Unexpected pooling mode."); + } +} +#endif // CUDNN_VERSION < 6000 + template class CudnnDataType; @@ -293,9 +330,7 @@ class ScopedPoolingDescriptor { PADDLE_ENFORCE_EQ(kernel.size(), pads.size()); PADDLE_ENFORCE_EQ(kernel.size(), strides.size()); PADDLE_ENFORCE(dynload::cudnnSetPoolingNdDescriptor( - desc_, (mode == PoolingMode::kMaximum - ? CUDNN_POOLING_MAX - : CUDNN_POOLING_AVERAGE_COUNT_EXCLUDE_PADDING), + desc_, (GetPoolingMode(mode)), CUDNN_PROPAGATE_NAN, // Always propagate nans. kernel.size(), kernel.data(), pads.data(), strides.data())); return desc_; diff --git a/paddle/fluid/platform/device_context.cc b/paddle/fluid/platform/device_context.cc index 1f733d71bd..2cc26da013 100644 --- a/paddle/fluid/platform/device_context.cc +++ b/paddle/fluid/platform/device_context.cc @@ -10,6 +10,7 @@ See the License for the specific language governing permissions and limitations under the License. 
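// NOTE: the block below is an editorial sketch, not part of the patch. It
// restates the 16-bit-in-32-bit CAS technique used by the
// CudaAtomicAdd(float16*) wrapper above in terms of the plain CUDA __half
// type and its CUDA 9+ bit-cast intrinsics; the name AtomicAddHalfViaCAS is
// illustrative only.
#include <cuda_fp16.h>
#include <stdint.h>

__device__ float AtomicAddHalfViaCAS(__half *addr, float inc) {
  // locate the aligned 32-bit word that contains the 16-bit value
  uintptr_t raw = reinterpret_cast<uintptr_t>(addr);
  uint32_t *word =
      reinterpret_cast<uint32_t *>(raw & ~static_cast<uintptr_t>(0x02));
  const bool is_high = (raw & 0x02) != 0;
  uint32_t old = *word, assumed;
  do {
    assumed = old;
    // widen the current half to float, add, and narrow back to 16 bits
    unsigned short bits = is_high
                              ? static_cast<unsigned short>(assumed >> 16)
                              : static_cast<unsigned short>(assumed & 0xFFFFu);
    unsigned short sum = __half_as_ushort(
        __float2half(__half2float(__ushort_as_half(bits)) + inc));
    uint32_t replaced =
        is_high ? ((assumed & 0x0000FFFFu) | (static_cast<uint32_t>(sum) << 16))
                : ((assumed & 0xFFFF0000u) | sum);
    // retry until no other thread modified the word between read and CAS
    old = atomicCAS(word, assumed, replaced);
  } while (assumed != old);
  return __half2float(__ushort_as_half(
      is_high ? static_cast<unsigned short>(old >> 16)
              : static_cast<unsigned short>(old & 0xFFFFu)));
}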
*/ #include "paddle/fluid/platform/device_context.h" +#include #include #include #include @@ -35,7 +36,7 @@ DeviceContextPool::DeviceContextPool( const std::vector& places) { PADDLE_ENFORCE_GT(places.size(), 0); using PtrType = std::unique_ptr; - std::unordered_set set; + std::set set; for (auto& p : places) { set.insert(p); } @@ -175,7 +176,6 @@ CUDADeviceContext::~CUDADeviceContext() { Place CUDADeviceContext::GetPlace() const { return place_; } void CUDADeviceContext::Wait() const { - std::lock_guard guard(mutex_); PADDLE_ENFORCE(cudaStreamSynchronize(stream_)); PADDLE_ENFORCE(cudaGetLastError()); } diff --git a/paddle/fluid/platform/device_context.h b/paddle/fluid/platform/device_context.h index a9c1984616..88e0383146 100644 --- a/paddle/fluid/platform/device_context.h +++ b/paddle/fluid/platform/device_context.h @@ -11,6 +11,7 @@ limitations under the License. */ #pragma once #include +#include // NOLINT #include #include #include @@ -26,12 +27,12 @@ limitations under the License. */ #include #endif +#include +#include "glog/logging.h" #include "paddle/fluid/platform/enforce.h" #include "paddle/fluid/platform/place.h" #include "unsupported/Eigen/CXX11/Tensor" -#include "glog/logging.h" - namespace paddle { namespace platform { @@ -100,7 +101,7 @@ class CUDADeviceContext : public DeviceContext { template void RecordEvent(cudaEvent_t ev, Callback callback) { - std::lock_guard guard(mutex_); + std::lock_guard guard(mtx_); callback(); PADDLE_ENFORCE(cudaEventRecord(ev, stream_)); } @@ -110,8 +111,6 @@ class CUDADeviceContext : public DeviceContext { std::unique_ptr eigen_device_; std::unique_ptr eigen_stream_; - - mutable std::recursive_mutex mutex_; cudaStream_t stream_; cudnnHandle_t cudnn_handle_; cublasHandle_t cublas_handle_; @@ -119,6 +118,8 @@ class CUDADeviceContext : public DeviceContext { int compute_capability; int multi_process; int max_threads_per_mp; + + std::mutex mtx_; }; template <> @@ -200,9 +201,7 @@ class DeviceContextPool { private: static DeviceContextPool* pool; - std::unordered_map, PlaceHash> - device_contexts_; + std::map> device_contexts_; DISABLE_COPY_AND_ASSIGN(DeviceContextPool); }; diff --git a/paddle/fluid/platform/device_context_test.cu b/paddle/fluid/platform/device_context_test.cu index fa806aba6d..171d2979a0 100644 --- a/paddle/fluid/platform/device_context_test.cu +++ b/paddle/fluid/platform/device_context_test.cu @@ -69,19 +69,3 @@ TEST(Device, DeviceContextPool) { ASSERT_NE(dev_ctx, nullptr); } } - -int main(int argc, char** argv) { - std::vector places; - - places.emplace_back(paddle::platform::CPUPlace()); - int count = paddle::platform::GetCUDADeviceCount(); - for (int i = 0; i < count; ++i) { - places.emplace_back(paddle::platform::CUDAPlace(i)); - } - - VLOG(0) << " DeviceCount " << count; - paddle::platform::DeviceContextPool::Init(places); - - testing::InitGoogleTest(&argc, argv); - return RUN_ALL_TESTS(); -} diff --git a/paddle/fluid/platform/device_tracer.cc b/paddle/fluid/platform/device_tracer.cc index c9e1063168..dc1d751141 100644 --- a/paddle/fluid/platform/device_tracer.cc +++ b/paddle/fluid/platform/device_tracer.cc @@ -30,9 +30,6 @@ limitations under the License. */ namespace paddle { namespace platform { namespace { -// Current thread's id. Note, we don't distinguish nested threads -// for now. -thread_local int cur_thread_id = 0; // Tracking the nested block stacks of each thread. thread_local std::deque block_id_stack; // Tracking the nested event stacks. 
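// NOTE: an editorial aside, not part of the patch. DeviceContextPool can now
// key an ordered std::map/std::set by Place because boost::variant compares
// first by which() and then by the held alternative; once the place structs
// gain operator< (see the place.h hunk further down), Place becomes
// strict-weak ordered and the old PlaceHash functor is no longer needed.
// A minimal mock of that mechanism:
#include <map>
#include "boost/variant.hpp"

namespace demo {  // simplified stand-ins for paddle::platform types
struct CPUPlace {
  bool operator==(const CPUPlace &) const { return true; }
  bool operator<(const CPUPlace &) const { return false; }
};
struct CUDAPlace {
  int device;
  bool operator==(const CUDAPlace &o) const { return device == o.device; }
  bool operator<(const CUDAPlace &o) const { return device < o.device; }
};
using Place = boost::variant<CPUPlace, CUDAPlace>;
}  // namespace demo

int main() {
  std::map<demo::Place, int> pool;  // ordered: CPUPlace, then CUDA by device id
  pool[demo::CPUPlace{}] = 0;
  pool[demo::CUDAPlace{0}] = 1;
  pool[demo::CUDAPlace{1}] = 2;
  return static_cast<int>(pool.size());  // 3
}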
@@ -192,6 +189,8 @@ void CUPTIAPI bufferCompleted(CUcontext ctx, uint32_t streamId, uint8_t *buffer, } } // namespace +#endif // PADDLE_WITH_CUPTI + class DeviceTracerImpl : public DeviceTracer { public: DeviceTracerImpl() : enabled_(false) {} @@ -245,9 +244,10 @@ class DeviceTracerImpl : public DeviceTracer { void Enable() { std::lock_guard l(trace_mu_); if (enabled_) { - fprintf(stderr, "DeviceTracer already enabled\n"); return; } + +#ifdef PADDLE_WITH_CUPTI EnableActivity(); // Register callbacks for buffer requests and completed by CUPTI. @@ -266,6 +266,7 @@ class DeviceTracerImpl : public DeviceTracer { dynload::cuptiEnableCallback(1, subscriber_, CUPTI_CB_DOMAIN_DRIVER_API, CUPTI_DRIVER_TRACE_CBID_cuLaunchKernel)); CUPTI_CALL(dynload::cuptiGetTimestamp(&start_ns_)); +#endif // PADDLE_WITH_CUPTI enabled_ = true; } @@ -317,17 +318,21 @@ class DeviceTracerImpl : public DeviceTracer { } void Disable() { +#ifdef PADDLE_WITH_CUPTI // flush might cause additional calls to DeviceTracker. dynload::cuptiActivityFlushAll(CUPTI_ACTIVITY_FLAG_FLUSH_FORCED); +#endif // PADDLE_WITH_CUPTI std::lock_guard l(trace_mu_); +#ifdef PADDLE_WITH_CUPTI DisableActivity(); dynload::cuptiUnsubscribe(subscriber_); CUPTI_CALL(dynload::cuptiGetTimestamp(&end_ns_)); - PADDLE_ENFORCE(dynload::cuptiFinalize()); +#endif // PADDLE_WITH_CUPTI enabled_ = false; } private: +#ifdef PADDLE_WITH_CUPTI static void CUPTIAPI ApiCallback(void *userdata, CUpti_CallbackDomain domain, CUpti_CallbackId cbid, const void *cbdata) { auto *cbInfo = reinterpret_cast(cbdata); @@ -345,7 +350,8 @@ class DeviceTracerImpl : public DeviceTracer { VLOG(1) << "Unhandled API Callback for " << domain << " " << cbid; } } - + CUpti_SubscriberHandle subscriber_; +#endif // PADDLE_WITH_CUPTI std::mutex trace_mu_; bool enabled_; uint64_t start_ns_; @@ -354,45 +360,9 @@ class DeviceTracerImpl : public DeviceTracer { std::vector mem_records_; std::vector cpu_records_; std::unordered_map correlations_; - CUpti_SubscriberHandle subscriber_; -}; - -#endif // PADDLE_WITH_CUPTI - -class DeviceTracerDummy : public DeviceTracer { - public: - DeviceTracerDummy() {} - - void AddAnnotation(uint64_t id, const std::string &anno) {} - - void AddCPURecords(const std::string &anno, uint64_t start_ns, - uint64_t end_ns, int64_t device_id, int64_t thread_id) {} - - void AddMemRecords(const std::string &name, uint64_t start_ns, - uint64_t end_ns, int64_t device_id, int64_t stream_id, - uint32_t correlation_id, uint64_t bytes) {} - - void AddKernelRecords(uint64_t start, uint64_t end, int64_t device_id, - int64_t stream_id, uint32_t correlation_id) {} - - bool IsEnabled() { return false; } - - void Enable() {} - - proto::Profile GenProfile(const std::string &profile_path) { - return proto::Profile(); - } - - void Disable() {} }; -void CreateTracer(DeviceTracer **t) { -#ifdef PADDLE_WITH_CUPTI - *t = new DeviceTracerImpl(); -#else - *t = new DeviceTracerDummy(); -#endif // PADDLE_WITH_CUPTI -} +void CreateTracer(DeviceTracer **t) { *t = new DeviceTracerImpl(); } DeviceTracer *GetDeviceTracer() { std::call_once(tracer_once_flag, CreateTracer, &tracer); @@ -415,12 +385,5 @@ void SetCurBlock(int block_id) { block_id_stack.push_back(block_id); } void ClearCurBlock() { block_id_stack.pop_back(); } int BlockDepth() { return block_id_stack.size(); } - -void SetCurThread(int thread_id) { cur_thread_id = thread_id; } - -void ClearCurThread() { cur_thread_id = 0; } - -int CurThread() { return cur_thread_id; } - } // namespace platform } // namespace paddle diff --git 
a/paddle/fluid/platform/device_tracer.h b/paddle/fluid/platform/device_tracer.h index 0375c7439c..322996fb4f 100644 --- a/paddle/fluid/platform/device_tracer.h +++ b/paddle/fluid/platform/device_tracer.h @@ -13,6 +13,9 @@ See the License for the specific language governing permissions and limitations under the License. */ #pragma once +#include +#include +#include // NOLINT #include #include "paddle/fluid/platform/dynload/cupti.h" @@ -25,6 +28,12 @@ namespace platform { // WARN: Under Development. Don't depend on it yet. ////////////////////// +inline uint64_t PosixInNsec() { + struct timeval tv; + gettimeofday(&tv, nullptr); + return 1000 * (static_cast(tv.tv_sec) * 1000000 + tv.tv_usec); +} + // DeviceTracer performs the following tasks: // 1. Register cuda callbacks for various events: kernel, memcpy, etc. // 2. Collect cuda statistics: start/end ts, memory, etc. @@ -99,9 +108,5 @@ std::string CurAnnotation(); void SetCurBlock(int block_id); void ClearCurBlock(); int BlockDepth(); - -void SetCurThread(int thread_id); -void ClearCurThread(); -int CurThread(); } // namespace platform } // namespace paddle diff --git a/paddle/fluid/platform/dynload/CMakeLists.txt b/paddle/fluid/platform/dynload/CMakeLists.txt index 364c4901b2..9da787a407 100644 --- a/paddle/fluid/platform/dynload/CMakeLists.txt +++ b/paddle/fluid/platform/dynload/CMakeLists.txt @@ -1,14 +1,23 @@ cc_library(dynamic_loader SRCS dynamic_loader.cc DEPS glog gflags enforce) -list(APPEND CUDA_SRCS cublas.cc cudnn.cc curand.cc nccl.cc) +list(APPEND CUDA_SRCS cublas.cc cudnn.cc curand.cc) + +# There is no macOS version of NCCL. +if (NOT APPLE) + list(APPEND CUDA_SRCS nccl.cc) +endif() + if (TENSORRT_FOUND) list(APPEND CUDA_SRCS tensorrt.cc) endif() - configure_file(cupti_lib_path.h.in ${CMAKE_CURRENT_BINARY_DIR}/cupti_lib_path.h) if (CUPTI_FOUND) list(APPEND CUDA_SRCS cupti.cc) endif(CUPTI_FOUND) nv_library(dynload_cuda SRCS ${CUDA_SRCS} DEPS dynamic_loader) cc_library(dynload_warpctc SRCS warpctc.cc DEPS dynamic_loader warpctc) +if (WITH_MKLML) + cc_library(dynload_mklml SRCS mklml.cc DEPS dynamic_loader mklml) +endif() +# TODO(TJ): add iomp, mkldnn? 
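// NOTE: an editorial sketch, not part of the patch, of the dynload wrapper
// pattern shared by the cublas/cudnn/curand/nccl/mklml headers that follow,
// including the `static` change those hunks make: binding the dlsym() result
// to a function-local static caches the symbol lookup after the first call
// instead of re-running dlsym on every invocation. All names here
// (libfoo.so, foo_fn, CallFooFn) are illustrative.
#include <dlfcn.h>
#include <mutex>

std::once_flag foo_dso_flag;
void *foo_dso_handle = nullptr;

void *GetFooDsoHandle() { return dlopen("libfoo.so", RTLD_LAZY); }

template <typename... Args>
int CallFooFn(Args... args) {
  using foo_fn_t = int (*)(Args...);
  // load the shared object exactly once, thread-safely
  std::call_once(foo_dso_flag, [] { foo_dso_handle = GetFooDsoHandle(); });
  // resolved once per instantiation, then reused by every later call
  static void *p_foo_fn = dlsym(foo_dso_handle, "foo_fn");
  return reinterpret_cast<foo_fn_t>(p_foo_fn)(args...);
}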
diff --git a/paddle/fluid/platform/dynload/cublas.h b/paddle/fluid/platform/dynload/cublas.h index 81acaff87d..25bcda7eed 100644 --- a/paddle/fluid/platform/dynload/cublas.h +++ b/paddle/fluid/platform/dynload/cublas.h @@ -45,7 +45,7 @@ extern void *cublas_dso_handle; std::call_once(cublas_dso_flag, []() { \ cublas_dso_handle = paddle::platform::dynload::GetCublasDsoHandle(); \ }); \ - void *p_##__name = dlsym(cublas_dso_handle, #__name); \ + static void *p_##__name = dlsym(cublas_dso_handle, #__name); \ return reinterpret_cast(p_##__name)(args...); \ } \ }; \ diff --git a/paddle/fluid/platform/dynload/cudnn.h b/paddle/fluid/platform/dynload/cudnn.h index 34d83e3956..77e46fa768 100644 --- a/paddle/fluid/platform/dynload/cudnn.h +++ b/paddle/fluid/platform/dynload/cudnn.h @@ -39,7 +39,7 @@ extern void EnforceCUDNNLoaded(const char* fn_name); cudnn_dso_handle = paddle::platform::dynload::GetCUDNNDsoHandle(); \ }); \ EnforceCUDNNLoaded(#__name); \ - void* p_##__name = dlsym(cudnn_dso_handle, #__name); \ + static void* p_##__name = dlsym(cudnn_dso_handle, #__name); \ return reinterpret_cast(p_##__name)(args...); \ } \ }; \ diff --git a/paddle/fluid/platform/dynload/cupti.h b/paddle/fluid/platform/dynload/cupti.h index e64de7c20f..e8f4a82ef1 100644 --- a/paddle/fluid/platform/dynload/cupti.h +++ b/paddle/fluid/platform/dynload/cupti.h @@ -45,7 +45,7 @@ extern void *cupti_dso_handle; std::call_once(cupti_dso_flag, []() { \ cupti_dso_handle = paddle::platform::dynload::GetCUPTIDsoHandle(); \ }); \ - void *p_##__name = dlsym(cupti_dso_handle, #__name); \ + static void *p_##__name = dlsym(cupti_dso_handle, #__name); \ return reinterpret_cast(p_##__name)(args...); \ } \ }; \ @@ -72,7 +72,6 @@ extern void *cupti_dso_handle; __macro(cuptiGetResultString); \ __macro(cuptiActivityGetNumDroppedRecords); \ __macro(cuptiActivityFlushAll); \ - __macro(cuptiFinalize); \ __macro(cuptiSubscribe); \ __macro(cuptiUnsubscribe); \ __macro(cuptiEnableCallback); \ diff --git a/paddle/fluid/platform/dynload/curand.h b/paddle/fluid/platform/dynload/curand.h index 46ad4379d5..5b9e0820e0 100644 --- a/paddle/fluid/platform/dynload/curand.h +++ b/paddle/fluid/platform/dynload/curand.h @@ -34,7 +34,7 @@ extern void *curand_dso_handle; std::call_once(curand_dso_flag, []() { \ curand_dso_handle = paddle::platform::dynload::GetCurandDsoHandle(); \ }); \ - void *p_##__name = dlsym(curand_dso_handle, #__name); \ + static void *p_##__name = dlsym(curand_dso_handle, #__name); \ return reinterpret_cast(p_##__name)(args...); \ } \ }; \ diff --git a/paddle/fluid/platform/dynload/dynamic_loader.cc b/paddle/fluid/platform/dynload/dynamic_loader.cc index 19c01dc5a9..93bf7c1351 100644 --- a/paddle/fluid/platform/dynload/dynamic_loader.cc +++ b/paddle/fluid/platform/dynload/dynamic_loader.cc @@ -36,8 +36,6 @@ DEFINE_string(cuda_dir, "", DEFINE_string(warpctc_dir, "", "Specify path for loading libwarpctc.so."); -DEFINE_string(lapack_dir, "", "Specify path for loading liblapack.so."); - DEFINE_string(nccl_dir, "", "Specify path for loading nccl library, such as libcublas, " "libcurand. For instance, /usr/local/cuda/lib64. 
If default, " @@ -49,6 +47,8 @@ DEFINE_string( tensorrt_dir, "", "Specify path for loading tensorrt library, such as libnvinfer.so."); +DEFINE_string(mklml_dir, "", "Specify path for loading libmklml_intel.so."); + namespace paddle { namespace platform { namespace dynload { @@ -76,6 +76,7 @@ static inline void* GetDsoHandleFromDefaultPath(const std::string& dso_path, VLOG(3) << "Try to find library: " << dso_path << " from default system path."; // default search from LD_LIBRARY_PATH/DYLD_LIBRARY_PATH + // and /usr/local/lib path void* dso_handle = dlopen(dso_path.c_str(), dynload_flags); // DYLD_LIBRARY_PATH is disabled after Mac OS 10.11 to @@ -97,6 +98,10 @@ static inline void* GetDsoHandleFromDefaultPath(const std::string& dso_path, } #endif + if (nullptr == dso_handle) { + LOG(WARNING) << "Can not find library: " << dso_path + << ". Please try to add the lib path to LD_LIBRARY_PATH."; + } return dso_handle; } @@ -182,14 +187,6 @@ void* GetWarpCTCDsoHandle() { #endif } -void* GetLapackDsoHandle() { -#if defined(__APPLE__) || defined(__OSX__) - return GetDsoHandleFromSearchPath(FLAGS_lapack_dir, "liblapacke.dylib"); -#else - return GetDsoHandleFromSearchPath(FLAGS_lapack_dir, "liblapacke.so"); -#endif -} - void* GetNCCLDsoHandle() { #if defined(__APPLE__) || defined(__OSX__) return GetDsoHandleFromSearchPath(FLAGS_nccl_dir, "libnccl.dylib"); @@ -206,6 +203,14 @@ void* GetTensorRtDsoHandle() { #endif } +void* GetMKLMLDsoHandle() { +#if defined(__APPLE__) || defined(__OSX__) + return GetDsoHandleFromSearchPath(FLAGS_mklml_dir, "libmklml_intel.dylib"); +#else + return GetDsoHandleFromSearchPath(FLAGS_mklml_dir, "libmklml_intel.so"); +#endif +} + } // namespace dynload } // namespace platform } // namespace paddle diff --git a/paddle/fluid/platform/dynload/dynamic_loader.h b/paddle/fluid/platform/dynload/dynamic_loader.h index 0de3559b60..84fd2ce998 100644 --- a/paddle/fluid/platform/dynload/dynamic_loader.h +++ b/paddle/fluid/platform/dynload/dynamic_loader.h @@ -23,9 +23,9 @@ void* GetCUDNNDsoHandle(); void* GetCUPTIDsoHandle(); void* GetCurandDsoHandle(); void* GetWarpCTCDsoHandle(); -void* GetLapackDsoHandle(); void* GetNCCLDsoHandle(); void* GetTensorRtDsoHandle(); +void* GetMKLMLDsoHandle(); } // namespace dynload } // namespace platform diff --git a/paddle/fluid/platform/dynload/mklml.cc b/paddle/fluid/platform/dynload/mklml.cc new file mode 100644 index 0000000000..0f61a5e09b --- /dev/null +++ b/paddle/fluid/platform/dynload/mklml.cc @@ -0,0 +1,30 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
*/ + +#include "paddle/fluid/platform/dynload/mklml.h" + +namespace paddle { +namespace platform { +namespace dynload { + +std::once_flag mklml_dso_flag; +void* mklml_dso_handle = nullptr; + +#define DEFINE_WRAP(__name) DynLoad__##__name __name + +MKLML_ROUTINE_EACH(DEFINE_WRAP); + +} // namespace dynload +} // namespace platform +} // namespace paddle diff --git a/paddle/fluid/platform/dynload/mklml.h b/paddle/fluid/platform/dynload/mklml.h new file mode 100644 index 0000000000..17acefe8cd --- /dev/null +++ b/paddle/fluid/platform/dynload/mklml.h @@ -0,0 +1,71 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ + +#pragma once + +#include +#include +#include // NOLINT +#include "paddle/fluid/platform/dynload/dynamic_loader.h" + +namespace paddle { +namespace platform { +namespace dynload { + +extern std::once_flag mklml_dso_flag; +extern void* mklml_dso_handle; + +/** + * The following macro definition can generate structs + * (for each function) to dynamic load mklml routine + * via operator overloading. + */ +#define DYNAMIC_LOAD_MKLML_WRAP(__name) \ + struct DynLoad__##__name { \ + template \ + auto operator()(Args... args) -> decltype(__name(args...)) { \ + using mklmlFunc = decltype(&::__name); \ + std::call_once(mklml_dso_flag, []() { \ + mklml_dso_handle = paddle::platform::dynload::GetMKLMLDsoHandle(); \ + }); \ + static void* p_##_name = dlsym(mklml_dso_handle, #__name); \ + return reinterpret_cast(p_##_name)(args...); \ + } \ + }; \ + extern DynLoad__##__name __name + +#define DECLARE_DYNAMIC_LOAD_MKLML_WRAP(__name) DYNAMIC_LOAD_MKLML_WRAP(__name) + +#define MKLML_ROUTINE_EACH(__macro) \ + __macro(cblas_sgemm); \ + __macro(cblas_saxpy); \ + __macro(cblas_scopy); \ + __macro(cblas_sgemv); \ + __macro(cblas_sgemm_batch); \ + __macro(cblas_dgemm); \ + __macro(cblas_daxpy); \ + __macro(cblas_dcopy); \ + __macro(cblas_dgemv); \ + __macro(cblas_dgemm_batch); \ + __macro(vsAdd); \ + __macro(vdAdd); \ + __macro(MKL_Set_Num_Threads) + +MKLML_ROUTINE_EACH(DECLARE_DYNAMIC_LOAD_MKLML_WRAP); + +#undef DYNAMIC_LOAD_MKLML_WRAP + +} // namespace dynload +} // namespace platform +} // namespace paddle diff --git a/paddle/fluid/platform/dynload/nccl.h b/paddle/fluid/platform/dynload/nccl.h index 37902ae20c..575516f818 100644 --- a/paddle/fluid/platform/dynload/nccl.h +++ b/paddle/fluid/platform/dynload/nccl.h @@ -37,7 +37,7 @@ extern void* nccl_dso_handle; std::call_once(nccl_dso_flag, []() { \ nccl_dso_handle = paddle::platform::dynload::GetNCCLDsoHandle(); \ }); \ - void* p_##__name = dlsym(nccl_dso_handle, #__name); \ + static void* p_##__name = dlsym(nccl_dso_handle, #__name); \ return reinterpret_cast(p_##__name)(args...); \ } \ }; \ diff --git a/paddle/fluid/platform/dynload/tensorrt.h b/paddle/fluid/platform/dynload/tensorrt.h index f584a49da0..5d67658b94 100644 --- a/paddle/fluid/platform/dynload/tensorrt.h +++ b/paddle/fluid/platform/dynload/tensorrt.h @@ -40,7 +40,7 @@ extern void* tensorrt_dso_handle; 
paddle::platform::dynload::GetTensorRtDsoHandle(); \ PADDLE_ENFORCE(tensorrt_dso_handle, "load tensorrt so failed"); \ }); \ - void* p_##__name = dlsym(tensorrt_dso_handle, #__name); \ + static void* p_##__name = dlsym(tensorrt_dso_handle, #__name); \ PADDLE_ENFORCE(p_##__name, "load %s failed", #__name); \ return reinterpret_cast(p_##__name)(args...); \ } \ diff --git a/paddle/fluid/platform/dynload/warpctc.h b/paddle/fluid/platform/dynload/warpctc.h index 7c70649d21..d157c1fda7 100644 --- a/paddle/fluid/platform/dynload/warpctc.h +++ b/paddle/fluid/platform/dynload/warpctc.h @@ -40,7 +40,7 @@ extern void* warpctc_dso_handle; std::call_once(warpctc_dso_flag, []() { \ warpctc_dso_handle = paddle::platform::dynload::GetWarpCTCDsoHandle(); \ }); \ - void* p_##_name = dlsym(warpctc_dso_handle, #__name); \ + static void* p_##_name = dlsym(warpctc_dso_handle, #__name); \ return reinterpret_cast(p_##_name)(args...); \ } \ }; \ diff --git a/paddle/fluid/platform/enforce.h b/paddle/fluid/platform/enforce.h index 7b8c29e1e6..566485cd3c 100644 --- a/paddle/fluid/platform/enforce.h +++ b/paddle/fluid/platform/enforce.h @@ -44,8 +44,10 @@ limitations under the License. */ #include "paddle/fluid/platform/dynload/cublas.h" #include "paddle/fluid/platform/dynload/cudnn.h" #include "paddle/fluid/platform/dynload/curand.h" +#ifndef __APPLE__ #include "paddle/fluid/platform/dynload/nccl.h" -#endif +#endif // __APPLE__ +#endif // PADDLE_WITH_CUDA namespace paddle { namespace platform { @@ -100,6 +102,15 @@ struct EnforceNotMet : public std::exception { const char* what() const noexcept { return err_str_.c_str(); } }; +struct EOFException : public std::exception { + std::string err_str_; + EOFException(const char* err_msg, const char* f, int l) { + err_str_ = string::Sprintf("%s at [%s:%d]", err_msg, f, l); + } + + const char* what() const noexcept { return err_str_.c_str(); } +}; + // Because most enforce conditions would evaluate to true, we can use // __builtin_expect to instruct the C++ compiler to generate code that // always forces branch prediction of true. @@ -111,7 +122,11 @@ template inline typename std::enable_if::type throw_on_error( bool stat, const Args&... args) { if (UNLIKELY(!(stat))) { +#ifndef REPLACE_ENFORCE_GLOG throw std::runtime_error(string::Sprintf(args...)); +#else + LOG(FATAL) << string::Sprintf(args...); +#endif } } @@ -121,8 +136,12 @@ template inline typename std::enable_if::type throw_on_error( cudaError_t e, const Args&... args) { if (UNLIKELY(e)) { +#ifndef REPLACE_ENFORCE_GLOG throw thrust::system_error(e, thrust::cuda_category(), string::Sprintf(args...)); +#else + LOG(FATAL) << string::Sprintf(args...); +#endif } } @@ -130,8 +149,12 @@ template inline typename std::enable_if::type throw_on_error( curandStatus_t stat, const Args&... 
args) {
   if (stat != CURAND_STATUS_SUCCESS) {
+#ifndef REPLACE_ENFORCE_GLOG
     throw thrust::system_error(cudaErrorLaunchFailure, thrust::cuda_category(),
                                string::Sprintf(args...));
+#else
+    LOG(FATAL) << string::Sprintf(args...);
+#endif
   }
 }
 
@@ -141,8 +164,12 @@ inline typename std::enable_if<sizeof...(Args) != 0, void>::type throw_on_error(
   if (stat == CUDNN_STATUS_SUCCESS) {
     return;
   } else {
+#ifndef REPLACE_ENFORCE_GLOG
     throw std::runtime_error(platform::dynload::cudnnGetErrorString(stat) +
                              string::Sprintf(args...));
+#else
+    LOG(FATAL) << string::Sprintf(args...);
+#endif
   }
 }
 
@@ -171,20 +198,30 @@ inline typename std::enable_if<sizeof...(Args) != 0, void>::type throw_on_error(
   } else if (stat == CUBLAS_STATUS_LICENSE_ERROR) {
     err = "CUBLAS: license error, ";
   }
+#ifndef REPLACE_ENFORCE_GLOG
   throw std::runtime_error(err + string::Sprintf(args...));
+#else
+  LOG(FATAL) << err << string::Sprintf(args...);
+#endif
 }
 
+#ifndef __APPLE__
 template <typename... Args>
 inline typename std::enable_if<sizeof...(Args) != 0, void>::type throw_on_error(
     ncclResult_t stat, const Args&... args) {
   if (stat == ncclSuccess) {
     return;
   } else {
+#ifndef REPLACE_ENFORCE_GLOG
     throw std::runtime_error(platform::dynload::ncclGetErrorString(stat) +
                              string::Sprintf(args...));
+#else
+    LOG(FATAL) << platform::dynload::ncclGetErrorString(stat)
+               << string::Sprintf(args...);
+#endif
   }
 }
-
+#endif  // __APPLE__
 #endif  // PADDLE_WITH_CUDA
 
 template <typename T>
@@ -200,6 +237,7 @@ inline void throw_on_error(T e) {
                  __FILE__, __LINE__);            \
   } while (false)
 
+#ifndef REPLACE_ENFORCE_GLOG
 #define PADDLE_ENFORCE(...)                 \
   do {                                      \
     try {                                   \
@@ -209,7 +247,15 @@ inline void throw_on_error(T e) {
                  __FILE__, __LINE__);       \
     }                                       \
   } while (false)
+#else
+#define PADDLE_ENFORCE(...) ::paddle::platform::throw_on_error(__VA_ARGS__);
+#endif
 
+#define PADDLE_THROW_EOF()                                                     \
+  do {                                                                         \
+    throw ::paddle::platform::EOFException("There is no next data.", __FILE__, \
+                                           __LINE__);                          \
+  } while (false)
 /*
  * Some enforce helpers here, usage:
  *    int a = 1;
diff --git a/paddle/fluid/platform/enforce_test.cc b/paddle/fluid/platform/enforce_test.cc
index 57d751cc00..0e8684581a 100644
--- a/paddle/fluid/platform/enforce_test.cc
+++ b/paddle/fluid/platform/enforce_test.cc
@@ -210,3 +210,14 @@ TEST(ENFORCE_USER_DEFINED_CLASS, NE) {
   Dims a{{1, 2, 3, 4}}, b{{5, 6, 7, 8}};
   ASSERT_THROW(PADDLE_ENFORCE_EQ(a, b), paddle::platform::EnforceNotMet);
 }
+
+TEST(EOF_EXCEPTION, THROW_EOF) {
+  bool caught_eof = false;
+  try {
+    PADDLE_THROW_EOF();
+  } catch (paddle::platform::EOFException error) {
+    caught_eof = true;
+    EXPECT_TRUE(HasPrefix(StringPiece(error.what()), "There is no next data."));
+  }
+  EXPECT_TRUE(caught_eof);
+}
diff --git a/paddle/fluid/platform/float16.h b/paddle/fluid/platform/float16.h
index ffd183af68..efb021c838 100644
--- a/paddle/fluid/platform/float16.h
+++ b/paddle/fluid/platform/float16.h
@@ -67,8 +67,11 @@ struct float16;
 }  // namespace platform
 }  // namespace paddle
 
+// NOTE:
+// Do not move the eigen.h header, otherwise the eigen_vector will fail.
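// NOTE: an editorial usage sketch, not part of the patch, for the enforce.h
// additions above. PADDLE_THROW_EOF() raises the dedicated EOFException so
// callers can treat end-of-data as an expected, recoverable condition,
// distinct from an EnforceNotMet failure. FetchBatch and ConsumeAll are
// hypothetical helpers.
#include "paddle/fluid/platform/enforce.h"

void FetchBatch(int remaining) {
  if (remaining == 0) PADDLE_THROW_EOF();  // signals "there is no next data"
}

int ConsumeAll(int n) {
  int batches = 0;
  try {
    while (true) {
      FetchBatch(n - batches);
      ++batches;
    }
  } catch (const paddle::platform::EOFException &eof) {
    // end of data reached; stop iterating instead of reporting an error
  }
  return batches;  // == n
}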
#include "paddle/fluid/framework/eigen.h" #include "paddle/fluid/platform/hostdevice.h" +#include "unsupported/Eigen/CXX11/Tensor" namespace paddle { namespace platform { @@ -898,6 +901,30 @@ struct is_pod { is_standard_layout::value; }; +template <> +struct is_floating_point + : std::integral_constant< + bool, std::is_same::type>::value> {}; +template <> +struct is_signed { + static const bool value = true; +}; + +template <> +struct is_unsigned { + static const bool value = false; +}; + +inline bool isnan(const paddle::platform::float16& a) { + return paddle::platform::isnan(a); +} + +inline bool isinf(const paddle::platform::float16& a) { + return paddle::platform::isinf(a); +} + template <> struct numeric_limits { static const bool is_specialized = true; diff --git a/paddle/fluid/platform/float16_test.cc b/paddle/fluid/platform/float16_test.cc index a589e32b61..27e930e6e0 100644 --- a/paddle/fluid/platform/float16_test.cc +++ b/paddle/fluid/platform/float16_test.cc @@ -13,8 +13,8 @@ limitations under the License. */ #include #include "gtest/gtest.h" -#include "paddle/fluid/framework/init.h" #include "paddle/fluid/framework/lod_tensor.h" +#include "paddle/fluid/platform/init.h" namespace paddle { namespace platform { @@ -141,10 +141,36 @@ TEST(float16, lod_tensor_cpu) { } } +TEST(float16, floating) { + // compile time assert. + PADDLE_ASSERT(std::is_floating_point::value); +} + TEST(float16, print) { float16 a = float16(1.0f); std::cout << a << std::endl; } +// CPU test +TEST(float16, isinf) { + float16 a; + a.x = 0x7c00; + float16 b = float16(INFINITY); + float16 c = static_cast(INFINITY); + EXPECT_EQ(std::isinf(a), true); + EXPECT_EQ(std::isinf(b), true); + EXPECT_EQ(std::isinf(c), true); +} + +TEST(float16, isnan) { + float16 a; + a.x = 0x7fff; + float16 b = float16(NAN); + float16 c = static_cast(NAN); + EXPECT_EQ(std::isnan(a), true); + EXPECT_EQ(std::isnan(b), true); + EXPECT_EQ(std::isnan(c), true); +} + } // namespace platform } // namespace paddle diff --git a/paddle/fluid/platform/float16_test.cu b/paddle/fluid/platform/float16_test.cu index 577fc24ceb..e2b7ca9b03 100644 --- a/paddle/fluid/platform/float16_test.cu +++ b/paddle/fluid/platform/float16_test.cu @@ -11,11 +11,13 @@ limitations under the License. 
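// NOTE: an editorial illustration, not part of the patch, of what the std
// trait and isnan/isinf additions above enable: generic numeric code written
// against the builtin floating types now accepts float16 unchanged.
// ClipNonFinite is a hypothetical helper, not a Paddle API.
#include <cmath>
#include <type_traits>
#include "paddle/fluid/platform/float16.h"

template <typename T>
T ClipNonFinite(T v, T fallback) {
  static_assert(std::is_floating_point<T>::value,
                "ClipNonFinite expects a floating-point type");
  // unqualified calls pick up the float16 overloads added in namespace std
  using std::isinf;
  using std::isnan;
  return (isnan(v) || isinf(v)) ? fallback : v;
}

// e.g. ClipNonFinite(paddle::platform::float16(NAN),
//                    paddle::platform::float16(0.0f)) yields float16(0.0f)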
*/ #include "paddle/fluid/platform/float16.h" +#include #include +#include +#include #include "paddle/fluid/framework/lod_tensor.h" #include "paddle/fluid/framework/tensor_util.h" -#include "paddle/utils/Logging.h" #define ARITHMETIC_KERNEL(op_type, sign) \ __global__ void op_type(const half* in1, const half* in2, half* out) { \ @@ -241,6 +243,72 @@ TEST(float16, lod_tensor_on_gpu) { } } +template +struct Functor { + bool operator()(const T& val) { + return std::type_index(typeid(T)) == + std::type_index(typeid(platform::float16)); + } +}; + +TEST(float16, typeid) { + // the framework heavily used typeid hash + Functor functor; + float16 a = float16(.0f); + Functor functor2; + int b(0); + + // compile time assert + PADDLE_ASSERT(functor(a) == true); + PADDLE_ASSERT(functor2(b) == false); +} + +// GPU test +TEST(float16, isinf) { + float16 a; + a.x = 0x7c00; + float16 b = float16(INFINITY); + // underflow to 0 + float16 native_a(5e-40f); + // overflow to inf + float16 native_b(5e40f); + EXPECT_EQ(std::isinf(a), true); + EXPECT_EQ(std::isinf(b), true); + EXPECT_EQ(std::isinf(native_b), true); + EXPECT_EQ(native_a, float16(0)); +} + +TEST(float16, isnan) { + float16 a; + a.x = 0x7fff; + float16 b = float16(NAN); + float16 c = float16(5e40); + // inf * +-0 will get a nan + float16 d = c * float16(0); + EXPECT_EQ(std::isnan(a), true); + EXPECT_EQ(std::isnan(b), true); + EXPECT_EQ(std::isnan(d), true); +} + +TEST(float16, cast) { + float16 a; + a.x = 0x0070; + auto b = a; + { + // change semantic, keep the same value + float16 c = reinterpret_cast(reinterpret_cast(b)); + EXPECT_EQ(b, c); + } + + { + // use uint32 low 16 bit store float16 + uint32_t c = reinterpret_cast(b); + float16 d; + d.x = c; + EXPECT_EQ(b, d); + } +} + } // namespace platform } // namespace paddle #endif // PADDLE_CUDA_FP16 diff --git a/paddle/fluid/framework/init.cc b/paddle/fluid/platform/init.cc similarity index 93% rename from paddle/fluid/framework/init.cc rename to paddle/fluid/platform/init.cc index 85beae775b..6f1f0c4796 100644 --- a/paddle/fluid/framework/init.cc +++ b/paddle/fluid/platform/init.cc @@ -16,12 +16,16 @@ limitations under the License. */ #include #include -#include "paddle/fluid/framework/init.h" #include "paddle/fluid/framework/operator.h" +#include "paddle/fluid/platform/cpu_helper.h" #include "paddle/fluid/platform/device_context.h" +#include "paddle/fluid/platform/init.h" #include "paddle/fluid/platform/place.h" #include "paddle/fluid/string/piece.h" +DEFINE_int32(paddle_num_threads, 1, + "Number of threads for each paddle instance."); + namespace paddle { namespace framework { @@ -113,6 +117,9 @@ void InitDevices(bool init_p2p, const std::vector devices) { } places.emplace_back(platform::CPUPlace()); platform::DeviceContextPool::Init(places); +#ifndef PADDLE_WITH_MKLDNN + platform::SetNumThreads(FLAGS_paddle_num_threads); +#endif } void InitGLOG(const std::string &prog_name) { diff --git a/paddle/fluid/framework/init.h b/paddle/fluid/platform/init.h similarity index 100% rename from paddle/fluid/framework/init.h rename to paddle/fluid/platform/init.h diff --git a/paddle/fluid/framework/init_test.cc b/paddle/fluid/platform/init_test.cc similarity index 96% rename from paddle/fluid/framework/init_test.cc rename to paddle/fluid/platform/init_test.cc index 928e2d14ab..eef1470a90 100644 --- a/paddle/fluid/framework/init_test.cc +++ b/paddle/fluid/platform/init_test.cc @@ -13,8 +13,8 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ #include "gtest/gtest.h" -#include "paddle/fluid/framework/init.h" #include "paddle/fluid/platform/device_context.h" +#include "paddle/fluid/platform/init.h" TEST(InitDevices, CPU) { using paddle::framework::InitDevices; diff --git a/paddle/fluid/platform/mkldnn_helper.h b/paddle/fluid/platform/mkldnn_helper.h index 23f1d615da..10a3ad256b 100644 --- a/paddle/fluid/platform/mkldnn_helper.h +++ b/paddle/fluid/platform/mkldnn_helper.h @@ -14,8 +14,10 @@ limitations under the License. */ #pragma once #include +#include #include #include "paddle/fluid/framework/operator.h" +#include "paddle/fluid/platform/place.h" namespace paddle { namespace platform { @@ -38,6 +40,11 @@ void* to_void_cast(const Type* t) { return static_cast(const_cast(t)); } +template +void* to_void_reinterpret_cast(const Type* t) { + return reinterpret_cast(const_cast(t)); +} + template using tf_desc = typename Type::desc; @@ -71,5 +78,190 @@ inline bool CanMKLDNNBeUsed(const framework::ExecutionContext& ctx) { return use_mkldnn && platform::is_cpu_place(ctx.GetPlace()); } +template +mkldnn::memory::data_type MKLDNNGetDataType() { + return mkldnn::memory::data_undef; +} + +template <> +inline mkldnn::memory::data_type MKLDNNGetDataType() { + return mkldnn::memory::f32; +} + +inline void Reorder(const mkldnn::memory& src, const mkldnn::memory& dst) { + auto reorder_prim = mkldnn::reorder(src, dst); + std::vector pipeline; + pipeline.push_back(reorder_prim); + mkldnn::stream(mkldnn::stream::kind::eager).submit(pipeline).wait(); +} + +inline mkldnn::memory::format GetMKLDNNFormat(const mkldnn::memory memory) { + return static_cast( + memory.get_primitive_desc().desc().data.format); +} + +inline mkldnn::memory::format GetMKLDNNFormat( + const mkldnn::sum::primitive_desc& memory) { + return static_cast( + memory.dst_primitive_desc().desc().data.format); +} + +class MKLDNNHandler { + public: + MKLDNNHandler(const MKLDNNDeviceContext& dev_ctx, mkldnn::engine engine, + const std::string& base_key) + : dev_ctx_(dev_ctx), + engine_(engine), + key_(base_key), + is_reusing_(false) {} + + std::shared_ptr AcquireSrcMemory( + const mkldnn::memory::desc& md, void* ptr) { + return this->AcquireMemory(md, ptr, "@user_src_mem_p"); + } + + std::shared_ptr AcquireWeightsMemory( + const mkldnn::memory::desc& md, void* ptr) { + return this->AcquireMemory(md, ptr, "@user_weights_mem_p"); + } + + std::shared_ptr AcquireDstMemory( + const mkldnn::memory::desc& md, void* ptr) { + return this->AcquireMemory(md, ptr, "@user_dst_mem_p"); + } + + std::shared_ptr AcquireDiffDstMemory( + const mkldnn::memory::desc& md, void* ptr) { + return this->AcquireMemory(md, ptr, "@user_diff_dst_mem_p"); + } + + std::shared_ptr AcquireDiffSrcMemory( + const mkldnn::memory::desc& md, void* ptr) { + return this->AcquireMemory(md, ptr, "@user_diff_src_mem_p"); + } + + std::shared_ptr AcquireMemoryFromPrimitive( + mkldnn::memory::primitive_desc mdp, void* ptr, + const std::string& suffix) { + auto local_key = key_ + suffix; + auto mem_p = + std::static_pointer_cast(dev_ctx_.GetBlob(local_key)); + PADDLE_ENFORCE((mem_p != nullptr) || (is_reusing_ == false), + "Fail to find mem primitive in device context"); + if (mem_p == nullptr) { + mem_p = std::make_shared(mdp, ptr); + dev_ctx_.SetBlob(local_key, mem_p); + } else { + mem_p->set_data_handle(ptr); + // Mark that reusing happenned. All primitives from operator instance + // should be reused or none of them. 
So we check consistency.
+      is_reusing_ = true;
+    }
+    return mem_p;
+  }
+
+  std::shared_ptr<mkldnn::memory> AcquireMemory(const mkldnn::memory::desc& md,
+                                                void* ptr,
+                                                const std::string& suffix) {
+    /*Generate key*/
+    auto local_key = key_ + suffix;
+    auto mem_p =
+        std::static_pointer_cast<mkldnn::memory>(dev_ctx_.GetBlob(local_key));
+    PADDLE_ENFORCE((mem_p != nullptr) || (is_reusing_ == false),
+                   "Fail to find mem primitive in device context");
+    if (mem_p == nullptr) {
+      mem_p = std::make_shared<mkldnn::memory>(
+          mkldnn::memory::primitive_desc{md, engine_}, ptr);
+      dev_ctx_.SetBlob(local_key, mem_p);
+    } else {
+      mem_p->set_data_handle(ptr);
+      // Mark that reusing happened. All primitives from an operator instance
+      // should be reused, or none of them. So we check consistency.
+      is_reusing_ = true;
+    }
+    return mem_p;
+  }
+
+  std::shared_ptr<mkldnn::memory> AcquireMemory(
+      mkldnn::memory::primitive_desc& mpd,       // NOLINT
+      mkldnn::memory::primitive_desc& user_mpd,  // NOLINT
+      const std::shared_ptr<mkldnn::memory> user_memory_p,
+      const std::string& suffix,
+      std::vector<mkldnn::primitive>& pipeline) {  // NOLINT
+    // create a reorder primitive if the input format is not the preferred one
+    auto local_key = key_ + suffix;
+    auto key_reorder_p = key_ + suffix + "reorder_p";
+
+    auto target_memory_p =
+        std::static_pointer_cast<mkldnn::memory>(dev_ctx_.GetBlob(local_key));
+    PADDLE_ENFORCE((target_memory_p != nullptr) || (is_reusing_ == false),
+                   "Fail to find mem primitive in device context");
+    if (target_memory_p == nullptr) {
+      target_memory_p = user_memory_p;
+      std::shared_ptr<mkldnn::primitive> reorder_p;
+      if (mpd != user_mpd) {
+        target_memory_p = std::make_shared<mkldnn::memory>(mpd);
+
+        auto reorder_p =
+            std::make_shared<mkldnn::reorder>(*user_memory_p, *target_memory_p);
+        dev_ctx_.SetBlob(key_reorder_p, reorder_p);
+        pipeline.push_back(*reorder_p);
+      }
+      dev_ctx_.SetBlob(local_key, target_memory_p);
+    } else {
+      // make the reorder if needed
+      auto reorder_p = std::static_pointer_cast<mkldnn::reorder>(
+          dev_ctx_.GetBlob(key_reorder_p));
+      if (reorder_p != nullptr) {
+        pipeline.push_back(*reorder_p);
+      }
+      is_reusing_ = true;
+    }
+    return target_memory_p;
+  }
+
+  static std::string GetHash(mkldnn::memory::dims& operand_dims,  // NOLINT
+                             const std::string& suffix) {
+    return dims2str(operand_dims) + suffix;
+  }
+
+ protected:
+  static std::string dims2str(const mkldnn::memory::dims& operand_dims) {
+    std::string dstr = "";
+    for (size_t i = 0; i < operand_dims.size(); ++i) {
+      dstr += std::to_string(operand_dims[i]) + "-";
+    }
+    return dstr;
+  }
+
+ protected:
+  const MKLDNNDeviceContext& dev_ctx_;
+  mkldnn::engine engine_;
+  std::string key_;
+  bool is_reusing_;
+};
+
+inline mkldnn::memory::format MKLDNNFormatForSize(
+    size_t dims_size, mkldnn::memory::format data_format) {
+  if (dims_size == 1) {
+    return mkldnn::memory::format::x;
+  } else if (dims_size == 2) {
+    return mkldnn::memory::format::nc;
+  }
+  return data_format;
+}
+
+inline mkldnn::memory::format data_format_to_memory_format(
+    const std::string& data_format) {
+  switch (framework::StringToDataLayout(data_format)) {
+    case framework::DataLayout::kNHWC:
+      return mkldnn::memory::format::nhwc;
+    case framework::DataLayout::kNCHW:
+      return mkldnn::memory::format::nchw;
+    default:
+      return mkldnn::memory::format::any;
+  }
+}
+
 }  // namespace platform
 }  // namespace paddle
diff --git a/paddle/fluid/platform/nccl_helper.h b/paddle/fluid/platform/nccl_helper.h
index 0013597fd5..cc46c88fd1 100644
--- a/paddle/fluid/platform/nccl_helper.h
+++ b/paddle/fluid/platform/nccl_helper.h
@@ -14,12 +14,16 @@
 
 #pragma once
 
+#include <stdio.h>
+#include <memory>
 #include <thread>  // NOLINT
 #include <typeindex>
 #include <vector>
 
 #include "paddle/fluid/platform/dynload/nccl.h"
 #include "paddle/fluid/platform/enforce.h"
 
+#define NCCL_ID_VARNAME "NCCLID"
+
 namespace paddle {
 namespace platform {
 
@@ -37,6 +41,11 @@ inline ncclDataType_t ToNCCLDataType(std::type_index type) {
   }
 }
 
+// NOTE(minqiyang): according to the ncclGroupEnd documentation
+// (https://docs.nvidia.com/deeplearning/sdk/nccl-api/ncclapidoc.html),
+// ncclGroupEnd waits for all communicators to be initialized, which can
+// block when a runtime_error is thrown, so only guard NCCL actions with it.
 class NCCLGroupGuard {
  public:
   static std::mutex &NCCLMutex() {
@@ -50,7 +59,7 @@ class NCCLGroupGuard {
   }
 
   inline ~NCCLGroupGuard() {
-    PADDLE_ENFORCE(dynload::ncclGroupEnd());
+    CHECK_EQ(dynload::ncclGroupEnd(), ncclSuccess);
     NCCLMutex().unlock();
   }
 };
@@ -73,7 +82,9 @@ struct NCCLContextMap {
   std::unordered_map<int, NCCLContext> contexts_;
   std::vector<int> order_;
 
-  explicit NCCLContextMap(const std::vector<platform::Place> &places) {
+  explicit NCCLContextMap(const std::vector<platform::Place> &places,
+                          ncclUniqueId *nccl_id = nullptr,
+                          size_t num_trainers = 1, size_t trainer_id = 0) {
     PADDLE_ENFORCE(!places.empty());
     order_.reserve(places.size());
     for (auto &p : places) {
@@ -85,18 +96,34 @@ struct NCCLContextMap {
         order_.size(), contexts_.size(),
         "NCCL Context Map does not support two or more of the same device");
 
-    if (places.size() > 1) {
-      std::unique_ptr<ncclComm_t[]> comms(new ncclComm_t[order_.size()]);
+    if (places.size() <= 1) {
+      return;
+    }
+    std::unique_ptr<ncclComm_t[]> comms(new ncclComm_t[order_.size()]);
+    // if an nccl_id is passed in, we can assume this is multi-node training
+    if (nccl_id == nullptr) {
+      std::lock_guard<std::mutex> guard(NCCLGroupGuard::NCCLMutex());
+      PADDLE_ENFORCE(platform::dynload::ncclCommInitAll(
+          comms.get(), static_cast<int>(order_.size()), order_.data()));
+    } else {
+      PADDLE_ENFORCE_GT(num_trainers, 1);
+      // TODO(wuyi): need to ensure each node has the same number of GPUs
       {
-        std::lock_guard<std::mutex> guard(NCCLGroupGuard::NCCLMutex());
-        PADDLE_ENFORCE(platform::dynload::ncclCommInitAll(
-            comms.get(), static_cast<int>(order_.size()), order_.data()));
-      }
-      int i = 0;
-      for (auto &dev_id : order_) {
-        contexts_.at(dev_id).comm_ = comms[i++];
+        int nranks = num_trainers * order_.size();
+        NCCLGroupGuard guard;
+        for (auto &gpu_id : order_) {
+          int rank = trainer_id * order_.size() + gpu_id;
+          VLOG(3) << "init nccl rank: " << rank << " nranks: " << nranks;
+          PADDLE_ENFORCE(cudaSetDevice(gpu_id));
+          PADDLE_ENFORCE(platform::dynload::ncclCommInitRank(
+              comms.get() + gpu_id, nranks, *nccl_id, rank));
+        }
       }
     }
+    int i = 0;
+    for (auto &dev_id : order_) {
+      contexts_.at(dev_id).comm_ = comms[i++];
+    }
   }
 
   NCCLContextMap(const NCCLContextMap &other) = delete;
diff --git a/paddle/fluid/platform/place.h b/paddle/fluid/platform/place.h
index ad54a87899..e3ee504f3d 100644
--- a/paddle/fluid/platform/place.h
+++ b/paddle/fluid/platform/place.h
@@ -30,6 +30,7 @@ struct CPUPlace {
   // needed for variant equality comparison
   inline bool operator==(const CPUPlace &) const { return true; }
   inline bool operator!=(const CPUPlace &) const { return false; }
+  inline bool operator<(const CPUPlace &) const { return false; }
 };
 
 struct CUDAPlace {
@@ -42,6 +43,7 @@ struct CUDAPlace {
     return device == o.device;
   }
   inline bool operator!=(const CUDAPlace &o) const { return !(*this == o); }
+  inline bool operator<(const CUDAPlace &o) const { return device < o.device; }
 
   int device;
 };
@@ -52,6 +54,7 @@ struct CUDAPinnedPlace {
   // needed for variant equality comparison
   inline bool operator==(const CUDAPinnedPlace &) const { return true;
} inline bool operator!=(const CUDAPinnedPlace &) const { return false; } + inline bool operator<(const CUDAPinnedPlace &) const { return false; } }; struct IsCUDAPlace : public boost::static_visitor { @@ -89,18 +92,6 @@ bool is_cuda_pinned_place(const Place &); bool places_are_same_class(const Place &, const Place &); bool is_same_place(const Place &, const Place &); -struct PlaceHash { - std::size_t operator()(const Place &p) const { - constexpr size_t num_dev_bits = 4; - std::hash ihash; - size_t dev_id = 0; - if (is_gpu_place(p)) { - dev_id = boost::get(p).device; - } - return ihash(dev_id << num_dev_bits | p.which()); - } -}; - std::ostream &operator<<(std::ostream &, const Place &); template diff --git a/paddle/fluid/platform/profiler.cc b/paddle/fluid/platform/profiler.cc index 50bc0aba6a..652a6ec7a4 100644 --- a/paddle/fluid/platform/profiler.cc +++ b/paddle/fluid/platform/profiler.cc @@ -15,7 +15,6 @@ limitations under the License. */ #include "paddle/fluid/platform/profiler.h" #include -#include #include #include #include @@ -38,6 +37,7 @@ struct EventList; static int64_t profiler_lister_id = 0; static bool should_send_profile_state = false; +std::mutex profiler_mu; // The profiler state, the initial value is ProfilerState::kDisabled static ProfilerState g_state = ProfilerState::kDisabled; @@ -96,12 +96,6 @@ inline uint64_t GetTimeInNsec() { .count(); } -inline uint64_t PosixInNsec() { - struct timeval tv; - gettimeofday(&tv, nullptr); - return 1000 * (static_cast(tv.tv_sec) * 1000000 + tv.tv_usec); -} - Event::Event(EventType type, std::string name, uint32_t thread_id, const DeviceContext* dev_ctx) : type_(type), name_(name), thread_id_(thread_id), has_cuda_(false) { @@ -109,6 +103,8 @@ Event::Event(EventType type, std::string name, uint32_t thread_id, has_cuda_ = dev_ctx ? 
platform::is_gpu_place(dev_ctx->GetPlace()) : false;
   if (has_cuda_) {
     auto* cuda_dev_ctx = static_cast<const CUDADeviceContext*>(dev_ctx);
+    PADDLE_ENFORCE(cudaSetDevice(
+        boost::get<platform::CUDAPlace>(cuda_dev_ctx->GetPlace()).device));
     PADDLE_ENFORCE(cudaGetDevice(&device_));
     PADDLE_ENFORCE(cudaEventCreate(&event_));
     auto stream = cuda_dev_ctx->stream();
@@ -126,6 +122,7 @@ double Event::CpuElapsedMs(const Event& e) const {
 
 double Event::CudaElapsedMs(const Event& e) const {
 #ifdef PADDLE_WITH_CUDA
+  if (!has_cuda_) return 0.0;
   PADDLE_ENFORCE(e.has_cuda() && has_cuda());
   PADDLE_ENFORCE(e.device() == device());
   PADDLE_ENFORCE(cudaEventSynchronize(event_));
@@ -173,8 +170,10 @@ void PopEvent(const std::string& name, const DeviceContext* dev_ctx) {
 }
 
 RecordEvent::RecordEvent(const std::string& name, const DeviceContext* dev_ctx)
-    : start_ns_(PosixInNsec()) {
+    : is_enabled_(false), start_ns_(PosixInNsec()) {
+  std::lock_guard<std::mutex> l(profiler_mu);
   if (g_state == ProfilerState::kDisabled) return;
+  is_enabled_ = true;
   dev_ctx_ = dev_ctx;
   name_ = name;
   PushEvent(name_, dev_ctx_);
@@ -183,48 +182,45 @@ RecordEvent::RecordEvent(const std::string& name, const DeviceContext* dev_ctx)
 }
 
 RecordEvent::~RecordEvent() {
-  if (g_state == ProfilerState::kDisabled) return;
+  std::lock_guard<std::mutex> l(profiler_mu);
+  if (g_state == ProfilerState::kDisabled || !is_enabled_) return;
   DeviceTracer* tracer = GetDeviceTracer();
   if (tracer) {
     tracer->AddCPURecords(CurAnnotation(), start_ns_, PosixInNsec(),
-                          BlockDepth(), CurThread());
+                          BlockDepth(), g_thread_id);
   }
   ClearCurAnnotation();
   PopEvent(name_, dev_ctx_);
 }
 
-RecordBlock::RecordBlock(int block_id) : start_ns_(PosixInNsec()) {
+RecordBlock::RecordBlock(int block_id)
+    : is_enabled_(false), start_ns_(PosixInNsec()) {
+  std::lock_guard<std::mutex> l(profiler_mu);
   if (g_state == ProfilerState::kDisabled) return;
+  is_enabled_ = true;
   SetCurBlock(block_id);
   name_ = string::Sprintf("block_%d", block_id);
 }
 
 RecordBlock::~RecordBlock() {
-  if (g_state == ProfilerState::kDisabled) return;
+  std::lock_guard<std::mutex> l(profiler_mu);
+  if (g_state == ProfilerState::kDisabled || !is_enabled_) return;
  DeviceTracer* tracer = GetDeviceTracer();
   if (tracer) {
     // We try to put all blocks at the same nested depth in the
     // same timeline lane, and distinguish them using thread_id.
     tracer->AddCPURecords(name_, start_ns_, PosixInNsec(), BlockDepth(),
-                          CurThread());
+                          g_thread_id);
   }
   ClearCurBlock();
 }
 
-RecordThread::RecordThread(int thread_id) {
-  if (g_state == ProfilerState::kDisabled) return;
-  SetCurThread(thread_id);
-}
-
-RecordThread::~RecordThread() {
-  if (g_state == ProfilerState::kDisabled) return;
-  ClearCurThread();
-}
-
 void EnableProfiler(ProfilerState state) {
   PADDLE_ENFORCE(state != ProfilerState::kDisabled,
                  "Can't enable profiling, since the input state is ",
                  "ProfilerState::kDisabled");
+
+  std::lock_guard<std::mutex> l(profiler_mu);
   if (state == g_state) {
     return;
   }
@@ -274,12 +270,13 @@ struct EventItem {
   double min_time;
   double max_time;
   double ave_time;
+  float ratio;
 };
 
 // Print results
 void PrintProfiler(const std::vector<std::vector<EventItem>>& events_table,
                    const std::string& sorted_domain, const size_t name_width,
-                   const size_t data_width) {
+                   const size_t data_width, double total) {
   // Output header information
   std::cout << "\n------------------------->"
             << " Profiling Report "
@@ -292,7 +289,7 @@ void PrintProfiler(const std::vector<std::vector<EventItem>>& events_table,
   } else if (g_state == ProfilerState::kAll) {
     place = "All";
   } else {
-    PADDLE_THROW("Invalid profiler state");
+    PADDLE_THROW("Invalid profiler state", g_state);
   }
 
   std::cout << "Place: " << place << std::endl;
@@ -304,7 +301,8 @@ void PrintProfiler(const std::vector<std::vector<EventItem>>& events_table,
   std::cout << std::setw(name_width) << "Event" << std::setw(data_width)
             << "Calls" << std::setw(data_width) << "Total"
             << std::setw(data_width) << "Min." << std::setw(data_width)
-            << "Max." << std::setw(data_width) << "Ave." << std::endl;
+            << "Max." << std::setw(data_width) << "Ave."
+            << std::setw(data_width) << "Ratio." << std::endl;
   for (size_t i = 0; i < events_table.size(); ++i) {
     for (size_t j = 0; j < events_table[i].size(); ++j) {
       const EventItem& event_item = events_table[i][j];
@@ -313,7 +311,9 @@ void PrintProfiler(const std::vector<std::vector<EventItem>>& events_table,
                 << std::setw(data_width) << event_item.total_time
                 << std::setw(data_width) << event_item.min_time
                 << std::setw(data_width) << event_item.max_time
-                << std::setw(data_width) << event_item.ave_time << std::endl;
+                << std::setw(data_width) << event_item.ave_time
+                << std::setw(data_width) << event_item.total_time / total
+                << std::endl;
     }
   }
   std::cout << std::endl;
@@ -363,6 +363,7 @@ void ParseEvents(const std::vector<std::vector<Event>>& events,
   std::vector<std::vector<EventItem>> events_table;
   size_t max_name_width = 0;
+  double total = 0.;  // the total time of all events
   for (size_t i = 0; i < events.size(); i++) {
     std::list<Event> pushed_events;
     std::vector<EventItem> event_items;
@@ -383,6 +384,7 @@ void ParseEvents(const std::vector<std::vector<Event>>& events,
                                 g_state == ProfilerState::kAll)
                                    ?
rit->CudaElapsedMs(events[i][j]) : rit->CpuElapsedMs(events[i][j]); + total += event_time; std::string event_name = "thread" + std::to_string(rit->thread_id()) + "::" + rit->name(); @@ -391,7 +393,8 @@ void ParseEvents(const std::vector>& events, if (event_idx.find(event_name) == event_idx.end()) { event_idx[event_name] = event_items.size(); EventItem event_item = {event_name, 1, event_time, - event_time, event_time, event_time}; + event_time, event_time, event_time, + 0.}; event_items.push_back(event_item); } else { int index = event_idx[event_name]; @@ -435,11 +438,12 @@ void ParseEvents(const std::vector>& events, } // Print report - PrintProfiler(events_table, sorted_domain, max_name_width + 4, 12); + PrintProfiler(events_table, sorted_domain, max_name_width + 4, 12, total); } void DisableProfiler(EventSortingKey sorted_key, const std::string& profile_path) { + std::lock_guard l(profiler_mu); if (g_state == ProfilerState::kDisabled) return; // Mark the profiling stop. Mark("_stop_profiler_", nullptr); @@ -463,7 +467,7 @@ void SetProfileListener() { std::mt19937 rng; rng.seed(std::random_device()()); std::uniform_int_distribution dist6( - 1, std::numeric_limits::max()); + 1, std::numeric_limits::max()); profiler_lister_id = dist6(rng); } int64_t ListenerId() { return profiler_lister_id; } diff --git a/paddle/fluid/platform/profiler.h b/paddle/fluid/platform/profiler.h index 61b98143e4..c99d9c807d 100644 --- a/paddle/fluid/platform/profiler.h +++ b/paddle/fluid/platform/profiler.h @@ -74,6 +74,7 @@ struct RecordEvent { ~RecordEvent(); + bool is_enabled_; uint64_t start_ns_; // The device context is used by Event to get the current cuda stream. const DeviceContext* dev_ctx_; @@ -89,15 +90,11 @@ struct RecordBlock { ~RecordBlock(); private: + bool is_enabled_; std::string name_; uint64_t start_ns_; }; -struct RecordThread { - explicit RecordThread(int thread_id); - ~RecordThread(); -}; - // Return the event list of all threads. Assumed the returned value calls // event_lists, event_lists[i][j] represents the j-th Event of i-th thread. std::vector> GetAllEvents(); @@ -114,6 +111,8 @@ void ResetProfiler(); void DisableProfiler(EventSortingKey sorted_key, const std::string& profile_path); +const int kEnableProfiler = 1; +const int kDisableProfiler = 2; // Test if the profiler is currently enabled. bool IsProfileEnabled(); // Whether the trainer should send profiling state to PS. diff --git a/paddle/fluid/platform/variant.h b/paddle/fluid/platform/variant.h index 45f60fc9d7..dc9fad29f2 100644 --- a/paddle/fluid/platform/variant.h +++ b/paddle/fluid/platform/variant.h @@ -38,6 +38,7 @@ limitations under the License. 
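// NOTE: an editorial aside, not part of the patch, on why RecordEvent and
// RecordBlock now carry is_enabled_: a guard constructed while profiling was
// off but destroyed after EnableProfiler() ran would otherwise pop an event
// it never pushed. ScopedRecord is a simplified stand-in for those guards.
struct ScopedRecord {
  bool is_enabled_{false};
  explicit ScopedRecord(bool profiler_on) {
    if (!profiler_on) return;  // profiling off: record nothing
    is_enabled_ = true;
    // PushEvent(...) would run here
  }
  ~ScopedRecord() {
    if (!is_enabled_) return;  // never pushed, so never pop
    // PopEvent(...) would run here
  }
};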
*/ #endif #endif +#include #include #include #include diff --git a/paddle/fluid/pybind/CMakeLists.txt b/paddle/fluid/pybind/CMakeLists.txt index 4fef351c21..89ca4f7812 100644 --- a/paddle/fluid/pybind/CMakeLists.txt +++ b/paddle/fluid/pybind/CMakeLists.txt @@ -2,13 +2,13 @@ if(WITH_PYTHON) if(WITH_AMD_GPU) hip_library(paddle_pybind SHARED SRCS pybind.cc exception.cc protobuf.cc const_value.cc recordio.cc - DEPS pybind python proto_desc memory executor prune init profiler feed_fetch_method + DEPS pybind python proto_desc memory executor prune profiler feed_fetch_method parallel_executor ${GLOB_OP_LIB}) else() cc_library(paddle_pybind SHARED SRCS pybind.cc exception.cc protobuf.cc const_value.cc recordio.cc - DEPS pybind python proto_desc memory executor prune init profiler feed_fetch_method + DEPS pybind python proto_desc memory executor prune profiler feed_fetch_method parallel_executor ${GLOB_OP_LIB}) if(NOT APPLE AND NOT ANDROID) diff --git a/paddle/fluid/pybind/const_value.cc b/paddle/fluid/pybind/const_value.cc index 3f28e61649..76aa7d2010 100644 --- a/paddle/fluid/pybind/const_value.cc +++ b/paddle/fluid/pybind/const_value.cc @@ -13,6 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/pybind/const_value.h" +#include #include "paddle/fluid/framework/operator.h" namespace paddle { @@ -23,6 +24,22 @@ void BindConstValue(pybind11::module* m) { m->def("kTempVarName", [] { return framework::kTempVarName; }); m->def("kGradVarSuffix", [] { return framework::kGradVarSuffix; }); m->def("kZeroVarSuffix", [] { return framework::kZeroVarSuffix; }); + + auto op_proto_and_checker_maker = + m->def_submodule("op_proto_and_checker_maker"); + + pybind11::enum_(op_proto_and_checker_maker, "OpRole") + .value("Forward", framework::OpRole::kForward) + .value("Backward", framework::OpRole::kBackward) + .value("Optimize", framework::OpRole::kOptimize) + .value("Loss", framework::OpRole::kLoss) + .value("RPC", framework::OpRole::kRPC); + + op_proto_and_checker_maker.def( + "kOpRoleAttrName", framework::OpProtoAndCheckerMaker::OpRoleAttrName); + op_proto_and_checker_maker.def( + "kOpRoleVarAttrName", + framework::OpProtoAndCheckerMaker::OpRoleVarAttrName); } } // namespace pybind diff --git a/paddle/fluid/pybind/exception.cc b/paddle/fluid/pybind/exception.cc index 08a2f185e1..831f30e35f 100644 --- a/paddle/fluid/pybind/exception.cc +++ b/paddle/fluid/pybind/exception.cc @@ -18,10 +18,13 @@ namespace paddle { namespace pybind { void BindException(pybind11::module* m) { + static pybind11::exception eof(*m, "EOFException"); static pybind11::exception exc(*m, "EnforceNotMet"); pybind11::register_exception_translator([](std::exception_ptr p) { try { if (p) std::rethrow_exception(p); + } catch (const platform::EOFException& e) { + eof(e.what()); } catch (const platform::EnforceNotMet& e) { exc(e.what()); } diff --git a/paddle/fluid/pybind/protobuf.cc b/paddle/fluid/pybind/protobuf.cc index 6471eb3ab7..be623703c2 100644 --- a/paddle/fluid/pybind/protobuf.cc +++ b/paddle/fluid/pybind/protobuf.cc @@ -145,14 +145,14 @@ void BindBlockDesc(pybind11::module *m) { .def_property_readonly("id", &pd::BlockDesc::ID) .def_property_readonly("parent", &pd::BlockDesc::Parent) .def("get_forward_block_idx", &pd::BlockDesc::ForwardBlockID) - .def("set_forward_block_idx", &pd::BlockDesc::SetForwardBlockID) + .def("_set_forward_block_idx", &pd::BlockDesc::SetForwardBlockID) .def("append_op", &pd::BlockDesc::AppendOp, 
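The `BindException` change above registers a second translator target, so `platform::EOFException` surfaces in Python as a dedicated `EOFException` class instead of the generic `EnforceNotMet`. A hedged sketch of the same pybind11 pattern with a stand-in exception type (`MyEOF` and the module name are illustrative only; `PYBIND11_MODULE` is the current spelling of the module macro):

```
// Sketch of mapping a C++ exception type to its own Python exception class.
#include <pybind11/pybind11.h>
#include <stdexcept>

namespace py = pybind11;

struct MyEOF : public std::runtime_error {
  using std::runtime_error::runtime_error;
};

PYBIND11_MODULE(demo, m) {
  static py::exception<MyEOF> eof(m, "EOFException");
  py::register_exception_translator([](std::exception_ptr p) {
    try {
      if (p) std::rethrow_exception(p);
    } catch (const MyEOF& e) {
      eof(e.what());  // raises demo.EOFException on the Python side
    }
  });
  m.def("raise_eof", []() { throw MyEOF("no more data"); });
}
```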
pybind11::return_value_policy::reference) - .def("prepend_op", &pd::BlockDesc::PrependOp, + .def("_prepend_op", &pd::BlockDesc::PrependOp, pybind11::return_value_policy::reference) - .def("insert_op", &pd::BlockDesc::InsertOp, + .def("_insert_op", &pd::BlockDesc::InsertOp, pybind11::return_value_policy::reference) - .def("remove_op", &pd::BlockDesc::RemoveOp) + .def("_remove_op", &pd::BlockDesc::RemoveOp) .def("var", [](pd::BlockDesc &self, pybind11::bytes byte_name) { std::string name = byte_name; @@ -165,7 +165,7 @@ void BindBlockDesc(pybind11::module *m) { return self.HasVar(name); }, pybind11::return_value_policy::reference) - .def("rename_var", + .def("_rename_var", [](pd::BlockDesc &self, const pybind11::bytes &byte_name, const pybind11::bytes &byte_name_new) { std::string name = byte_name; @@ -189,7 +189,7 @@ void BindBlockDesc(pybind11::module *m) { return self.FindVarRecursive(name); }, pybind11::return_value_policy::reference) - .def("remove_var", + .def("_remove_var", [](pd::BlockDesc &self, pybind11::bytes byte_name) { std::string name = byte_name; return self.RemoveVar(name); @@ -238,6 +238,7 @@ void BindVarDsec(pybind11::module *m) { pybind11::enum_(var_desc, "VarType", "") .value("BOOL", pd::proto::VarType::BOOL) + .value("UINT8", pd::proto::VarType::UINT8) .value("INT16", pd::proto::VarType::INT16) .value("INT32", pd::proto::VarType::INT32) .value("INT64", pd::proto::VarType::INT64) @@ -267,7 +268,8 @@ void BindOpDesc(pybind11::module *m) { .value("STRINGS", pd::proto::AttrType::STRINGS) .value("BOOL", pd::proto::AttrType::BOOLEAN) .value("BOOLS", pd::proto::AttrType::BOOLEANS) - .value("BLOCK", pd::proto::AttrType::BLOCK); + .value("BLOCK", pd::proto::AttrType::BLOCK) + .value("BLOCKS", pd::proto::AttrType::BLOCKS); pybind11::class_ op_desc(*m, "OpDesc", ""); op_desc @@ -292,13 +294,15 @@ void BindOpDesc(pybind11::module *m) { .def("set_attr", &pd::OpDesc::SetAttr) .def("attr", &pd::OpDesc::GetAttr) .def("set_block_attr", &pd::OpDesc::SetBlockAttr) + .def("set_blocks_attr", &pd::OpDesc::SetBlocksAttr) .def("set_serialized_attr", [](pd::OpDesc &self, const std::string &name, const pybind11::bytes &seriralized) { std::string ser(seriralized); self.SetAttr(name, ser); }) - .def("block_attr", &pd::OpDesc::GetBlockAttr) + .def("block_attr_id", &pd::OpDesc::GetBlockAttrId) + .def("blocks_attr_ids", &pd::OpDesc::GetBlocksAttrIds) .def("check_attrs", &pd::OpDesc::CheckAttrs) .def("infer_shape", &pd::OpDesc::InferShape) .def("infer_var_type", &pd::OpDesc::InferVarType) diff --git a/paddle/fluid/pybind/pybind.cc b/paddle/fluid/pybind/pybind.cc index c925686f83..7127bb38f6 100644 --- a/paddle/fluid/pybind/pybind.cc +++ b/paddle/fluid/pybind/pybind.cc @@ -14,6 +14,7 @@ limitations under the License. */ #include #include #include +#include #include // NOLINT // for call_once #include #include @@ -24,7 +25,6 @@ limitations under the License. */ #include "paddle/fluid/framework/executor.h" #include "paddle/fluid/framework/feed_fetch_method.h" #include "paddle/fluid/framework/framework.pb.h" -#include "paddle/fluid/framework/init.h" #include "paddle/fluid/framework/lod_rank_table.h" #include "paddle/fluid/framework/lod_tensor.h" #include "paddle/fluid/framework/lod_tensor_array.h" @@ -34,7 +34,9 @@ limitations under the License. 
*/ #include "paddle/fluid/framework/reader.h" #include "paddle/fluid/framework/selected_rows.h" #include "paddle/fluid/operators/activation_op.h" +#include "paddle/fluid/operators/reader/lod_tensor_blocking_queue.h" #include "paddle/fluid/platform/enforce.h" +#include "paddle/fluid/platform/init.h" #include "paddle/fluid/platform/place.h" #include "paddle/fluid/platform/profiler.h" #include "paddle/fluid/pybind/const_value.h" @@ -65,6 +67,14 @@ bool IsCompiledWithCUDA() { #endif } +bool IsCompiledWithDIST() { +#ifdef PADDLE_WITH_DISTRIBUTE + return true; +#else + return false; +#endif +} + PYBIND11_PLUGIN(core) { py::module m("core", "C++ core of PaddlePaddle"); @@ -77,37 +87,37 @@ PYBIND11_PLUGIN(core) { py::class_(m, "Tensor", py::buffer_protocol()) .def_buffer( [](Tensor &self) -> py::buffer_info { return CastToPyBuffer(self); }) - .def("get_dims", + .def("_get_dims", [](const Tensor &self) { return vectorize(self.dims()); }) - .def("set_dims", + .def("_set_dims", [](Tensor &self, const std::vector &dim) { self.Resize(make_ddim(dim)); }) - .def("set_layout", + .def("_set_layout", [](Tensor &self, const std::string &layout) { self.set_layout(StringToDataLayout(layout)); }) - .def("alloc_float", + .def("_alloc_float", [](Tensor &self, paddle::platform::CUDAPlace &place) { self.mutable_data(place); }) - .def("alloc_float", + .def("_alloc_float", [](Tensor &self, paddle::platform::CPUPlace &place) { self.mutable_data(place); }) - .def("alloc_int", + .def("_alloc_int", [](Tensor &self, paddle::platform::CPUPlace &place) { self.mutable_data(place); }) - .def("alloc_int", + .def("_alloc_int", [](Tensor &self, paddle::platform::CUDAPlace &place) { self.mutable_data(place); }) - .def("alloc_int", + .def("_alloc_int", [](Tensor &self, paddle::platform::CUDAPinnedPlace &place) { self.mutable_data(place); }) - .def("alloc_float", + .def("_alloc_float", [](Tensor &self, paddle::platform::CUDAPinnedPlace &place) { self.mutable_data(place); }) @@ -117,6 +127,7 @@ PYBIND11_PLUGIN(core) { .def("set", PyCPUTensorSetFromArray) .def("set", PyCPUTensorSetFromArray) .def("set", PyCPUTensorSetFromArray) + .def("set", PyCPUTensorSetFromArray) #ifdef PADDLE_WITH_CUDA .def("set", PyCUDATensorSetFromArray) .def("set", PyCUDATensorSetFromArray) @@ -124,45 +135,94 @@ PYBIND11_PLUGIN(core) { .def("set", PyCUDATensorSetFromArray) .def("set", PyCUDATensorSetFromArray) .def("set", PyCUDATensorSetFromArray) + .def("set", PyCUDATensorSetFromArray) .def("set", PyCUDAPinnedTensorSetFromArray) .def("set", PyCUDAPinnedTensorSetFromArray) .def("set", PyCUDAPinnedTensorSetFromArray) .def("set", PyCUDAPinnedTensorSetFromArray) .def("set", PyCUDAPinnedTensorSetFromArray) .def("set", PyCUDAPinnedTensorSetFromArray) + .def("set", PyCUDAPinnedTensorSetFromArray) #endif .def("shape", [](Tensor &self) { return vectorize(self.dims()); }) - .def("set_float_element", TensorSetElement) - .def("get_float_element", TensorGetElement) - .def("set_double_element", TensorSetElement) - .def("get_double_element", TensorGetElement) - .def("dtype", [](Tensor &self) { return ToDataType(self.type()); }); + .def("_set_float_element", TensorSetElement) + .def("_get_float_element", TensorGetElement) + .def("_set_double_element", TensorSetElement) + .def("_get_double_element", TensorGetElement) + .def("_dtype", [](Tensor &self) { return ToDataType(self.type()); }); py::class_(m, "LoDTensor") .def_buffer( [](Tensor &self) -> py::buffer_info { return CastToPyBuffer(self); }) - .def( - "__init__", - [](LoDTensor &instance, const std::vector> &lod) { - LoD 
new_lod; - new_lod.reserve(lod.size()); - std::copy(lod.begin(), lod.end(), std::back_inserter(new_lod)); - new (&instance) LoDTensor(new_lod); - }) + .def("__init__", + [](LoDTensor &instance, const std::vector> + &recursive_sequence_lengths) { + LoD new_lod; + new_lod.reserve(recursive_sequence_lengths.size()); + std::copy(recursive_sequence_lengths.begin(), + recursive_sequence_lengths.end(), + std::back_inserter(new_lod)); + LoD new_offset_lod = ConvertToOffsetBasedLoD(new_lod); + PADDLE_ENFORCE( + CheckLoD(new_offset_lod, -1), + "the provided recursive_sequence_lengths info is invalid"); + new (&instance) LoDTensor(new_offset_lod); + }) .def("__init__", [](LoDTensor &instance) { new (&instance) LoDTensor(); }) + // We implement offset based LOD in C++ while we use length based with + // Python API. So we changed set_lod to set_recursive_sequence_lengths to + // avoid misuse. + // The discussion is here: + // https://github.com/PaddlePaddle/Paddle/issues/10855 .def("set_lod", [](LoDTensor &self, const std::vector> &lod) { + // the input lod is offset-based level-of-detail info LoD new_lod; new_lod.reserve(lod.size()); std::copy(lod.begin(), lod.end(), std::back_inserter(new_lod)); + PADDLE_ENFORCE(CheckLoD(new_lod, vectorize(self.dims()).front()), + "the provided lod info is invalid"); self.set_lod(new_lod); }) - .def("lod", [](LoDTensor &self) -> std::vector> { - auto lod = self.lod(); - std::vector> new_lod; - new_lod.reserve(lod.size()); - std::copy(lod.begin(), lod.end(), std::back_inserter(new_lod)); - return new_lod; + .def("set_recursive_sequence_lengths", + [](LoDTensor &self, const std::vector> + &recursive_sequence_lengths) { + // the input recursive_sequence_lengths is length-based + // level-of-detail info + LoD new_lod; + new_lod.reserve(recursive_sequence_lengths.size()); + std::copy(recursive_sequence_lengths.begin(), + recursive_sequence_lengths.end(), + std::back_inserter(new_lod)); + LoD new_offset_lod = ConvertToOffsetBasedLoD(new_lod); + PADDLE_ENFORCE( + CheckLoD(new_offset_lod, vectorize(self.dims()).front()), + "the provided recursive_sequence_lengths info is invalid"); + self.set_lod(new_offset_lod); + }) + .def("lod", + [](LoDTensor &self) -> std::vector> { + // output the offset-based lod info + LoD lod = self.lod(); + std::vector> new_lod; + new_lod.reserve(lod.size()); + std::copy(lod.begin(), lod.end(), std::back_inserter(new_lod)); + return new_lod; + }) + // See above comments of set_lod.
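The `set_recursive_sequence_lengths` binding above accepts length-based LoD from Python and converts it to the offset-based form stored in C++: sequence lengths `[[2, 3]]` become offsets `[[0, 2, 5]]` via a running prefix sum. A small sketch of that conversion, standing in for `ConvertToOffsetBasedLoD` with simplified types:

```
// Length-based -> offset-based LoD conversion, level by level.
#include <cassert>
#include <cstddef>
#include <vector>

using LoD = std::vector<std::vector<size_t>>;

LoD LengthsToOffsets(const LoD& lengths) {
  LoD offsets;
  offsets.reserve(lengths.size());
  for (const auto& level : lengths) {
    std::vector<size_t> offset_level(1, 0);  // every level starts at offset 0
    for (size_t len : level) offset_level.push_back(offset_level.back() + len);
    offsets.push_back(std::move(offset_level));
  }
  return offsets;
}

int main() {
  // Two sequences of lengths 2 and 3 -> offsets [0, 2, 5].
  LoD offsets = LengthsToOffsets({{2, 3}});
  assert(offsets[0] == (std::vector<size_t>{0, 2, 5}));
  return 0;
}
```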
+ .def("recursive_sequence_lengths", + [](LoDTensor &self) -> std::vector> { + // output the length-based lod info + LoD lod = ConvertToLengthBasedLoD(self.lod()); + std::vector> new_lod; + new_lod.reserve(lod.size()); + std::copy(lod.begin(), lod.end(), std::back_inserter(new_lod)); + return new_lod; + }) + .def("has_valid_recursive_sequence_lengths", [](LoDTensor &self) -> bool { + // Check that the lod info is valid and match the outermost + // dimension of the LoDTensor data + return CheckLoD(self.lod(), vectorize(self.dims()).front()); }); py::class_(m, "SelectedRows") @@ -188,15 +248,11 @@ PYBIND11_PLUGIN(core) { #endif }) .def("rows", [](SelectedRows &self) { -#ifndef PADDLE_WITH_CUDA - return self.rows(); -#else - auto rows = self.rows(); - std::vector new_rows; - new_rows.reserve(rows.size()); - std::copy(rows.begin(), rows.end(), std::back_inserter(new_rows)); - return new_rows; -#endif + auto rows = self.rows(); + std::vector new_rows; + new_rows.reserve(rows.size()); + std::copy(rows.begin(), rows.end(), std::back_inserter(new_rows)); + return new_rows; }); py::class_(m, "Variable", R"DOC(Variable Class. @@ -245,7 +301,39 @@ All parameter, weight, gradient are variables in Paddle. py::return_value_policy::reference); py::class_(m, "Reader", "") - .def("reset", &framework::ReaderHolder::ReInit); + .def("reset", &framework::ReaderHolder::ResetAll); + + using LoDTensorBlockingQueue = + ::paddle::operators::reader::LoDTensorBlockingQueue; + using LoDTensorBlockingQueueHolder = + ::paddle::operators::reader::LoDTensorBlockingQueueHolder; + py::class_>( + m, "LoDTensorBlockingQueue", "") + .def("push", + [](LoDTensorBlockingQueue &self, + const std::vector &lod_tensor_vec) { + pybind11::gil_scoped_release release; + return self.Push(lod_tensor_vec); + }) + .def("size", &LoDTensorBlockingQueue::Size) + .def("capacity", &LoDTensorBlockingQueue::Cap) + .def("close", &LoDTensorBlockingQueue::Close) + .def("is_closed", &LoDTensorBlockingQueue::IsClosed); + + m.def("init_lod_tensor_blocking_queue", + [](Variable &var, size_t capacity, + const std::vector> &shapes) + -> std::shared_ptr { + std::vector dims(shapes.size()); + std::transform(shapes.begin(), shapes.end(), dims.begin(), + [](const std::vector &shape) { + return make_ddim(shape); + }); + auto *holder = var.GetMutable(); + holder->InitOnce(capacity, dims); + return holder->GetQueue(); + }, + py::return_value_policy::copy); py::class_(m, "Scope", "") .def("var", @@ -306,8 +394,10 @@ All parameter, weight, gradient are variables in Paddle. InferenceOptimize(*(origin.Proto()), &pruned_desc); return new ProgramDesc(pruned_desc); }); - m.def("empty_var_name", []() { return framework::kEmptyVarName; }); - m.def("grad_var_suffix", []() { return framework::kGradVarSuffix; }); + m.def("empty_var_name", + []() { return std::string(framework::kEmptyVarName); }); + m.def("grad_var_suffix", + []() { return std::string(framework::kGradVarSuffix); }); m.def_submodule( "var_names", "The module will return special predefined variable name in Paddle") @@ -410,9 +500,12 @@ All parameter, weight, gradient are variables in Paddle. 
py::class_(m, "Executor") .def(py::init()) - .def("run", - (void (Executor::*)(const ProgramDesc &, Scope *, int, bool, bool)) & - Executor::Run); + .def("close", &Executor::Close) + .def("run", [](Executor &self, const ProgramDesc &prog, Scope *scope, + int block_id, bool create_local_scope, bool create_vars) { + pybind11::gil_scoped_release release; + self.Run(prog, scope, block_id, create_local_scope, create_vars); + }); m.def("init_gflags", framework::InitGflags); m.def("init_glog", framework::InitGLOG); @@ -420,6 +513,7 @@ All parameter, weight, gradient are variables in Paddle. [](bool init_p2p) { framework::InitDevices(init_p2p); }); m.def("is_compiled_with_cuda", IsCompiledWithCUDA); + m.def("is_compiled_with_dist", IsCompiledWithDIST); #ifdef PADDLE_WITH_CUDA m.def("is_float16_supported", [](const platform::CUDAPlace &place) -> bool { // Only GPUs with Compute Capability >= 53 support float16 @@ -446,6 +540,8 @@ All parameter, weight, gradient are variables in Paddle. }); py::class_(m, "LoDTensorArray") + .def("__init__", + [](LoDTensorArray &instance) { new (&instance) LoDTensorArray(); }) .def("__getitem__", [](LoDTensorArray &self, size_t i) { return &self.at(i); }, py::return_value_policy::reference) @@ -492,23 +588,83 @@ All parameter, weight, gradient are variables in Paddle. m.def("enable_profiler", platform::EnableProfiler); m.def("disable_profiler", platform::DisableProfiler); + m.def("is_profiler_enabled", platform::IsProfileEnabled); m.def("reset_profiler", platform::ResetProfiler); - py::class_(m, "ParallelExecutor") - .def("__init__", - [](ParallelExecutor &self, size_t num_threads, bool use_event, - const std::vector &places, - const std::unordered_set ¶ms, - const std::unordered_set &bcast_vars, - const ProgramDesc &main_program, const std::string &loss_var_name, - Scope *scope, std::vector &local_scopes, - bool allow_op_delay, bool use_default_grad_scale) { - new (&self) ParallelExecutor( - num_threads, use_event, places, params, bcast_vars, - main_program, loss_var_name, scope, local_scopes, - allow_op_delay, use_default_grad_scale); - }) - .def("bcast_params", &ParallelExecutor::BCastParamsToGPUs) + // -- python binds for parallel executor. 
+ py::class_ pe(m, "ParallelExecutor"); + py::class_(pe, "ExecutionStrategy") + .def(py::init()) + .def_property( + "num_threads", + [](const ExecutionStrategy &self) { return self.num_threads_; }, + [](ExecutionStrategy &self, size_t num_threads) { + self.num_threads_ = num_threads; + }) + .def_property( + "use_cuda", + [](const ExecutionStrategy &self) { return self.use_cuda_; }, + [](ExecutionStrategy &self, bool use_cuda) { + self.use_cuda_ = use_cuda; + }) + .def_property( + "allow_op_delay", + [](const ExecutionStrategy &self) { return self.allow_op_delay_; }, + [](ExecutionStrategy &self, bool allow_op_delay) { + self.allow_op_delay_ = allow_op_delay; + }) + .def_property( + "num_iteration_per_drop_scope", + [](const ExecutionStrategy &self) { + return self.num_iteration_per_drop_scope_; + }, + [](ExecutionStrategy &self, size_t num_iteration_per_drop_scope) { + self.num_iteration_per_drop_scope_ = num_iteration_per_drop_scope; + }); + py::class_ build_strategy(pe, "BuildStrategy"); + + py::enum_(build_strategy, "ReduceStrategy") + .value("Reduce", BuildStrategy::ReduceStrategy::kReduce) + .value("AllReduce", BuildStrategy::ReduceStrategy::kAllReduce); + py::enum_(build_strategy, + "GradientScaleStrategy") + .value("CoeffNumDevice", + BuildStrategy::GradientScaleStrategy::kCoeffNumDevice) + .value("One", BuildStrategy::GradientScaleStrategy::kOne) + .value("Customized", BuildStrategy::GradientScaleStrategy::kCustomized); + + build_strategy.def(py::init()) + .def_property( + "reduce_strategy", + [](const BuildStrategy &self) { return self.reduce_; }, + [](BuildStrategy &self, BuildStrategy::ReduceStrategy strategy) { + self.reduce_ = strategy; + }) + .def_property( + "gradient_scale_strategy", + [](const BuildStrategy &self) { return self.gradient_scale_; }, + [](BuildStrategy &self, + BuildStrategy::GradientScaleStrategy strategy) { + self.gradient_scale_ = strategy; + }) + .def_property( + "debug_graphviz_path", + [](const BuildStrategy &self) { return self.debug_graphviz_path_; }, + [](BuildStrategy &self, const std::string &path) { + self.debug_graphviz_path_ = path; + }) + .def_property( + "enable_data_balance", + [](const BuildStrategy &self) { return self.enable_data_balance_; }, + [](BuildStrategy &self, bool b) { self.enable_data_balance_ = b; }); + + pe.def(py::init &, + const std::unordered_set &, + const std::unordered_set &, const ProgramDesc &, + const std::string &, Scope *, std::vector &, + const ExecutionStrategy &, const BuildStrategy &, size_t, + size_t>()) + .def("_bcast_params", &ParallelExecutor::BCastParamsToDevices) // NOTE: even we return a vec* to Python use reference policy. // We still cannot get local_scope from this vector, since the element // of vec will be freed by Python GC. We can only return Scope* @@ -522,7 +678,12 @@ All parameter, weight, gradient are variables in Paddle. 
&ParallelExecutor::FeedTensorsIntoLocalScopes) .def("feed_and_split_tensor_into_local_scopes", &ParallelExecutor::FeedAndSplitTensorIntoLocalScopes) - .def("run", &ParallelExecutor::Run); + .def("run", [](ParallelExecutor &self, + const std::vector &fetch_tensors, + const std::string &fetched_var_name) { + pybind11::gil_scoped_release release; + self.Run(fetch_tensors, fetched_var_name); + }); BindRecordIOWriter(&m); return m.ptr(); diff --git a/paddle/fluid/pybind/recordio.cc b/paddle/fluid/pybind/recordio.cc index 330d104e0a..f83b026d4d 100644 --- a/paddle/fluid/pybind/recordio.cc +++ b/paddle/fluid/pybind/recordio.cc @@ -30,7 +30,9 @@ class RecordIOWriter { public: RecordIOWriter(const std::string& filename, recordio::Compressor compressor, size_t max_num_record) - : stream_(filename), writer_(&stream_, compressor, max_num_record) {} + : closed_(false), + stream_(filename), + writer_(&stream_, compressor, max_num_record) {} void AppendTensor(const framework::LoDTensor& tensor) { tensors_.push_back(tensor); @@ -47,9 +49,17 @@ class RecordIOWriter { PADDLE_ENFORCE(tensors_.empty()); writer_.Flush(); stream_.close(); + closed_ = true; + } + + ~RecordIOWriter() { + if (!closed_) { + Close(); + } } private: + bool closed_; std::vector tensors_; std::ofstream stream_; recordio::Writer writer_; diff --git a/paddle/fluid/pybind/tensor_py.h b/paddle/fluid/pybind/tensor_py.h index 93b09ed692..3e2ea1ef88 100644 --- a/paddle/fluid/pybind/tensor_py.h +++ b/paddle/fluid/pybind/tensor_py.h @@ -97,7 +97,7 @@ struct CastToPyBufferImpl { inline pybind11::buffer_info CastToPyBuffer(const framework::Tensor &tensor) { auto buffer_info = details::CastToPyBufferImpl()(tensor); + uint8_t, platform::float16>()(tensor); return buffer_info; } @@ -146,7 +146,7 @@ void PyCPUTensorSetFromArray( template <> // This following specialization maps uint16_t in the parameter type to // platform::float16. -void PyCPUTensorSetFromArray( +inline void PyCPUTensorSetFromArray( framework::Tensor *self, pybind11::array_t @@ -185,7 +185,7 @@ void PyCUDATensorSetFromArray( template <> // This following specialization maps uint16_t in the parameter type to // platform::float16. -void PyCUDATensorSetFromArray( +inline void PyCUDATensorSetFromArray( framework::Tensor *self, pybind11::array_t @@ -224,7 +224,7 @@ void PyCUDAPinnedTensorSetFromArray( template <> // This following specialization maps uint16_t in the parameter type to // platform::float16. 
-void PyCUDAPinnedTensorSetFromArray( +inline void PyCUDAPinnedTensorSetFromArray( framework::Tensor *self, pybind11::array_t diff --git a/paddle/fluid/recordio/chunk.cc b/paddle/fluid/recordio/chunk.cc index 82d9aa601c..6c65d9160c 100644 --- a/paddle/fluid/recordio/chunk.cc +++ b/paddle/fluid/recordio/chunk.cc @@ -119,40 +119,56 @@ bool Chunk::Write(std::ostream& os, Compressor ct) const { } bool Chunk::Parse(std::istream& sin) { - Header hdr; - bool ok = hdr.Parse(sin); + ChunkParser parser(sin); + if (!parser.Init()) { + return false; + } + Clear(); + while (parser.HasNext()) { + Add(parser.Next()); + } + return true; +} + +ChunkParser::ChunkParser(std::istream& sin) : in_(sin) {} +bool ChunkParser::Init() { + pos_ = 0; + bool ok = header_.Parse(in_); if (!ok) { return ok; } - auto beg_pos = sin.tellg(); - uint32_t crc = Crc32Stream(sin, hdr.CompressSize()); - PADDLE_ENFORCE_EQ(hdr.Checksum(), crc); - Clear(); - sin.seekg(beg_pos, sin.beg); - std::unique_ptr compressed_stream; - switch (hdr.CompressType()) { + auto beg_pos = in_.tellg(); + uint32_t crc = Crc32Stream(in_, header_.CompressSize()); + PADDLE_ENFORCE_EQ(header_.Checksum(), crc); + in_.seekg(beg_pos, in_.beg); + + switch (header_.CompressType()) { case Compressor::kNoCompress: break; case Compressor::kSnappy: - compressed_stream.reset(new snappy::iSnappyStream(sin)); + compressed_stream_.reset(new snappy::iSnappyStream(in_)); break; default: PADDLE_THROW("Not implemented"); } + return true; +} - std::istream& stream = compressed_stream ? *compressed_stream : sin; +bool ChunkParser::HasNext() const { return pos_ < header_.NumRecords(); } - for (uint32_t i = 0; i < hdr.NumRecords(); ++i) { - uint32_t rec_len; - stream.read(reinterpret_cast(&rec_len), sizeof(uint32_t)); - std::string buf; - buf.resize(rec_len); - stream.read(&buf[0], rec_len); - PADDLE_ENFORCE_EQ(rec_len, stream.gcount()); - Add(buf); +std::string ChunkParser::Next() { + if (!HasNext()) { + return ""; } - return true; + ++pos_; + std::istream& stream = compressed_stream_ ? *compressed_stream_ : in_; + uint32_t rec_len; + stream.read(reinterpret_cast(&rec_len), sizeof(uint32_t)); + std::string buf; + buf.resize(rec_len); + stream.read(&buf[0], rec_len); + PADDLE_ENFORCE_EQ(rec_len, stream.gcount()); + return buf; } - } // namespace recordio } // namespace paddle diff --git a/paddle/fluid/recordio/chunk.h b/paddle/fluid/recordio/chunk.h index 71a1556a33..cfb954a591 100644 --- a/paddle/fluid/recordio/chunk.h +++ b/paddle/fluid/recordio/chunk.h @@ -13,6 +13,7 @@ // limitations under the License. 
#pragma once +#include #include #include @@ -53,9 +54,20 @@ class Chunk { DISABLE_COPY_AND_ASSIGN(Chunk); }; -size_t CompressData(const char* in, size_t in_length, Compressor ct, char* out); +class ChunkParser { + public: + explicit ChunkParser(std::istream& sin); + + bool Init(); + std::string Next(); + bool HasNext() const; -void DeflateData(const char* in, size_t in_length, Compressor ct, char* out); + private: + Header header_; + uint32_t pos_{0}; + std::istream& in_; + std::unique_ptr compressed_stream_; +}; } // namespace recordio } // namespace paddle diff --git a/paddle/fluid/recordio/scanner.cc b/paddle/fluid/recordio/scanner.cc index 88b4d4001b..a0a2f98422 100644 --- a/paddle/fluid/recordio/scanner.cc +++ b/paddle/fluid/recordio/scanner.cc @@ -22,35 +22,34 @@ namespace paddle { namespace recordio { Scanner::Scanner(std::unique_ptr &&stream) - : stream_(std::move(stream)) { + : stream_(std::move(stream)), parser_(*stream_) { Reset(); } -Scanner::Scanner(const std::string &filename) { - stream_.reset(new std::ifstream(filename)); +Scanner::Scanner(const std::string &filename) + : stream_(new std::ifstream(filename)), parser_(*stream_) { + PADDLE_ENFORCE(static_cast(*stream_), "Cannot open file %s", filename); Reset(); } void Scanner::Reset() { stream_->clear(); stream_->seekg(0, std::ios::beg); - ParseNextChunk(); + parser_.Init(); } std::string Scanner::Next() { - PADDLE_ENFORCE(!eof_, "StopIteration"); - auto rec = cur_chunk_.Record(offset_++); - if (offset_ == cur_chunk_.NumRecords()) { - ParseNextChunk(); + if (stream_->eof()) { + return ""; } - return rec; -} -void Scanner::ParseNextChunk() { - eof_ = !cur_chunk_.Parse(*stream_); - offset_ = 0; + auto res = parser_.Next(); + if (!parser_.HasNext() && HasNext()) { + parser_.Init(); + } + return res; } -bool Scanner::HasNext() const { return !eof_; } +bool Scanner::HasNext() const { return !stream_->eof(); } } // namespace recordio } // namespace paddle diff --git a/paddle/fluid/recordio/scanner.h b/paddle/fluid/recordio/scanner.h index 34f1b0c78d..0d885dd87a 100644 --- a/paddle/fluid/recordio/scanner.h +++ b/paddle/fluid/recordio/scanner.h @@ -37,11 +37,7 @@ class Scanner { private: std::unique_ptr stream_; - Chunk cur_chunk_; - size_t offset_; - bool eof_; - - void ParseNextChunk(); + ChunkParser parser_; }; } // namespace recordio } // namespace paddle diff --git a/paddle/fluid/string/printf.h b/paddle/fluid/string/printf.h index 062095a1c3..47de233773 100644 --- a/paddle/fluid/string/printf.h +++ b/paddle/fluid/string/printf.h @@ -83,6 +83,13 @@ void Fprintf(std::ostream& out, const char* fmt, const Args&... args) { tinyformat::vformat(out, fmt, tinyformat::makeFormatList(args...)); } +template +std::string Sprintf(const Args&... args) { + std::ostringstream oss; + Fprintf(oss, ""); + return oss.str(); +} + template std::string Sprintf(const char* fmt, const Args&... 
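The recordio rework above replaces the monolithic `Chunk::Parse` with an incremental `ChunkParser` (`Init`/`HasNext`/`Next`), which both `Chunk::Parse` and `Scanner` now drive as a simple pull loop. A simplified stand-in showing the iteration shape (header and CRC validation elided):

```
// Pull-style record iteration, in the shape of ChunkParser.
#include <iostream>
#include <string>
#include <vector>

class RecordSource {
 public:
  explicit RecordSource(std::vector<std::string> records)
      : records_(std::move(records)) {}
  bool Init() { pos_ = 0; return true; }  // header/CRC checks would go here
  bool HasNext() const { return pos_ < records_.size(); }
  std::string Next() { return HasNext() ? records_[pos_++] : std::string(); }

 private:
  std::vector<std::string> records_;
  size_t pos_{0};
};

int main() {
  RecordSource parser({"rec0", "rec1", "rec2"});
  if (!parser.Init()) return 1;
  while (parser.HasNext()) {  // mirrors the loop in Chunk::Parse
    std::cout << parser.Next() << "\n";
  }
  return 0;
}
```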
args) { std::ostringstream oss; diff --git a/paddle/fluid/string/printf_test.cc b/paddle/fluid/string/printf_test.cc index 678029f935..544b12ef3a 100644 --- a/paddle/fluid/string/printf_test.cc +++ b/paddle/fluid/string/printf_test.cc @@ -27,4 +27,5 @@ TEST(StringPrintf, StringPrintf) { EXPECT_EQ(std::string("Wednesday, July 27, 14:44"), paddle::string::Sprintf("%s, %s %d, %.2d:%.2d", weekday, month, day, hour, min)); + EXPECT_EQ(std::string(""), paddle::string::Sprintf()); } diff --git a/paddle/fluid/train/demo/CMakeLists.txt b/paddle/fluid/train/demo/CMakeLists.txt new file mode 100644 index 0000000000..78d6e5ff55 --- /dev/null +++ b/paddle/fluid/train/demo/CMakeLists.txt @@ -0,0 +1,66 @@ +cmake_minimum_required(VERSION 3.0) + +project(cpp_train_demo CXX C) + +set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11") + +if(NOT DEFINED PADDLE_LIB) + message(FATAL_ERROR "please set PADDLE_LIB with -DPADDLE_LIB=/paddle/lib/dir") +endif() + +option(WITH_MKLDNN "Compile PaddlePaddle with MKLDNN" OFF) +option(WITH_MKL "Compile PaddlePaddle with MKL support, default use openblas." OFF) + +include_directories("${PADDLE_LIB}") +include_directories("${PADDLE_LIB}/third_party/install/protobuf/include") +include_directories("${PADDLE_LIB}/third_party/install/glog/include") +include_directories("${PADDLE_LIB}/third_party/install/gflags/include") +include_directories("${PADDLE_LIB}/third_party/install/snappy/include") +include_directories("${PADDLE_LIB}/third_party/install/snappystream/include") +include_directories("${PADDLE_LIB}/third_party/install/zlib/include") + +include_directories("${PADDLE_LIB}/third_party/boost") +include_directories("${PADDLE_LIB}/third_party/eigen3") + +link_directories("${PADDLE_LIB}/third_party/install/snappy/lib") +link_directories("${PADDLE_LIB}/third_party/install/snappystream/lib") +link_directories("${PADDLE_LIB}/third_party/install/protobuf/lib") +link_directories("${PADDLE_LIB}/third_party/install/glog/lib") +link_directories("${PADDLE_LIB}/third_party/install/gflags/lib") +link_directories("${PADDLE_LIB}/third_party/install/zlib/lib") + +add_executable(demo_trainer demo_trainer.cc) + +if(WITH_MKLDNN) + include_directories("${PADDLE_LIB}/third_party/install/mkldnn/include") + set(MKLDNN_LIB ${PADDLE_LIB}/third_party/install/mkldnn/lib/libmkldnn.so.0) +endif() + +if(WITH_MKL) + include_directories("${PADDLE_LIB}/third_party/install/mklml/include") + set(MATH_LIB ${PADDLE_LIB}/third_party/install/mklml/lib/libmklml_intel.so) +else() + if(APPLE) + set(MATH_LIB cblas) + else(APPLE) + set(MATH_LIB ${PADDLE_LIB}/third_party/install/openblas/lib/libopenblas.a) + endif(APPLE) +endif() + +if(APPLE) + set(MACOS_LD_FLAGS "-undefined dynamic_lookup -Wl,-all_load -framework CoreFoundation -framework Security") +else(APPLE) + set(ARCHIVE_START "-Wl,--whole-archive") + set(ARCHIVE_END "-Wl,--no-whole-archive") + set(EXTERNAL_LIB "-lrt -ldl -lpthread") +endif(APPLE) + +target_link_libraries(demo_trainer + ${MACOS_LD_FLAGS} + ${ARCHIVE_START} + ${PADDLE_LIB}/paddle/fluid/inference/libpaddle_fluid.a + ${ARCHIVE_END} + ${MATH_LIB} + ${MKLDNN_LIB} + glog gflags protobuf snappystream snappy z + ${EXTERNAL_LIB}) diff --git a/paddle/fluid/train/demo/README.md b/paddle/fluid/train/demo/README.md new file mode 100644 index 0000000000..41b01d3382 --- /dev/null +++ b/paddle/fluid/train/demo/README.md @@ -0,0 +1,66 @@ + +### step 1. build paddle lib + +``` + +# WITH_MKL=ON|OFF +# WITH_MKLDNN=ON|OFF + +PADDLE_LIB=/paddle/lib/dir +cmake .. 
-DFLUID_INSTALL_DIR=$PADDLE_LIB \ + -DCMAKE_BUILD_TYPE=Release \ + -DWITH_FLUID_ONLY=ON \ + -DWITH_GPU=OFF \ + -DWITH_STYLE_CHECK=OFF \ + -DWITH_MKL=OFF \ + -DWITH_MKLDNN=OFF +make -j8 +make -j8 inference_lib_dist +``` + +### step 2. generate program desc +``` +# please install paddle before running this script +pip install --upgrade paddlepaddle-*.whl +python demo_network.py +``` + +This will generate two program desc files: + - startup_program: used to init all parameters + - main_program: main logic of the network + +### step 3. build demo_trainer and run it. + + +``` +# Make a build dir in the same dir as this README.md document. +# The demo dir can be put anywhere. +mkdir build +cd build + +# WITH_MKL=ON|OFF +# WITH_MKLDNN=ON|OFF +PADDLE_LIB=/paddle/lib/dir + +# PADDLE_LIB is the same as FLUID_INSTALL_DIR when building the lib +cmake .. -DPADDLE_LIB=$PADDLE_LIB \ + -DWITH_MKLDNN=OFF \ + -DWITH_MKL=OFF +make + +# copy startup_program and main_program to this dir +cp ../startup_program . +cp ../main_program . + +# run demo cpp trainer +./demo_trainer + +``` + +The output will be: +``` +step: 0 loss: 1069.02 +step: 1 loss: 1069.02 +step: 2 loss: 1069.02 +.... +``` diff --git a/paddle/fluid/train/demo/demo_network.py b/paddle/fluid/train/demo/demo_network.py new file mode 100644 index 0000000000..41e98c6a24 --- /dev/null +++ b/paddle/fluid/train/demo/demo_network.py @@ -0,0 +1,47 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import paddle.fluid as fluid +import paddle.fluid.framework as framework + + +def train_network(with_optimize): + x = fluid.layers.data(name='x', shape=[13], dtype='float32') + y_predict = fluid.layers.fc(input=x, size=1, act=None) + + y = fluid.layers.data(name='y', shape=[1], dtype='float32') + cost = fluid.layers.square_error_cost(input=y_predict, label=y) + avg_cost = fluid.layers.mean(cost) + + if with_optimize: + sgd_optimizer = fluid.optimizer.SGD(learning_rate=0.00001) + sgd_optimizer.minimize(avg_cost) + else: + fluid.backward.append_backward(avg_cost) + + +def save_program_desc(network_func): + startup_program = framework.Program() + train_program = framework.Program() + + with framework.program_guard(train_program, startup_program): + network_func(with_optimize=False) + + with open("startup_program", "w") as f: + f.write(startup_program.desc.serialize_to_string()) + with open("main_program", "w") as f: + f.write(train_program.desc.serialize_to_string()) + + +save_program_desc(train_network) diff --git a/paddle/fluid/train/demo/demo_trainer.cc b/paddle/fluid/train/demo/demo_trainer.cc new file mode 100644 index 0000000000..a0757b53f3 --- /dev/null +++ b/paddle/fluid/train/demo/demo_trainer.cc @@ -0,0 +1,115 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include +#include + +#include "paddle/fluid/framework/executor.h" +#include "paddle/fluid/framework/op_registry.h" +#include "paddle/fluid/framework/program_desc.h" +#include "paddle/fluid/framework/tensor_util.h" +#include "paddle/fluid/platform/device_context.h" +#include "paddle/fluid/platform/init.h" +#include "paddle/fluid/platform/place.h" +#include "paddle/fluid/platform/profiler.h" + +namespace paddle { +namespace train { + +void ReadBinaryFile(const std::string& filename, std::string* contents) { + std::ifstream fin(filename, std::ios::in | std::ios::binary); + PADDLE_ENFORCE(static_cast(fin), "Cannot open file %s", filename); + fin.seekg(0, std::ios::end); + contents->clear(); + contents->resize(fin.tellg()); + fin.seekg(0, std::ios::beg); + fin.read(&(contents->at(0)), contents->size()); + fin.close(); +} + +std::unique_ptr Load( + paddle::framework::Executor* executor, const std::string& model_filename) { + VLOG(3) << "loading model from " << model_filename; + std::string program_desc_str; + ReadBinaryFile(model_filename, &program_desc_str); + + std::unique_ptr main_program( + new paddle::framework::ProgramDesc(program_desc_str)); + return main_program; +} + +} // namespace train +} // namespace paddle + +int main() { + paddle::framework::InitDevices(false); + + const auto cpu_place = paddle::platform::CPUPlace(); + + paddle::framework::Executor executor(cpu_place); + paddle::framework::Scope scope; + auto startup_program = paddle::train::Load(&executor, "startup_program"); + auto train_program = paddle::train::Load(&executor, "main_program"); + + std::string loss_name = ""; + for (auto op_desc : train_program->Block(0).AllOps()) { + if (op_desc->Type() == "mean") { + loss_name = op_desc->Output("Out")[0]; + break; + } + } + + PADDLE_ENFORCE_NE(loss_name, "", "loss not found"); + + // init all parameters + executor.Run(*startup_program.get(), &scope, 0); + + // prepare data + auto x_var = scope.Var("x"); + auto x_tensor = x_var->GetMutable(); + x_tensor->Resize({2, 13}); + + auto x_data = x_tensor->mutable_data(cpu_place); + for (int i = 0; i < 2 * 13; ++i) { + x_data[i] = static_cast(i); + } + + auto y_var = scope.Var("y"); + auto y_tensor = y_var->GetMutable(); + y_tensor->Resize({2, 1}); + auto y_data = y_tensor->mutable_data(cpu_place); + for (int i = 0; i < 2 * 1; ++i) { + y_data[i] = static_cast(i); + } + + auto loss_var = scope.Var(loss_name); + + paddle::platform::ProfilerState pf_state; + pf_state = paddle::platform::ProfilerState::kCPU; + paddle::platform::EnableProfiler(pf_state); + clock_t t1 = clock(); + + for (int i = 0; i < 10; ++i) { + executor.Run(*train_program.get(), &scope, 0, false, true); + std::cout << "step: " << i << " loss: " + << loss_var->Get().data()[0] + << std::endl; + } + + clock_t t2 = clock(); + paddle::platform::DisableProfiler(paddle::platform::EventSortingKey::kTotal, + "run_paddle_op_profiler"); + std::cout << "run_time = " << t2 - t1 << std::endl; + return 0; +} diff --git a/paddle/gserver/tests/Sequence/train.list b/paddle/gserver/tests/Sequence/train.list deleted file mode 100644 index be27acb3a5..0000000000 --- 
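One caveat on the demo's timing: `clock()` returns CPU time in raw `clock_t` ticks, so the printed `t2 - t1` is neither seconds nor wall time. A hedged alternative sketch that reports both, using `std::chrono` for the wall clock (not what the demo ships):

```
// Report CPU seconds (clock()/CLOCKS_PER_SEC) and wall seconds (std::chrono).
#include <chrono>
#include <ctime>
#include <iostream>

int main() {
  clock_t c1 = clock();
  auto w1 = std::chrono::steady_clock::now();

  // ... run the training steps here ...

  clock_t c2 = clock();
  auto w2 = std::chrono::steady_clock::now();
  std::cout << "cpu_time = " << static_cast<double>(c2 - c1) / CLOCKS_PER_SEC
            << " s, wall_time = "
            << std::chrono::duration<double>(w2 - w1).count() << " s\n";
  return 0;
}
```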
a/paddle/gserver/tests/Sequence/train.list +++ /dev/null @@ -1 +0,0 @@ -gserver/tests/Sequence/tour_train_wdseg diff --git a/paddle/gserver/tests/Sequence/train.list.nest b/paddle/gserver/tests/Sequence/train.list.nest deleted file mode 100644 index 7683ebc68e..0000000000 --- a/paddle/gserver/tests/Sequence/train.list.nest +++ /dev/null @@ -1 +0,0 @@ -gserver/tests/Sequence/tour_train_wdseg.nest diff --git a/paddle/api/Arguments.cpp b/paddle/legacy/api/Arguments.cpp similarity index 99% rename from paddle/api/Arguments.cpp rename to paddle/legacy/api/Arguments.cpp index 62d6a574d5..7bb5a6f75b 100644 --- a/paddle/api/Arguments.cpp +++ b/paddle/legacy/api/Arguments.cpp @@ -15,7 +15,7 @@ limitations under the License. */ #include "PaddleAPI.h" #include "PaddleAPIPrivate.h" -#include "paddle/parameter/Argument.h" +#include "paddle/legacy/parameter/Argument.h" size_t Arguments::getSlotNum() const { return m->outputs.size(); } diff --git a/paddle/api/CMakeLists.txt b/paddle/legacy/api/CMakeLists.txt similarity index 100% rename from paddle/api/CMakeLists.txt rename to paddle/legacy/api/CMakeLists.txt diff --git a/paddle/api/ConfigParser.cpp b/paddle/legacy/api/ConfigParser.cpp similarity index 98% rename from paddle/api/ConfigParser.cpp rename to paddle/legacy/api/ConfigParser.cpp index d362a1e7cf..016d6da4e2 100644 --- a/paddle/api/ConfigParser.cpp +++ b/paddle/legacy/api/ConfigParser.cpp @@ -14,7 +14,7 @@ limitations under the License. */ #include "PaddleAPI.h" #include "PaddleAPIPrivate.h" -#include "paddle/trainer/Trainer.h" +#include "paddle/legacy/trainer/Trainer.h" struct ParameterConfigPrivate { paddle::ParameterPtr parameter; diff --git a/paddle/api/Evaluator.cpp b/paddle/legacy/api/Evaluator.cpp similarity index 100% rename from paddle/api/Evaluator.cpp rename to paddle/legacy/api/Evaluator.cpp diff --git a/paddle/api/GradientMachine.cpp b/paddle/legacy/api/GradientMachine.cpp similarity index 98% rename from paddle/api/GradientMachine.cpp rename to paddle/legacy/api/GradientMachine.cpp index a3d6f0f080..5ad2fe11a4 100644 --- a/paddle/api/GradientMachine.cpp +++ b/paddle/legacy/api/GradientMachine.cpp @@ -16,7 +16,7 @@ limitations under the License. */ #include "PaddleAPIPrivate.h" #include "Internal.h" -#include "paddle/gserver/gradientmachines/NeuralNetwork.h" +#include "paddle/legacy/gserver/gradientmachines/NeuralNetwork.h" std::vector GradientMachine::defaultParamTypes = { PARAMETER_VALUE, PARAMETER_GRADIENT, PARAMETER_MOMENTUM}; @@ -94,7 +94,7 @@ void UpdateCallback::apply(Parameter* p) { } class UpdateCallbackWrapper { -public: + public: explicit UpdateCallbackWrapper(const UpdateCallback& callback) : callback(const_cast(callback)) {} @@ -105,7 +105,7 @@ public: delete p; } -private: + private: UpdateCallback& callback; }; diff --git a/paddle/api/Internal.h b/paddle/legacy/api/Internal.h similarity index 100% rename from paddle/api/Internal.h rename to paddle/legacy/api/Internal.h diff --git a/paddle/api/Matrix.cpp b/paddle/legacy/api/Matrix.cpp similarity index 98% rename from paddle/api/Matrix.cpp rename to paddle/legacy/api/Matrix.cpp index 8282b4629d..8862d0ea92 100644 --- a/paddle/api/Matrix.cpp +++ b/paddle/legacy/api/Matrix.cpp @@ -12,12 +12,12 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ -#include "paddle/math/Matrix.h" +#include "paddle/legacy/math/Matrix.h" #include #include #include "PaddleAPI.h" -#include "paddle/math/CpuSparseMatrix.h" -#include "paddle/math/SparseMatrix.h" +#include "paddle/legacy/math/CpuSparseMatrix.h" +#include "paddle/legacy/math/SparseMatrix.h" struct MatrixPrivate { std::shared_ptr mat; diff --git a/paddle/api/Paddle.i b/paddle/legacy/api/Paddle.i similarity index 98% rename from paddle/api/Paddle.i rename to paddle/legacy/api/Paddle.i index 3237e73745..7a1456a5c0 100644 --- a/paddle/api/Paddle.i +++ b/paddle/legacy/api/Paddle.i @@ -2,7 +2,7 @@ %include "std_string.i" %{ #define SWIG_FILE_WITH_INIT -#include "api/PaddleAPI.h" +#include "legacy/api/PaddleAPI.h" %} %include "exception.i" @@ -198,5 +198,5 @@ namespace std { %ignore ParameterConfigPrivate; %ignore OptimizationConfigPrivate; %ignore ParameterTraverseCallbackPrivate; -%include "utils/GlobalConstants.h" -%include "api/PaddleAPI.h" +%include "legacy/utils/GlobalConstants.h" +%include "legacy/api/PaddleAPI.h" diff --git a/paddle/api/PaddleAPI.h b/paddle/legacy/api/PaddleAPI.h similarity index 97% rename from paddle/api/PaddleAPI.h rename to paddle/legacy/api/PaddleAPI.h index 67368d1a99..475984a3d5 100644 --- a/paddle/api/PaddleAPI.h +++ b/paddle/legacy/api/PaddleAPI.h @@ -19,9 +19,9 @@ limitations under the License. */ #include #include #include -#include "paddle/gserver/gradientmachines/GradientMachine.h" -#include "paddle/utils/Common.h" -#include "paddle/utils/GlobalConstants.h" +#include "paddle/legacy/gserver/gradientmachines/GradientMachine.h" +#include "paddle/legacy/utils/Common.h" +#include "paddle/legacy/utils/GlobalConstants.h" /// Import PaddlePaddle's enumeration into global namespace. using namespace paddle::enumeration_wrapper; // NOLINT @@ -59,9 +59,10 @@ class RangeError {}; /// Not support Error, such as access GPU memory directly, etc. class UnsupportError : public std::runtime_error { -public: - UnsupportError() : std::runtime_error(" "){}; - UnsupportError(const std::string& message) : std::runtime_error(message){}; + public: + UnsupportError() : std::runtime_error(" ") {} + explicit UnsupportError(const std::string& message) + : std::runtime_error(message) {} }; /// This type will map to python's list of float. @@ -105,7 +106,7 @@ class Matrix { DISABLE_COPY(Matrix); static Matrix* createByPaddleMatrixPtr(void* sharedPtr); -public: + public: virtual ~Matrix(); /** @@ -231,7 +232,7 @@ public: bool isGpu() const; -private: + private: void* getSharedPtr() const; MatrixPrivate* m; @@ -248,7 +249,7 @@ class Vector { void* getSharedPtr(); -public: + public: ~Vector(); /// Create Vector filled with zero. @@ -310,10 +311,10 @@ public: /// __len__ in python size_t getSize() const; -private: + private: VectorPrivate* m; -private: + private: friend class Parameter; friend class ParameterOptimizer; friend struct ParameterTraverseCallbackPrivate; @@ -325,7 +326,7 @@ class IVector { DISABLE_COPY(IVector); static IVector* createByPaddleVectorPtr(void* ptr); -public: + public: /// Create IVector filled with zero static IVector* createZero(size_t sz, bool useGpu = isUsingGpu()); @@ -389,7 +390,7 @@ public: /// This method will map to python __len__(); size_t getSize() const; -private: + private: void* getSharedPtr() const; friend class Arguments; @@ -400,11 +401,11 @@ struct ArgumentsPrivate; /// The Arguments is actual a std::vector in paddle. class Arguments { -private: + private: Arguments(); // Internal Create. 
DISABLE_COPY(Arguments); -public: + public: /** * Create a arguments with size. * Note that it can be zero. @@ -475,12 +476,12 @@ public: float sum() const; -private: + private: static Arguments* createByPaddleArgumentVector(void* ptr); static Arguments* createByPaddleArgument(const void* ptr); void* getInternalArgumentsPtr() const; -private: + private: ArgumentsPrivate* m; friend class Trainer; friend class GradientMachine; @@ -507,7 +508,7 @@ class ParameterConfig { static ParameterConfig* createParameterConfigFromParameterPtr(void* ptr); void* getRawPtr(); -public: + public: ~ParameterConfig(); /** @@ -515,10 +516,10 @@ public: */ std::string toProtoString() const; -private: + private: ParameterConfigPrivate* m; -private: + private: friend class Parameter; friend class ParameterOptimizer; friend struct ParameterTraverseCallbackPrivate; @@ -529,7 +530,7 @@ class OptimizationConfig { DISABLE_COPY(OptimizationConfig); OptimizationConfig(); -public: + public: static OptimizationConfig* createFromProtoString(const std::string& str); ~OptimizationConfig(); @@ -538,7 +539,7 @@ public: */ std::string toProtoString(); -private: + private: OptimizationConfigPrivate* m; friend class TrainerConfig; @@ -549,11 +550,11 @@ private: struct ParameterPrivate; class Parameter { -private: + private: Parameter(); DISABLE_COPY(Parameter); -public: + public: virtual ~Parameter(); /** @@ -580,11 +581,11 @@ public: size_t getSize() const; -private: + private: static Parameter* createFromRawPtr(void* ptr); static Parameter* createFromSharedPtr(void* ptr); -private: + private: ParameterPrivate* m; friend class UpdateCallbackWrapper; friend class GradientMachine; @@ -598,14 +599,14 @@ struct ModelConfigPrivate; * It is used by GradientMachine. */ class ModelConfig { -private: + private: ModelConfig(); DISABLE_COPY(ModelConfig); -public: + public: virtual ~ModelConfig(); -private: + private: ModelConfigPrivate* m; friend class TrainerConfig; friend struct TrainerConfigPrivate; @@ -619,11 +620,11 @@ struct TrainerConfigPrivate; * It is used by GradientMachine. 
*/ class TrainerConfig { -private: + private: TrainerConfig(); DISABLE_COPY(TrainerConfig); -public: + public: virtual ~TrainerConfig(); static TrainerConfig* createFromTrainerConfigFile( @@ -634,7 +635,7 @@ public: OptimizationConfig* getOptimizationConfig() const; -private: + private: TrainerConfigPrivate* m; friend class Trainer; }; @@ -654,7 +655,7 @@ private: * @endcode */ class UpdateCallback { -public: + public: virtual ~UpdateCallback(); virtual void apply(Parameter* p); }; @@ -664,14 +665,14 @@ class ParameterTraverseCallback { DISABLE_COPY(ParameterTraverseCallback); ParameterTraverseCallback(); -public: + public: ~ParameterTraverseCallback(); void apply(const std::vector& vecs, const ParameterConfig& config, size_t sparseId); -private: + private: ParameterTraverseCallbackPrivate* m; friend class ParameterOptimizer; }; @@ -686,7 +687,7 @@ class ParameterOptimizer { DISABLE_COPY(ParameterOptimizer); ParameterOptimizer(); -public: + public: static ParameterOptimizer* create(OptimizationConfig* config); ~ParameterOptimizer(); @@ -710,7 +711,7 @@ public: ParameterTraverseCallback* needSpecialTraversal( const ParameterConfig& config) const; -private: + private: ParameterOptimizerPrivate* m; }; @@ -718,11 +719,11 @@ class SequenceGenerator; class Evaluator; struct GradientMachinePrivate; class GradientMachine { -private: + private: GradientMachine(); DISABLE_COPY(GradientMachine); -public: + public: virtual ~GradientMachine(); /** @@ -817,7 +818,7 @@ public: void eval(Evaluator* evaluator); -private: + private: GradientMachinePrivate* m; static GradientMachine* createFromPaddleModelPtr( @@ -833,10 +834,10 @@ private: struct ParameterUpdaterPrivate; class ParameterUpdater { -private: + private: ParameterUpdater(); -public: + public: static ParameterUpdater* createLocalUpdater(OptimizationConfig* config); static ParameterUpdater* createRemoteUpdater(OptimizationConfig* config, int passCount, @@ -911,17 +912,17 @@ public: */ void catchUpWith(); -private: + private: ParameterUpdaterPrivate* m; }; struct EvaluatorPrivate; class Evaluator { -private: + private: Evaluator(); DISABLE_COPY(Evaluator); -public: + public: ~Evaluator(); /** @@ -945,7 +946,7 @@ public: double getValue(const std::string name) const; -private: + private: EvaluatorPrivate* m; friend class GradientMachine; @@ -953,13 +954,13 @@ private: struct TrainerPrivate; class Trainer { -private: + private: TrainerPrivate* m; Trainer(); Trainer(TrainerConfig* optConfig, GradientMachine* gm); DISABLE_COPY(Trainer); -public: + public: virtual ~Trainer(); /// Create A Trainer By TrainerConfig. using paddle command line. @@ -1002,7 +1003,7 @@ public: /// the N-Best results generated from one input sequence. class ISequenceResults { -public: + public: virtual ~ISequenceResults(); /// Number of result. 
@@ -1026,7 +1027,7 @@ class SequenceGenerator { DISABLE_COPY(SequenceGenerator); SequenceGenerator(); -public: + public: virtual ~SequenceGenerator(); /** @@ -1044,10 +1045,10 @@ public: void setMaxLength(size_t maxlength); void setBeamSize(size_t beamSize); -private: + private: static SequenceGenerator* createByGradientMachineSharedPtr(void* ptr); friend class GradientMachine; -private: + private: SequenceGeneratorPrivate* m; }; diff --git a/paddle/api/PaddleAPIPrivate.h b/paddle/legacy/api/PaddleAPIPrivate.h similarity index 91% rename from paddle/api/PaddleAPIPrivate.h rename to paddle/legacy/api/PaddleAPIPrivate.h index e141fcd761..3ee192c31d 100644 --- a/paddle/api/PaddleAPIPrivate.h +++ b/paddle/legacy/api/PaddleAPIPrivate.h @@ -14,10 +14,10 @@ limitations under the License. */ #pragma once #include #include "PaddleAPI.h" -#include "paddle/gserver/evaluators/Evaluator.h" -#include "paddle/gserver/gradientmachines/GradientMachine.h" -#include "paddle/parameter/ParameterUpdaterBase.h" -#include "paddle/trainer/TrainerConfigHelper.h" +#include "paddle/legacy/gserver/evaluators/Evaluator.h" +#include "paddle/legacy/gserver/gradientmachines/GradientMachine.h" +#include "paddle/legacy/parameter/ParameterUpdaterBase.h" +#include "paddle/legacy/trainer/TrainerConfigHelper.h" struct GradientMachinePrivate { std::shared_ptr machine; diff --git a/paddle/api/Parameter.cpp b/paddle/legacy/api/Parameter.cpp similarity index 97% rename from paddle/api/Parameter.cpp rename to paddle/legacy/api/Parameter.cpp index 589d22e74e..f05740eb75 100644 --- a/paddle/api/Parameter.cpp +++ b/paddle/legacy/api/Parameter.cpp @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/parameter/Parameter.h" +#include "paddle/legacy/parameter/Parameter.h" #include "PaddleAPI.h" #include "PaddleAPIPrivate.h" diff --git a/paddle/api/ParameterOptimizer.cpp b/paddle/legacy/api/ParameterOptimizer.cpp similarity index 98% rename from paddle/api/ParameterOptimizer.cpp rename to paddle/legacy/api/ParameterOptimizer.cpp index d4620be3e6..477d9dae44 100644 --- a/paddle/api/ParameterOptimizer.cpp +++ b/paddle/legacy/api/ParameterOptimizer.cpp @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/parameter/ParameterOptimizer.h" +#include "paddle/legacy/parameter/ParameterOptimizer.h" #include #include "Internal.h" #include "PaddleAPI.h" diff --git a/paddle/api/ParameterUpdater.cpp b/paddle/legacy/api/ParameterUpdater.cpp similarity index 94% rename from paddle/api/ParameterUpdater.cpp rename to paddle/legacy/api/ParameterUpdater.cpp index 63c000c959..44af3f4635 100644 --- a/paddle/api/ParameterUpdater.cpp +++ b/paddle/legacy/api/ParameterUpdater.cpp @@ -16,10 +16,10 @@ limitations under the License. 
*/ #include "PaddleAPIPrivate.h" #ifndef PADDLE_WITHOUT_GOLANG -#include "paddle/trainer/NewRemoteParameterUpdater.h" +#include "paddle/legacy/trainer/NewRemoteParameterUpdater.h" #endif -#include "paddle/trainer/RemoteParameterUpdater.h" -#include "paddle/trainer/ThreadParameterUpdater.h" +#include "paddle/legacy/trainer/RemoteParameterUpdater.h" +#include "paddle/legacy/trainer/ThreadParameterUpdater.h" ParameterUpdater::ParameterUpdater() : m(new ParameterUpdaterPrivate()) {} diff --git a/paddle/api/SequenceGenerator.cpp b/paddle/legacy/api/SequenceGenerator.cpp similarity index 97% rename from paddle/api/SequenceGenerator.cpp rename to paddle/legacy/api/SequenceGenerator.cpp index 1b30aec8f6..2a73228f6d 100644 --- a/paddle/api/SequenceGenerator.cpp +++ b/paddle/legacy/api/SequenceGenerator.cpp @@ -17,9 +17,9 @@ limitations under the License. */ #include #include #include "PaddleAPI.h" -#include "paddle/gserver/gradientmachines/GradientMachine.h" -#include "paddle/parameter/Argument.h" -#include "paddle/utils/Flags.h" +#include "paddle/legacy/gserver/gradientmachines/GradientMachine.h" +#include "paddle/legacy/parameter/Argument.h" +#include "paddle/legacy/utils/Flags.h" // used to represent partial sequence struct Path { @@ -138,7 +138,7 @@ struct SequenceGeneratorPrivate { maxLength(0UL), feedback(__create_feedback__()) {} -private: + private: static paddle::Argument __create_feedback__() { paddle::Argument feedback; feedback.ids = paddle::IVector::create(/* size= */ 1, FLAGS_use_gpu); @@ -157,7 +157,7 @@ SequenceGenerator::~SequenceGenerator() { delete m; } class PathSequenceResults : public ISequenceResults { // ISequenceResults interface -public: + public: PathSequenceResults(const std::shared_ptr>& path, const std::shared_ptr>& dict) : path_(path), dict_(dict) {} @@ -196,7 +196,7 @@ public: } } -private: + private: std::shared_ptr> path_; std::shared_ptr> dict_; }; diff --git a/paddle/api/Trainer.cpp b/paddle/legacy/api/Trainer.cpp similarity index 95% rename from paddle/api/Trainer.cpp rename to paddle/legacy/api/Trainer.cpp index 795460b650..e7c607201b 100644 --- a/paddle/api/Trainer.cpp +++ b/paddle/legacy/api/Trainer.cpp @@ -19,11 +19,11 @@ limitations under the License. */ #include #include -#include "paddle/gserver/gradientmachines/NeuralNetwork.h" -#include "paddle/trainer/ParamUtil.h" -#include "paddle/trainer/Trainer.h" -#include "paddle/trainer/TrainerInternal.h" -#include "paddle/utils/Flags.h" +#include "paddle/legacy/gserver/gradientmachines/NeuralNetwork.h" +#include "paddle/legacy/trainer/ParamUtil.h" +#include "paddle/legacy/trainer/Trainer.h" +#include "paddle/legacy/trainer/TrainerInternal.h" +#include "paddle/legacy/utils/Flags.h" using paddle::real; diff --git a/paddle/api/Util.cpp b/paddle/legacy/api/Util.cpp similarity index 89% rename from paddle/api/Util.cpp rename to paddle/legacy/api/Util.cpp index 618e87e964..b458c4d90e 100644 --- a/paddle/api/Util.cpp +++ b/paddle/legacy/api/Util.cpp @@ -14,11 +14,11 @@ limitations under the License. 
*/ #include "PaddleAPI.h" -#include "paddle/parameter/Parameter.h" -#include "paddle/utils/Common.h" -#include "paddle/utils/Flags.h" -#include "paddle/utils/PythonUtil.h" -#include "paddle/utils/Util.h" +#include "paddle/legacy/parameter/Parameter.h" +#include "paddle/legacy/utils/Common.h" +#include "paddle/legacy/utils/Flags.h" +#include "paddle/legacy/utils/PythonUtil.h" +#include "paddle/legacy/utils/Util.h" #include #include diff --git a/paddle/api/Vector.cpp b/paddle/legacy/api/Vector.cpp similarity index 99% rename from paddle/api/Vector.cpp rename to paddle/legacy/api/Vector.cpp index e2a7b974ca..73b6d3a15d 100644 --- a/paddle/api/Vector.cpp +++ b/paddle/legacy/api/Vector.cpp @@ -14,7 +14,7 @@ limitations under the License. */ #include "PaddleAPI.h" -#include "paddle/math/Vector.h" +#include "paddle/legacy/math/Vector.h" #include diff --git a/paddle/api/__init__.py b/paddle/legacy/api/__init__.py similarity index 100% rename from paddle/api/__init__.py rename to paddle/legacy/api/__init__.py diff --git a/paddle/api/numpy.i b/paddle/legacy/api/numpy.i similarity index 100% rename from paddle/api/numpy.i rename to paddle/legacy/api/numpy.i diff --git a/paddle/api/test/.gitignore b/paddle/legacy/api/test/.gitignore similarity index 100% rename from paddle/api/test/.gitignore rename to paddle/legacy/api/test/.gitignore diff --git a/paddle/api/test/CMakeLists.txt b/paddle/legacy/api/test/CMakeLists.txt similarity index 100% rename from paddle/api/test/CMakeLists.txt rename to paddle/legacy/api/test/CMakeLists.txt diff --git a/paddle/api/test/testArguments.py b/paddle/legacy/api/test/testArguments.py similarity index 100% rename from paddle/api/test/testArguments.py rename to paddle/legacy/api/test/testArguments.py diff --git a/paddle/api/test/testGradientMachine.py b/paddle/legacy/api/test/testGradientMachine.py similarity index 100% rename from paddle/api/test/testGradientMachine.py rename to paddle/legacy/api/test/testGradientMachine.py diff --git a/paddle/api/test/testMatrix.py b/paddle/legacy/api/test/testMatrix.py similarity index 100% rename from paddle/api/test/testMatrix.py rename to paddle/legacy/api/test/testMatrix.py diff --git a/paddle/api/test/testTrain.py b/paddle/legacy/api/test/testTrain.py similarity index 100% rename from paddle/api/test/testTrain.py rename to paddle/legacy/api/test/testTrain.py diff --git a/paddle/api/test/testTrainConfig.py b/paddle/legacy/api/test/testTrainConfig.py similarity index 100% rename from paddle/api/test/testTrainConfig.py rename to paddle/legacy/api/test/testTrainConfig.py diff --git a/paddle/api/test/testTrainer.py b/paddle/legacy/api/test/testTrainer.py similarity index 100% rename from paddle/api/test/testTrainer.py rename to paddle/legacy/api/test/testTrainer.py diff --git a/paddle/api/test/testVector.py b/paddle/legacy/api/test/testVector.py similarity index 100% rename from paddle/api/test/testVector.py rename to paddle/legacy/api/test/testVector.py diff --git a/paddle/api/test/util.py b/paddle/legacy/api/test/util.py similarity index 100% rename from paddle/api/test/util.py rename to paddle/legacy/api/test/util.py diff --git a/paddle/capi/Arguments.cpp b/paddle/legacy/capi/Arguments.cpp similarity index 91% rename from paddle/capi/Arguments.cpp rename to paddle/legacy/capi/Arguments.cpp index 87fac3d6c6..0ce1770c76 100644 --- a/paddle/capi/Arguments.cpp +++ b/paddle/legacy/capi/Arguments.cpp @@ -66,6 +66,17 @@ paddle_error paddle_arguments_get_value(paddle_arguments args, return kPD_NO_ERROR; } +PD_API paddle_error 
paddle_arguments_get_prob(paddle_arguments args,
+                                              uint64_t ID,
+                                              paddle_matrix mat) {
+  if (args == nullptr || mat == nullptr) return kPD_NULLPTR;
+  auto m = paddle::capi::cast(mat);
+  auto a = castArg(args);
+  if (ID >= a->args.size()) return kPD_OUT_OF_RANGE;
+  m->mat = a->args[ID].in;
+  return kPD_NO_ERROR;
+}
+
 paddle_error paddle_arguments_get_ids(paddle_arguments args,
                                       uint64_t ID,
                                       paddle_ivector ids) {
diff --git a/paddle/capi/CMakeLists.txt b/paddle/legacy/capi/CMakeLists.txt
similarity index 97%
rename from paddle/capi/CMakeLists.txt
rename to paddle/legacy/capi/CMakeLists.txt
index e06e9a2b36..957b1a3e6b 100644
--- a/paddle/capi/CMakeLists.txt
+++ b/paddle/legacy/capi/CMakeLists.txt
@@ -33,9 +33,6 @@ add_library(paddle_capi STATIC ${CAPI_HEADERS} ${CAPI_PRIVATE_HEADER}
 
 target_include_directories(paddle_capi PUBLIC ${CMAKE_CURRENT_BINARY_DIR})
 
-add_style_check_target(paddle_capi ${CAPI_SOURCES} ${CAPI_HEADER}
-                       ${CAPI_PRIVATE_HEADER})
-
 add_dependencies(paddle_capi paddle_proto paddle_gserver)
 
 # TODO: paddle_capi_whole will be removed.
diff --git a/paddle/capi/Main.cpp b/paddle/legacy/capi/Main.cpp
similarity index 90%
rename from paddle/capi/Main.cpp
rename to paddle/legacy/capi/Main.cpp
index 0a289dede6..17d8f00a88 100644
--- a/paddle/capi/Main.cpp
+++ b/paddle/legacy/capi/Main.cpp
@@ -18,9 +18,9 @@ limitations under the License.
 */
 #include 
 #include "capi_private.h"
 #include "main.h"
-#include "paddle/trainer/TrainerConfigHelper.h"
-#include "paddle/utils/Excepts.h"
-#include "paddle/utils/PythonUtil.h"
+#include "paddle/legacy/trainer/TrainerConfigHelper.h"
+#include "paddle/legacy/utils/Excepts.h"
+#include "paddle/legacy/utils/PythonUtil.h"
 
 static void initPaddle(int argc, char** argv) {
   paddle::initMain(argc, argv);
diff --git a/paddle/capi/Matrix.cpp b/paddle/legacy/capi/Matrix.cpp
similarity index 100%
rename from paddle/capi/Matrix.cpp
rename to paddle/legacy/capi/Matrix.cpp
diff --git a/paddle/capi/Vector.cpp b/paddle/legacy/capi/Vector.cpp
similarity index 100%
rename from paddle/capi/Vector.cpp
rename to paddle/legacy/capi/Vector.cpp
diff --git a/paddle/capi/arguments.h b/paddle/legacy/capi/arguments.h
similarity index 92%
rename from paddle/capi/arguments.h
rename to paddle/legacy/capi/arguments.h
index 69a66bb012..ceb64ee6aa 100644
--- a/paddle/capi/arguments.h
+++ b/paddle/legacy/capi/arguments.h
@@ -87,6 +87,18 @@ PD_API paddle_error paddle_arguments_get_value(paddle_arguments args,
                                                uint64_t ID,
                                                paddle_matrix mat);
 
+/**
+ * @brief paddle_arguments_get_prob Get the probability matrix produced by beam
+ *        search, stored in the argument slot whose index is `ID`.
+ * @param [in] args arguments array
+ * @param [in] ID array index
+ * @param [out] mat matrix pointer
+ * @return paddle_error
+ */
+PD_API paddle_error paddle_arguments_get_prob(paddle_arguments args,
+                                              uint64_t ID,
+                                              paddle_matrix mat);
+
 /**
  * @brief PDArgsGetIds Get the integer vector of one argument in array, which
  * index is `ID`.
diff --git a/paddle/capi/capi.h b/paddle/legacy/capi/capi.h
similarity index 100%
rename from paddle/capi/capi.h
rename to paddle/legacy/capi/capi.h
diff --git a/paddle/capi/capi_private.h b/paddle/legacy/capi/capi_private.h
similarity index 90%
rename from paddle/capi/capi_private.h
rename to paddle/legacy/capi/capi_private.h
index 3332f42a4a..e5f8c8c5c8 100644
--- a/paddle/capi/capi_private.h
+++ b/paddle/legacy/capi/capi_private.h
@@ -13,10 +13,10 @@ See the License for the specific language governing permissions and
 limitations under the License.
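The new `paddle_arguments_get_prob` entry point added above in Arguments.cpp (and declared in arguments.h) follows the error-code conventions of the existing C-API getters. A usage sketch, not part of the patch, assuming only the matrix helpers already declared in the renamed `matrix.h` (`paddle_matrix_create_none`, `paddle_matrix_get_shape`, `paddle_matrix_destroy`); `read_slot_probs` is a hypothetical helper name:

```cpp
#include "capi.h"

paddle_error read_slot_probs(paddle_arguments args, uint64_t slot) {
  paddle_matrix probs = paddle_matrix_create_none();  // empty handle to be filled
  paddle_error err = paddle_arguments_get_prob(args, slot, probs);
  if (err == kPD_NO_ERROR) {
    uint64_t height = 0, width = 0;
    paddle_matrix_get_shape(probs, &height, &width);
    // ... consume the height x width probability matrix here ...
  }
  // err is kPD_NULLPTR for a null handle, and kPD_OUT_OF_RANGE when
  // slot >= the number of argument slots, per the implementation above.
  paddle_matrix_destroy(probs);
  return err;
}
```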
*/ #include "capi.h" -#include "paddle/gserver/gradientmachines/GradientMachine.h" -#include "paddle/math/Matrix.h" -#include "paddle/math/Vector.h" -#include "paddle/parameter/Argument.h" +#include "paddle/legacy/gserver/gradientmachines/GradientMachine.h" +#include "paddle/legacy/math/Matrix.h" +#include "paddle/legacy/math/Vector.h" +#include "paddle/legacy/parameter/Argument.h" #pragma once namespace paddle { diff --git a/paddle/capi/config.h.in b/paddle/legacy/capi/config.h.in similarity index 100% rename from paddle/capi/config.h.in rename to paddle/legacy/capi/config.h.in diff --git a/paddle/capi/error.cpp b/paddle/legacy/capi/error.cpp similarity index 100% rename from paddle/capi/error.cpp rename to paddle/legacy/capi/error.cpp diff --git a/paddle/capi/error.h b/paddle/legacy/capi/error.h similarity index 100% rename from paddle/capi/error.h rename to paddle/legacy/capi/error.h diff --git a/paddle/capi/examples/.gitignore b/paddle/legacy/capi/examples/.gitignore similarity index 100% rename from paddle/capi/examples/.gitignore rename to paddle/legacy/capi/examples/.gitignore diff --git a/paddle/capi/examples/README.md b/paddle/legacy/capi/examples/README.md similarity index 100% rename from paddle/capi/examples/README.md rename to paddle/legacy/capi/examples/README.md diff --git a/paddle/capi/examples/model_inference/README.md b/paddle/legacy/capi/examples/model_inference/README.md similarity index 100% rename from paddle/capi/examples/model_inference/README.md rename to paddle/legacy/capi/examples/model_inference/README.md diff --git a/paddle/capi/examples/model_inference/common/common.h b/paddle/legacy/capi/examples/model_inference/common/common.h similarity index 100% rename from paddle/capi/examples/model_inference/common/common.h rename to paddle/legacy/capi/examples/model_inference/common/common.h diff --git a/paddle/capi/examples/model_inference/dense/CMakeLists.txt b/paddle/legacy/capi/examples/model_inference/dense/CMakeLists.txt similarity index 100% rename from paddle/capi/examples/model_inference/dense/CMakeLists.txt rename to paddle/legacy/capi/examples/model_inference/dense/CMakeLists.txt diff --git a/paddle/capi/examples/model_inference/dense/convert_protobin.sh b/paddle/legacy/capi/examples/model_inference/dense/convert_protobin.sh similarity index 100% rename from paddle/capi/examples/model_inference/dense/convert_protobin.sh rename to paddle/legacy/capi/examples/model_inference/dense/convert_protobin.sh diff --git a/paddle/capi/examples/model_inference/dense/main.c b/paddle/legacy/capi/examples/model_inference/dense/main.c similarity index 100% rename from paddle/capi/examples/model_inference/dense/main.c rename to paddle/legacy/capi/examples/model_inference/dense/main.c diff --git a/paddle/capi/examples/model_inference/dense/merge_v2_model.py b/paddle/legacy/capi/examples/model_inference/dense/merge_v2_model.py similarity index 100% rename from paddle/capi/examples/model_inference/dense/merge_v2_model.py rename to paddle/legacy/capi/examples/model_inference/dense/merge_v2_model.py diff --git a/paddle/capi/examples/model_inference/dense/mnist_v2.py b/paddle/legacy/capi/examples/model_inference/dense/mnist_v2.py similarity index 100% rename from paddle/capi/examples/model_inference/dense/mnist_v2.py rename to paddle/legacy/capi/examples/model_inference/dense/mnist_v2.py diff --git a/paddle/capi/examples/model_inference/dense/trainer_config.py b/paddle/legacy/capi/examples/model_inference/dense/trainer_config.py similarity index 100% rename from 
paddle/capi/examples/model_inference/dense/trainer_config.py rename to paddle/legacy/capi/examples/model_inference/dense/trainer_config.py diff --git a/paddle/capi/examples/model_inference/multi_thread/.gitignore b/paddle/legacy/capi/examples/model_inference/multi_thread/.gitignore similarity index 100% rename from paddle/capi/examples/model_inference/multi_thread/.gitignore rename to paddle/legacy/capi/examples/model_inference/multi_thread/.gitignore diff --git a/paddle/capi/examples/model_inference/multi_thread/CMakeLists.txt b/paddle/legacy/capi/examples/model_inference/multi_thread/CMakeLists.txt similarity index 100% rename from paddle/capi/examples/model_inference/multi_thread/CMakeLists.txt rename to paddle/legacy/capi/examples/model_inference/multi_thread/CMakeLists.txt diff --git a/paddle/legacy/capi/examples/model_inference/multi_thread/convert_protobin.sh b/paddle/legacy/capi/examples/model_inference/multi_thread/convert_protobin.sh new file mode 100644 index 0000000000..b29f2cd214 --- /dev/null +++ b/paddle/legacy/capi/examples/model_inference/multi_thread/convert_protobin.sh @@ -0,0 +1 @@ +../dense/convert_protobin.sh diff --git a/paddle/capi/examples/model_inference/multi_thread/main.c b/paddle/legacy/capi/examples/model_inference/multi_thread/main.c similarity index 100% rename from paddle/capi/examples/model_inference/multi_thread/main.c rename to paddle/legacy/capi/examples/model_inference/multi_thread/main.c diff --git a/paddle/capi/examples/model_inference/multi_thread/main_gpu.c b/paddle/legacy/capi/examples/model_inference/multi_thread/main_gpu.c similarity index 100% rename from paddle/capi/examples/model_inference/multi_thread/main_gpu.c rename to paddle/legacy/capi/examples/model_inference/multi_thread/main_gpu.c diff --git a/paddle/capi/examples/model_inference/multi_thread/trainer_config.py b/paddle/legacy/capi/examples/model_inference/multi_thread/trainer_config.py similarity index 100% rename from paddle/capi/examples/model_inference/multi_thread/trainer_config.py rename to paddle/legacy/capi/examples/model_inference/multi_thread/trainer_config.py diff --git a/paddle/capi/examples/model_inference/sequence/.gitignore b/paddle/legacy/capi/examples/model_inference/sequence/.gitignore similarity index 100% rename from paddle/capi/examples/model_inference/sequence/.gitignore rename to paddle/legacy/capi/examples/model_inference/sequence/.gitignore diff --git a/paddle/capi/examples/model_inference/sequence/CMakeLists.txt b/paddle/legacy/capi/examples/model_inference/sequence/CMakeLists.txt similarity index 100% rename from paddle/capi/examples/model_inference/sequence/CMakeLists.txt rename to paddle/legacy/capi/examples/model_inference/sequence/CMakeLists.txt diff --git a/paddle/legacy/capi/examples/model_inference/sequence/convert_protobin.sh b/paddle/legacy/capi/examples/model_inference/sequence/convert_protobin.sh new file mode 100644 index 0000000000..b29f2cd214 --- /dev/null +++ b/paddle/legacy/capi/examples/model_inference/sequence/convert_protobin.sh @@ -0,0 +1 @@ +../dense/convert_protobin.sh diff --git a/paddle/capi/examples/model_inference/sequence/main.c b/paddle/legacy/capi/examples/model_inference/sequence/main.c similarity index 100% rename from paddle/capi/examples/model_inference/sequence/main.c rename to paddle/legacy/capi/examples/model_inference/sequence/main.c diff --git a/paddle/capi/examples/model_inference/sequence/trainer_config.py b/paddle/legacy/capi/examples/model_inference/sequence/trainer_config.py similarity index 100% rename from 
paddle/capi/examples/model_inference/sequence/trainer_config.py rename to paddle/legacy/capi/examples/model_inference/sequence/trainer_config.py diff --git a/paddle/capi/examples/model_inference/sparse_binary/.gitignore b/paddle/legacy/capi/examples/model_inference/sparse_binary/.gitignore similarity index 100% rename from paddle/capi/examples/model_inference/sparse_binary/.gitignore rename to paddle/legacy/capi/examples/model_inference/sparse_binary/.gitignore diff --git a/paddle/capi/examples/model_inference/sparse_binary/CMakeLists.txt b/paddle/legacy/capi/examples/model_inference/sparse_binary/CMakeLists.txt similarity index 100% rename from paddle/capi/examples/model_inference/sparse_binary/CMakeLists.txt rename to paddle/legacy/capi/examples/model_inference/sparse_binary/CMakeLists.txt diff --git a/paddle/legacy/capi/examples/model_inference/sparse_binary/convert_protobin.sh b/paddle/legacy/capi/examples/model_inference/sparse_binary/convert_protobin.sh new file mode 100644 index 0000000000..b29f2cd214 --- /dev/null +++ b/paddle/legacy/capi/examples/model_inference/sparse_binary/convert_protobin.sh @@ -0,0 +1 @@ +../dense/convert_protobin.sh diff --git a/paddle/capi/examples/model_inference/sparse_binary/main.c b/paddle/legacy/capi/examples/model_inference/sparse_binary/main.c similarity index 100% rename from paddle/capi/examples/model_inference/sparse_binary/main.c rename to paddle/legacy/capi/examples/model_inference/sparse_binary/main.c diff --git a/paddle/capi/examples/model_inference/sparse_binary/trainer_config.py b/paddle/legacy/capi/examples/model_inference/sparse_binary/trainer_config.py similarity index 100% rename from paddle/capi/examples/model_inference/sparse_binary/trainer_config.py rename to paddle/legacy/capi/examples/model_inference/sparse_binary/trainer_config.py diff --git a/paddle/capi/gradient_machine.cpp b/paddle/legacy/capi/gradient_machine.cpp similarity index 98% rename from paddle/capi/gradient_machine.cpp rename to paddle/legacy/capi/gradient_machine.cpp index ea9aab00e3..0c5ddd856b 100644 --- a/paddle/capi/gradient_machine.cpp +++ b/paddle/legacy/capi/gradient_machine.cpp @@ -14,7 +14,7 @@ limitations under the License. 
*/ #include "gradient_machine.h" #include "capi_private.h" -#include "paddle/gserver/gradientmachines/NeuralNetwork.h" +#include "paddle/legacy/gserver/gradientmachines/NeuralNetwork.h" #define cast(v) paddle::capi::cast(v) @@ -26,7 +26,7 @@ enum GradientMatchineCreateMode { namespace paddle { class MyNeuralNetwork : public NeuralNetwork { -public: + public: MyNeuralNetwork(const std::string& name, NeuralNetwork* network) : NeuralNetwork(name, network) {} }; diff --git a/paddle/capi/gradient_machine.h b/paddle/legacy/capi/gradient_machine.h similarity index 100% rename from paddle/capi/gradient_machine.h rename to paddle/legacy/capi/gradient_machine.h diff --git a/paddle/capi/main.h b/paddle/legacy/capi/main.h similarity index 100% rename from paddle/capi/main.h rename to paddle/legacy/capi/main.h diff --git a/paddle/capi/matrix.h b/paddle/legacy/capi/matrix.h similarity index 100% rename from paddle/capi/matrix.h rename to paddle/legacy/capi/matrix.h diff --git a/paddle/capi/paddle_capi.map b/paddle/legacy/capi/paddle_capi.map similarity index 100% rename from paddle/capi/paddle_capi.map rename to paddle/legacy/capi/paddle_capi.map diff --git a/paddle/capi/tests/.gitignore b/paddle/legacy/capi/tests/.gitignore similarity index 100% rename from paddle/capi/tests/.gitignore rename to paddle/legacy/capi/tests/.gitignore diff --git a/paddle/capi/tests/CMakeLists.txt b/paddle/legacy/capi/tests/CMakeLists.txt similarity index 100% rename from paddle/capi/tests/CMakeLists.txt rename to paddle/legacy/capi/tests/CMakeLists.txt diff --git a/paddle/capi/tests/test_Arguments.cpp b/paddle/legacy/capi/tests/test_Arguments.cpp similarity index 99% rename from paddle/capi/tests/test_Arguments.cpp rename to paddle/legacy/capi/tests/test_Arguments.cpp index bb08adf716..6fb379719d 100644 --- a/paddle/capi/tests/test_Arguments.cpp +++ b/paddle/legacy/capi/tests/test_Arguments.cpp @@ -15,7 +15,7 @@ limitations under the License. */ #include #include "capi.h" #include "gtest/gtest.h" -#include "paddle/utils/ThreadLocal.h" +#include "paddle/legacy/utils/ThreadLocal.h" static std::vector randomBuffer(size_t bufSize) { auto& eng = paddle::ThreadLocalRandomEngine::get(); diff --git a/paddle/capi/tests/test_GradientMachine.cpp b/paddle/legacy/capi/tests/test_GradientMachine.cpp similarity index 96% rename from paddle/capi/tests/test_GradientMachine.cpp rename to paddle/legacy/capi/tests/test_GradientMachine.cpp index 73b9e477b2..5d1b7cb6ca 100644 --- a/paddle/capi/tests/test_GradientMachine.cpp +++ b/paddle/legacy/capi/tests/test_GradientMachine.cpp @@ -13,13 +13,13 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ #include -#include -#include +#include +#include #include #include #include #include "capi.h" -#include "paddle/utils/ThreadLocal.h" +#include "paddle/legacy/utils/ThreadLocal.h" static std::vector randomBuffer(size_t bufSize) { auto& eng = paddle::ThreadLocalRandomEngine::get(); diff --git a/paddle/capi/tests/test_Matrix.cpp b/paddle/legacy/capi/tests/test_Matrix.cpp similarity index 100% rename from paddle/capi/tests/test_Matrix.cpp rename to paddle/legacy/capi/tests/test_Matrix.cpp diff --git a/paddle/capi/tests/test_Vector.cpp b/paddle/legacy/capi/tests/test_Vector.cpp similarity index 100% rename from paddle/capi/tests/test_Vector.cpp rename to paddle/legacy/capi/tests/test_Vector.cpp diff --git a/paddle/capi/tests/test_predict_network.py b/paddle/legacy/capi/tests/test_predict_network.py similarity index 100% rename from paddle/capi/tests/test_predict_network.py rename to paddle/legacy/capi/tests/test_predict_network.py diff --git a/paddle/capi/vector.h b/paddle/legacy/capi/vector.h similarity index 100% rename from paddle/capi/vector.h rename to paddle/legacy/capi/vector.h diff --git a/paddle/cuda/CMakeLists.txt b/paddle/legacy/cuda/CMakeLists.txt similarity index 93% rename from paddle/cuda/CMakeLists.txt rename to paddle/legacy/cuda/CMakeLists.txt index efd1b7a73e..9bbb8de78e 100755 --- a/paddle/cuda/CMakeLists.txt +++ b/paddle/legacy/cuda/CMakeLists.txt @@ -87,8 +87,3 @@ else() endif() add_dependencies(paddle_cuda paddle_proto ${external_project_dependencies}) - -add_style_check_target(paddle_cuda - ${CUDA_SOURCES} - ${CUDA_HEADERS} - ${CUDA_CXX_SOURCES}) diff --git a/paddle/cuda/include/hl_activation_functions.h b/paddle/legacy/cuda/include/hl_activation_functions.h similarity index 99% rename from paddle/cuda/include/hl_activation_functions.h rename to paddle/legacy/cuda/include/hl_activation_functions.h index 29ec248420..66a69db545 100644 --- a/paddle/cuda/include/hl_activation_functions.h +++ b/paddle/legacy/cuda/include/hl_activation_functions.h @@ -31,7 +31,7 @@ namespace hppl { */ template class Active { -public: + public: typedef T (*forward)(T); typedef T (*backward)(T, T); }; diff --git a/paddle/cuda/include/hl_aggregate.h b/paddle/legacy/cuda/include/hl_aggregate.h similarity index 100% rename from paddle/cuda/include/hl_aggregate.h rename to paddle/legacy/cuda/include/hl_aggregate.h diff --git a/paddle/cuda/include/hl_avx_functions.h b/paddle/legacy/cuda/include/hl_avx_functions.h similarity index 100% rename from paddle/cuda/include/hl_avx_functions.h rename to paddle/legacy/cuda/include/hl_avx_functions.h diff --git a/paddle/cuda/include/hl_base.h b/paddle/legacy/cuda/include/hl_base.h similarity index 98% rename from paddle/cuda/include/hl_base.h rename to paddle/legacy/cuda/include/hl_base.h index 77f5d82dbe..bfe812a438 100644 --- a/paddle/cuda/include/hl_base.h +++ b/paddle/legacy/cuda/include/hl_base.h @@ -207,8 +207,8 @@ typedef struct { #ifdef __NVCC__ #include -#include "paddle/cuda/include/hl_cuda.h" -#include "paddle/utils/Logging.h" +#include "paddle/legacy/cuda/include/hl_cuda.h" +#include "paddle/legacy/utils/Logging.h" extern __thread bool g_sync_flag; extern __thread cudaStream_t default_stream; diff --git a/paddle/cuda/include/hl_batch_norm.h b/paddle/legacy/cuda/include/hl_batch_norm.h similarity index 100% rename from paddle/cuda/include/hl_batch_norm.h rename to paddle/legacy/cuda/include/hl_batch_norm.h diff --git a/paddle/cuda/include/hl_batch_transpose.h b/paddle/legacy/cuda/include/hl_batch_transpose.h similarity index 100% rename from 
paddle/cuda/include/hl_batch_transpose.h rename to paddle/legacy/cuda/include/hl_batch_transpose.h diff --git a/paddle/cuda/include/hl_cnn.h b/paddle/legacy/cuda/include/hl_cnn.h similarity index 100% rename from paddle/cuda/include/hl_cnn.h rename to paddle/legacy/cuda/include/hl_cnn.h diff --git a/paddle/cuda/include/hl_cpu_gru.cuh b/paddle/legacy/cuda/include/hl_cpu_gru.cuh similarity index 100% rename from paddle/cuda/include/hl_cpu_gru.cuh rename to paddle/legacy/cuda/include/hl_cpu_gru.cuh diff --git a/paddle/cuda/include/hl_cpu_lstm.cuh b/paddle/legacy/cuda/include/hl_cpu_lstm.cuh similarity index 100% rename from paddle/cuda/include/hl_cpu_lstm.cuh rename to paddle/legacy/cuda/include/hl_cpu_lstm.cuh diff --git a/paddle/cuda/include/hl_cpu_matrix_kernel.cuh b/paddle/legacy/cuda/include/hl_cpu_matrix_kernel.cuh similarity index 100% rename from paddle/cuda/include/hl_cpu_matrix_kernel.cuh rename to paddle/legacy/cuda/include/hl_cpu_matrix_kernel.cuh diff --git a/paddle/cuda/include/hl_cpu_matrix_kernel_detail.cuh b/paddle/legacy/cuda/include/hl_cpu_matrix_kernel_detail.cuh similarity index 100% rename from paddle/cuda/include/hl_cpu_matrix_kernel_detail.cuh rename to paddle/legacy/cuda/include/hl_cpu_matrix_kernel_detail.cuh diff --git a/paddle/cuda/include/hl_cpu_scalar.cuh b/paddle/legacy/cuda/include/hl_cpu_scalar.cuh similarity index 100% rename from paddle/cuda/include/hl_cpu_scalar.cuh rename to paddle/legacy/cuda/include/hl_cpu_scalar.cuh diff --git a/paddle/cuda/include/hl_cpu_simd_neon.cuh b/paddle/legacy/cuda/include/hl_cpu_simd_neon.cuh similarity index 100% rename from paddle/cuda/include/hl_cpu_simd_neon.cuh rename to paddle/legacy/cuda/include/hl_cpu_simd_neon.cuh diff --git a/paddle/cuda/include/hl_cpu_simd_sse.cuh b/paddle/legacy/cuda/include/hl_cpu_simd_sse.cuh similarity index 100% rename from paddle/cuda/include/hl_cpu_simd_sse.cuh rename to paddle/legacy/cuda/include/hl_cpu_simd_sse.cuh diff --git a/paddle/cuda/include/hl_cuda.h b/paddle/legacy/cuda/include/hl_cuda.h similarity index 100% rename from paddle/cuda/include/hl_cuda.h rename to paddle/legacy/cuda/include/hl_cuda.h diff --git a/paddle/cuda/include/hl_cuda.ph b/paddle/legacy/cuda/include/hl_cuda.ph similarity index 100% rename from paddle/cuda/include/hl_cuda.ph rename to paddle/legacy/cuda/include/hl_cuda.ph diff --git a/paddle/cuda/include/hl_cuda_cublas.h b/paddle/legacy/cuda/include/hl_cuda_cublas.h similarity index 100% rename from paddle/cuda/include/hl_cuda_cublas.h rename to paddle/legacy/cuda/include/hl_cuda_cublas.h diff --git a/paddle/cuda/include/hl_cuda_cudnn.h b/paddle/legacy/cuda/include/hl_cuda_cudnn.h similarity index 100% rename from paddle/cuda/include/hl_cuda_cudnn.h rename to paddle/legacy/cuda/include/hl_cuda_cudnn.h diff --git a/paddle/cuda/include/hl_cuda_cudnn.ph b/paddle/legacy/cuda/include/hl_cuda_cudnn.ph similarity index 100% rename from paddle/cuda/include/hl_cuda_cudnn.ph rename to paddle/legacy/cuda/include/hl_cuda_cudnn.ph diff --git a/paddle/cuda/include/hl_device_functions.cuh b/paddle/legacy/cuda/include/hl_device_functions.cuh similarity index 100% rename from paddle/cuda/include/hl_device_functions.cuh rename to paddle/legacy/cuda/include/hl_device_functions.cuh diff --git a/paddle/cuda/include/hl_functions.h b/paddle/legacy/cuda/include/hl_functions.h similarity index 100% rename from paddle/cuda/include/hl_functions.h rename to paddle/legacy/cuda/include/hl_functions.h diff --git a/paddle/cuda/include/hl_gpu.h b/paddle/legacy/cuda/include/hl_gpu.h similarity index 
100% rename from paddle/cuda/include/hl_gpu.h rename to paddle/legacy/cuda/include/hl_gpu.h diff --git a/paddle/cuda/include/hl_gpu_functions.cuh b/paddle/legacy/cuda/include/hl_gpu_functions.cuh similarity index 100% rename from paddle/cuda/include/hl_gpu_functions.cuh rename to paddle/legacy/cuda/include/hl_gpu_functions.cuh diff --git a/paddle/cuda/include/hl_gpu_gru.cuh b/paddle/legacy/cuda/include/hl_gpu_gru.cuh similarity index 99% rename from paddle/cuda/include/hl_gpu_gru.cuh rename to paddle/legacy/cuda/include/hl_gpu_gru.cuh index 9fcad2c3bc..8d299572c7 100644 --- a/paddle/cuda/include/hl_gpu_gru.cuh +++ b/paddle/legacy/cuda/include/hl_gpu_gru.cuh @@ -18,7 +18,7 @@ limitations under the License. */ #ifdef __NVCC__ -#include "paddle/utils/Logging.h" +#include "paddle/legacy/utils/Logging.h" /* * threads(framePerBlock, batchPerBlock) diff --git a/paddle/cuda/include/hl_gpu_lstm.cuh b/paddle/legacy/cuda/include/hl_gpu_lstm.cuh similarity index 99% rename from paddle/cuda/include/hl_gpu_lstm.cuh rename to paddle/legacy/cuda/include/hl_gpu_lstm.cuh index 92517a44d2..aae011b838 100644 --- a/paddle/cuda/include/hl_gpu_lstm.cuh +++ b/paddle/legacy/cuda/include/hl_gpu_lstm.cuh @@ -18,7 +18,7 @@ limitations under the License. */ #ifdef __NVCC__ -#include "paddle/utils/Logging.h" +#include "paddle/legacy/utils/Logging.h" #include "hl_device_functions.cuh" /* diff --git a/paddle/cuda/include/hl_gpu_matrix_kernel.cuh b/paddle/legacy/cuda/include/hl_gpu_matrix_kernel.cuh similarity index 99% rename from paddle/cuda/include/hl_gpu_matrix_kernel.cuh rename to paddle/legacy/cuda/include/hl_gpu_matrix_kernel.cuh index 0db023ce37..6177d23657 100644 --- a/paddle/cuda/include/hl_gpu_matrix_kernel.cuh +++ b/paddle/legacy/cuda/include/hl_gpu_matrix_kernel.cuh @@ -18,7 +18,7 @@ limitations under the License. 
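For context on the `Active` template restyled in hl_activation_functions.h a few files back: it is a small traits class that names the function-pointer types of an activation's forward and backward passes, so kernels can carry activation callbacks as plain data. A sketch with an assumed sigmoid pairing (the concrete functions are illustrative, not patch content):

```cpp
#include <cmath>

template <class T>
class Active {
 public:  // one-space indent, per the restyle in this patch
  typedef T (*forward)(T);
  typedef T (*backward)(T, T);
};

static float sigmoidFwd(float a) { return 1.0f / (1.0f + std::exp(-a)); }
static float sigmoidBwd(float out, float grad) { return grad * out * (1.0f - out); }

// A kernel can then store the pair without templating on the activation itself:
static Active<float>::forward fwd = sigmoidFwd;
static Active<float>::backward bwd = sigmoidBwd;
```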
*/ #define HL_GPU_MATRIX_KERNEL_CUH_ #include -#include "paddle/utils/Logging.h" +#include "paddle/legacy/utils/Logging.h" #include "hl_base.h" #ifdef __NVCC__ diff --git a/paddle/cuda/include/hl_gru_ops.cuh b/paddle/legacy/cuda/include/hl_gru_ops.cuh similarity index 100% rename from paddle/cuda/include/hl_gru_ops.cuh rename to paddle/legacy/cuda/include/hl_gru_ops.cuh diff --git a/paddle/cuda/include/hl_lstm.h b/paddle/legacy/cuda/include/hl_lstm.h similarity index 100% rename from paddle/cuda/include/hl_lstm.h rename to paddle/legacy/cuda/include/hl_lstm.h diff --git a/paddle/cuda/include/hl_lstm_ops.cuh b/paddle/legacy/cuda/include/hl_lstm_ops.cuh similarity index 100% rename from paddle/cuda/include/hl_lstm_ops.cuh rename to paddle/legacy/cuda/include/hl_lstm_ops.cuh diff --git a/paddle/cuda/include/hl_matrix.h b/paddle/legacy/cuda/include/hl_matrix.h similarity index 100% rename from paddle/cuda/include/hl_matrix.h rename to paddle/legacy/cuda/include/hl_matrix.h diff --git a/paddle/cuda/include/hl_matrix_apply.cuh b/paddle/legacy/cuda/include/hl_matrix_apply.cuh similarity index 100% rename from paddle/cuda/include/hl_matrix_apply.cuh rename to paddle/legacy/cuda/include/hl_matrix_apply.cuh diff --git a/paddle/cuda/include/hl_matrix_base.cuh b/paddle/legacy/cuda/include/hl_matrix_base.cuh similarity index 100% rename from paddle/cuda/include/hl_matrix_base.cuh rename to paddle/legacy/cuda/include/hl_matrix_base.cuh diff --git a/paddle/cuda/include/hl_matrix_base_detail.cuh b/paddle/legacy/cuda/include/hl_matrix_base_detail.cuh similarity index 100% rename from paddle/cuda/include/hl_matrix_base_detail.cuh rename to paddle/legacy/cuda/include/hl_matrix_base_detail.cuh diff --git a/paddle/cuda/include/hl_matrix_ops.cuh b/paddle/legacy/cuda/include/hl_matrix_ops.cuh similarity index 100% rename from paddle/cuda/include/hl_matrix_ops.cuh rename to paddle/legacy/cuda/include/hl_matrix_ops.cuh diff --git a/paddle/cuda/include/hl_matrix_type.cuh b/paddle/legacy/cuda/include/hl_matrix_type.cuh similarity index 100% rename from paddle/cuda/include/hl_matrix_type.cuh rename to paddle/legacy/cuda/include/hl_matrix_type.cuh diff --git a/paddle/cuda/include/hl_perturbation_util.cuh b/paddle/legacy/cuda/include/hl_perturbation_util.cuh similarity index 100% rename from paddle/cuda/include/hl_perturbation_util.cuh rename to paddle/legacy/cuda/include/hl_perturbation_util.cuh diff --git a/paddle/cuda/include/hl_recurrent_apply.cuh b/paddle/legacy/cuda/include/hl_recurrent_apply.cuh similarity index 100% rename from paddle/cuda/include/hl_recurrent_apply.cuh rename to paddle/legacy/cuda/include/hl_recurrent_apply.cuh diff --git a/paddle/cuda/include/hl_sequence.h b/paddle/legacy/cuda/include/hl_sequence.h similarity index 100% rename from paddle/cuda/include/hl_sequence.h rename to paddle/legacy/cuda/include/hl_sequence.h diff --git a/paddle/cuda/include/hl_sparse.h b/paddle/legacy/cuda/include/hl_sparse.h similarity index 100% rename from paddle/cuda/include/hl_sparse.h rename to paddle/legacy/cuda/include/hl_sparse.h diff --git a/paddle/cuda/include/hl_sparse.ph b/paddle/legacy/cuda/include/hl_sparse.ph similarity index 100% rename from paddle/cuda/include/hl_sparse.ph rename to paddle/legacy/cuda/include/hl_sparse.ph diff --git a/paddle/cuda/include/hl_table_apply.h b/paddle/legacy/cuda/include/hl_table_apply.h similarity index 100% rename from paddle/cuda/include/hl_table_apply.h rename to paddle/legacy/cuda/include/hl_table_apply.h diff --git a/paddle/cuda/include/hl_tensor_ops.h 
b/paddle/legacy/cuda/include/hl_tensor_ops.h similarity index 92% rename from paddle/cuda/include/hl_tensor_ops.h rename to paddle/legacy/cuda/include/hl_tensor_ops.h index 85a022ff5e..bc5e5da53d 100644 --- a/paddle/cuda/include/hl_tensor_ops.h +++ b/paddle/legacy/cuda/include/hl_tensor_ops.h @@ -23,128 +23,128 @@ namespace unary { template class add_scale { -private: + private: const T p; -public: + public: INLINE add_scale(const T s) : p(s) {} INLINE T operator()(const T a) const { return a + p; } }; template class sub_scale { -private: + private: const T p; -public: + public: INLINE sub_scale(const T s) : p(s) {} INLINE T operator()(const T a) const { return a - p; } }; template class mul_scale { -private: + private: const T p; -public: + public: INLINE mul_scale(const T s) : p(s) {} INLINE T operator()(const T a) const { return a * p; } }; template class div_scale { -private: + private: const T p; -public: + public: INLINE div_scale(const T s) : p(s) {} INLINE T operator()(const T a) const { return a / p; } }; template class neg { -public: + public: INLINE T operator()(const T a) const { return -a; } }; template class exp_op { -public: + public: INLINE T operator()(const T a) const { return std::exp(a); } }; template class log_op { -public: + public: INLINE T operator()(const T a) const { return std::log(a); } }; template class sqrt_op { -public: + public: INLINE T operator()(const T a) const { return std::sqrt(a); } }; template class square { -public: + public: INLINE T operator()(const T a) const { return a * a; } }; template class reciprocal { -public: + public: INLINE T operator()(const T a) const { return T(1) / a; } }; template class abs { -public: + public: INLINE T operator()(const T a) const { return a > 0 ? a : -a; } }; template class sign { -public: + public: INLINE T operator()(const T a) const { return (a > 0) - (a < 0); } }; template class min { -private: + private: const T p; -public: + public: INLINE min(const T s) : p(s) {} INLINE T operator()(const T a) const { return a > p ? p : a; } }; template class max { -private: + private: const T p; -public: + public: INLINE max(const T s) : p(s) {} INLINE T operator()(const T a) const { return a < p ? 
p : a; } }; template class pow_op { -private: + private: const T p; -public: + public: INLINE pow_op(const T s) : p(s) {} INLINE T operator()(const T a) const { return std::pow(a, p); } }; template class constant { -private: + private: const T p; -public: + public: INLINE constant(const T s) : p(s) {} INLINE T operator()(int i) const { return p; } INLINE T operator()(int i, int j) const { return p; } @@ -152,80 +152,80 @@ public: template class cmp_eq { -private: + private: const T p; -public: + public: INLINE cmp_eq(const T s) : p(s) {} INLINE bool operator()(const T a) const { return a == p; } }; template class cmp_ne { -private: + private: const T p; -public: + public: INLINE cmp_ne(const T s) : p(s) {} INLINE bool operator()(const T a) const { return a != p; } }; template class cmp_le { -private: + private: const T p; -public: + public: INLINE cmp_le(const T s) : p(s) {} INLINE bool operator()(const T a) const { return a <= p; } }; template class cmp_lt { -private: + private: const T p; -public: + public: INLINE cmp_lt(const T s) : p(s) {} INLINE bool operator()(const T a) const { return a < p; } }; template class cmp_ge { -private: + private: const T p; -public: + public: INLINE cmp_ge(const T s) : p(s) {} INLINE bool operator()(const T a) const { return a >= p; } }; template class cmp_gt { -private: + private: const T p; -public: + public: INLINE cmp_gt(const T s) : p(s) {} INLINE bool operator()(const T a) const { return a > p; } }; template class and_op { -private: + private: const T p; -public: + public: INLINE and_op(const T s) : p(s) {} INLINE bool operator()(const T a) const { return a && p; } }; template class or_op { -private: + private: const T p; -public: + public: INLINE or_op(const T s) : p(s) {} INLINE bool operator()(const T a) const { return a || p; } }; @@ -235,96 +235,96 @@ public: namespace binary { template class add { -public: + public: INLINE T operator()(const T a, const T b) const { return a + b; } }; template class add_scale { -private: + private: const T p1; const T p2; -public: + public: INLINE add_scale(const T s1, const T s2) : p1(s1), p2(s2) {} INLINE T operator()(const T a, const T b) const { return p1 * a + p2 * b; } }; template class sub { -public: + public: INLINE T operator()(const T a, const T b) const { return a - b; } }; template class mul { -public: + public: INLINE T operator()(const T a, const T b) const { return a * b; } }; template class div { -public: + public: INLINE T operator()(const T a, const T b) const { return a / b; } }; template class cmp_eq { -public: + public: INLINE bool operator()(const T a, const T b) const { return a == b; } }; template class cmp_ne { -public: + public: INLINE bool operator()(const T a, const T b) const { return a != b; } }; template class cmp_le { -public: + public: INLINE bool operator()(const T a, const T b) const { return a <= b; } }; template class cmp_lt { -public: + public: INLINE bool operator()(const T a, const T b) const { return a < b; } }; template class cmp_ge { -public: + public: INLINE bool operator()(const T a, const T b) const { return a >= b; } }; template class cmp_gt { -public: + public: INLINE bool operator()(const T a, const T b) const { return a > b; } }; template class and_op { -public: + public: INLINE bool operator()(const T a, const T b) const { return a && b; } }; template class or_op { -public: + public: INLINE bool operator()(const T a, const T b) const { return a || b; } }; template class min { -public: + public: INLINE T operator()(const T a, const T b) const { return a > b ? 
b : a; } }; template class max { -public: + public: INLINE T operator()(const T a, const T b) const { return a < b ? b : a; } }; @@ -332,7 +332,7 @@ public: #ifndef PADDLE_TYPE_DOUBLE template <> class add<__m128> { -public: + public: INLINE __m128 operator()(const __m128 a, const __m128 b) const { return _mm_add_ps(a, b); } @@ -340,11 +340,11 @@ public: template <> class add_scale<__m128> { -private: + private: const __m128 p1; const __m128 p2; -public: + public: INLINE add_scale(const __m128 s1, const __m128 s2) : p1(s1), p2(s2) {} INLINE __m128 operator()(const __m128 a, const __m128 b) const { return _mm_add_ps(_mm_mul_ps(p1, a), _mm_mul_ps(p2, b)); @@ -353,7 +353,7 @@ public: template <> class sub<__m128> { -public: + public: INLINE __m128 operator()(const __m128 a, const __m128 b) const { return _mm_sub_ps(a, b); } @@ -361,7 +361,7 @@ public: template <> class mul<__m128> { -public: + public: INLINE __m128 operator()(const __m128 a, const __m128 b) const { return _mm_mul_ps(a, b); } @@ -369,7 +369,7 @@ public: template <> class div<__m128> { -public: + public: INLINE __m128 operator()(const __m128 a, const __m128 b) const { return _mm_div_ps(a, b); } @@ -377,7 +377,7 @@ public: template <> class min<__m128> { -public: + public: INLINE __m128 operator()(const __m128 a, const __m128 b) const { return _mm_min_ps(a, b); } @@ -385,7 +385,7 @@ public: template <> class max<__m128> { -public: + public: INLINE __m128 operator()(const __m128 a, const __m128 b) const { return _mm_max_ps(a, b); } @@ -393,7 +393,7 @@ public: #else template <> class add<__m128d> { -public: + public: INLINE __m128d operator()(const __m128d a, const __m128d b) const { return _mm_add_pd(a, b); } @@ -401,11 +401,11 @@ public: template <> class add_scale<__m128d> { -private: + private: const __m128d p1; const __m128d p2; -public: + public: INLINE add_scale(const __m128d s1, const __m128d s2) : p1(s1), p2(s2) {} INLINE __m128d operator()(const __m128d a, const __m128d b) const { return _mm_add_pd(_mm_mul_pd(p1, a), _mm_mul_pd(p2, b)); @@ -414,7 +414,7 @@ public: template <> class sub<__m128d> { -public: + public: INLINE __m128d operator()(const __m128d a, const __m128d b) const { return _mm_sub_pd(a, b); } @@ -422,7 +422,7 @@ public: template <> class mul<__m128d> { -public: + public: INLINE __m128d operator()(const __m128d a, const __m128d b) const { return _mm_mul_pd(a, b); } @@ -430,7 +430,7 @@ public: template <> class div<__m128d> { -public: + public: INLINE __m128d operator()(const __m128d a, const __m128d b) const { return _mm_div_pd(a, b); } @@ -438,7 +438,7 @@ public: template <> class min<__m128d> { -public: + public: INLINE __m128d operator()(const __m128d a, const __m128d b) const { return _mm_min_pd(a, b); } @@ -446,7 +446,7 @@ public: template <> class max<__m128d> { -public: + public: INLINE __m128d operator()(const __m128d a, const __m128d b) const { return _mm_max_pd(a, b); } @@ -458,7 +458,7 @@ public: #ifndef PADDLE_TYPE_DOUBLE template <> class add { -public: + public: INLINE float32x4_t operator()(const float32x4_t a, const float32x4_t b) const { return vaddq_f32(a, b); @@ -467,11 +467,11 @@ public: template <> class add_scale { -private: + private: const float32x4_t p1; const float32x4_t p2; -public: + public: INLINE add_scale(const float32x4_t s1, const float32x4_t s2) : p1(s1), p2(s2) {} INLINE float32x4_t operator()(const float32x4_t a, @@ -482,7 +482,7 @@ public: template <> class sub { -public: + public: INLINE float32x4_t operator()(const float32x4_t a, const float32x4_t b) const { return 
vsubq_f32(a, b); @@ -491,7 +491,7 @@ public: template <> class mul { -public: + public: INLINE float32x4_t operator()(const float32x4_t a, const float32x4_t b) const { return vmulq_f32(a, b); @@ -500,7 +500,7 @@ public: template <> class div { -public: + public: INLINE float32x4_t operator()(const float32x4_t a, const float32x4_t b) const { float32x4_t tmp = vrecpeq_f32(b); @@ -510,7 +510,7 @@ public: template <> class min { -public: + public: INLINE float32x4_t operator()(const float32x4_t a, const float32x4_t b) const { return vminq_f32(a, b); @@ -519,7 +519,7 @@ public: template <> class max { -public: + public: INLINE float32x4_t operator()(const float32x4_t a, const float32x4_t b) const { return vmaxq_f32(a, b); diff --git a/paddle/cuda/include/hl_thread.ph b/paddle/legacy/cuda/include/hl_thread.ph similarity index 100% rename from paddle/cuda/include/hl_thread.ph rename to paddle/legacy/cuda/include/hl_thread.ph diff --git a/paddle/cuda/include/hl_time.h b/paddle/legacy/cuda/include/hl_time.h similarity index 100% rename from paddle/cuda/include/hl_time.h rename to paddle/legacy/cuda/include/hl_time.h diff --git a/paddle/cuda/include/hl_top_k.h b/paddle/legacy/cuda/include/hl_top_k.h similarity index 100% rename from paddle/cuda/include/hl_top_k.h rename to paddle/legacy/cuda/include/hl_top_k.h diff --git a/paddle/cuda/include/hl_warpctc_wrap.h b/paddle/legacy/cuda/include/hl_warpctc_wrap.h similarity index 100% rename from paddle/cuda/include/hl_warpctc_wrap.h rename to paddle/legacy/cuda/include/hl_warpctc_wrap.h diff --git a/paddle/cuda/include/stub/hl_aggregate_stub.h b/paddle/legacy/cuda/include/stub/hl_aggregate_stub.h similarity index 100% rename from paddle/cuda/include/stub/hl_aggregate_stub.h rename to paddle/legacy/cuda/include/stub/hl_aggregate_stub.h diff --git a/paddle/cuda/include/stub/hl_cnn_stub.h b/paddle/legacy/cuda/include/stub/hl_cnn_stub.h similarity index 100% rename from paddle/cuda/include/stub/hl_cnn_stub.h rename to paddle/legacy/cuda/include/stub/hl_cnn_stub.h diff --git a/paddle/cuda/include/stub/hl_cuda_cublas_stub.h b/paddle/legacy/cuda/include/stub/hl_cuda_cublas_stub.h similarity index 100% rename from paddle/cuda/include/stub/hl_cuda_cublas_stub.h rename to paddle/legacy/cuda/include/stub/hl_cuda_cublas_stub.h diff --git a/paddle/cuda/include/stub/hl_cuda_cudnn_stub.h b/paddle/legacy/cuda/include/stub/hl_cuda_cudnn_stub.h similarity index 100% rename from paddle/cuda/include/stub/hl_cuda_cudnn_stub.h rename to paddle/legacy/cuda/include/stub/hl_cuda_cudnn_stub.h diff --git a/paddle/cuda/include/stub/hl_cuda_stub.h b/paddle/legacy/cuda/include/stub/hl_cuda_stub.h similarity index 100% rename from paddle/cuda/include/stub/hl_cuda_stub.h rename to paddle/legacy/cuda/include/stub/hl_cuda_stub.h diff --git a/paddle/cuda/include/stub/hl_lstm_stub.h b/paddle/legacy/cuda/include/stub/hl_lstm_stub.h similarity index 100% rename from paddle/cuda/include/stub/hl_lstm_stub.h rename to paddle/legacy/cuda/include/stub/hl_lstm_stub.h diff --git a/paddle/cuda/include/stub/hl_matrix_stub.h b/paddle/legacy/cuda/include/stub/hl_matrix_stub.h similarity index 100% rename from paddle/cuda/include/stub/hl_matrix_stub.h rename to paddle/legacy/cuda/include/stub/hl_matrix_stub.h diff --git a/paddle/cuda/include/stub/hl_sequence_stub.h b/paddle/legacy/cuda/include/stub/hl_sequence_stub.h similarity index 100% rename from paddle/cuda/include/stub/hl_sequence_stub.h rename to paddle/legacy/cuda/include/stub/hl_sequence_stub.h diff --git 
a/paddle/cuda/include/stub/hl_sparse_stub.h b/paddle/legacy/cuda/include/stub/hl_sparse_stub.h similarity index 100% rename from paddle/cuda/include/stub/hl_sparse_stub.h rename to paddle/legacy/cuda/include/stub/hl_sparse_stub.h diff --git a/paddle/cuda/src/avx_mathfun.h b/paddle/legacy/cuda/src/avx_mathfun.h similarity index 100% rename from paddle/cuda/src/avx_mathfun.h rename to paddle/legacy/cuda/src/avx_mathfun.h diff --git a/paddle/cuda/src/hl_avx_functions.cc b/paddle/legacy/cuda/src/hl_avx_functions.cc similarity index 100% rename from paddle/cuda/src/hl_avx_functions.cc rename to paddle/legacy/cuda/src/hl_avx_functions.cc diff --git a/paddle/cuda/src/hl_batch_norm.cu b/paddle/legacy/cuda/src/hl_batch_norm.cu similarity index 100% rename from paddle/cuda/src/hl_batch_norm.cu rename to paddle/legacy/cuda/src/hl_batch_norm.cu diff --git a/paddle/cuda/src/hl_batch_transpose.cu b/paddle/legacy/cuda/src/hl_batch_transpose.cu similarity index 100% rename from paddle/cuda/src/hl_batch_transpose.cu rename to paddle/legacy/cuda/src/hl_batch_transpose.cu diff --git a/paddle/cuda/src/hl_cpu_functions.cc b/paddle/legacy/cuda/src/hl_cpu_functions.cc similarity index 100% rename from paddle/cuda/src/hl_cpu_functions.cc rename to paddle/legacy/cuda/src/hl_cpu_functions.cc diff --git a/paddle/cuda/src/hl_cuda_aggregate.cu b/paddle/legacy/cuda/src/hl_cuda_aggregate.cu similarity index 99% rename from paddle/cuda/src/hl_cuda_aggregate.cu rename to paddle/legacy/cuda/src/hl_cuda_aggregate.cu index d30c264127..9831c5ecc3 100644 --- a/paddle/cuda/src/hl_cuda_aggregate.cu +++ b/paddle/legacy/cuda/src/hl_cuda_aggregate.cu @@ -18,7 +18,7 @@ limitations under the License. */ #include "hl_cuda.ph" #include "hl_matrix_base.cuh" #include "hl_thread.ph" -#include "paddle/utils/Logging.h" +#include "paddle/legacy/utils/Logging.h" /** * @brief matrix row operator. diff --git a/paddle/cuda/src/hl_cuda_cnn.cu b/paddle/legacy/cuda/src/hl_cuda_cnn.cu similarity index 100% rename from paddle/cuda/src/hl_cuda_cnn.cu rename to paddle/legacy/cuda/src/hl_cuda_cnn.cu diff --git a/paddle/cuda/src/hl_cuda_cublas.cc b/paddle/legacy/cuda/src/hl_cuda_cublas.cc similarity index 99% rename from paddle/cuda/src/hl_cuda_cublas.cc rename to paddle/legacy/cuda/src/hl_cuda_cublas.cc index 975df42878..283b8b6e9c 100644 --- a/paddle/cuda/src/hl_cuda_cublas.cc +++ b/paddle/legacy/cuda/src/hl_cuda_cublas.cc @@ -16,8 +16,8 @@ limitations under the License. */ #include #include "hl_cuda.h" #include "hl_thread.ph" -#include "paddle/utils/DynamicLoader.h" -#include "paddle/utils/Logging.h" +#include "paddle/legacy/utils/DynamicLoader.h" +#include "paddle/legacy/utils/Logging.h" namespace dynload { diff --git a/paddle/cuda/src/hl_cuda_cudnn.cc b/paddle/legacy/cuda/src/hl_cuda_cudnn.cc similarity index 99% rename from paddle/cuda/src/hl_cuda_cudnn.cc rename to paddle/legacy/cuda/src/hl_cuda_cudnn.cc index dfa935dcff..b0ac5aaac2 100644 --- a/paddle/cuda/src/hl_cuda_cudnn.cc +++ b/paddle/legacy/cuda/src/hl_cuda_cudnn.cc @@ -17,8 +17,8 @@ limitations under the License. 
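The long hl_tensor_ops.h hunk a few files back deserves a condensed illustration: each operation is a functor template carrying the newly indented access specifiers, and SIMD specializations are selected purely by operand type. A standalone sketch of that pattern, with a plain `inline` standing in for the header's `INLINE` portability macro:

```cpp
#include <emmintrin.h>  // SSE intrinsics, for the __m128 specialization

namespace binary {

// Generic scalar functor, as in the hunk above.
template <class T>
class add {
 public:
  inline T operator()(const T a, const T b) const { return a + b; }
};

// Same operation, vectorized: picked automatically when T = __m128.
template <>
class add<__m128> {
 public:
  inline __m128 operator()(const __m128 a, const __m128 b) const {
    return _mm_add_ps(a, b);
  }
};

}  // namespace binary
```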
*/ #include #include "hl_cuda_cudnn.ph" #include "hl_thread.ph" -#include "paddle/utils/DynamicLoader.h" -#include "paddle/utils/Logging.h" +#include "paddle/legacy/utils/DynamicLoader.h" +#include "paddle/legacy/utils/Logging.h" DEFINE_int32(cudnn_conv_workspace_limit_in_mb, 4096, diff --git a/paddle/cuda/src/hl_cuda_device.cc b/paddle/legacy/cuda/src/hl_cuda_device.cc similarity index 99% rename from paddle/cuda/src/hl_cuda_device.cc rename to paddle/legacy/cuda/src/hl_cuda_device.cc index 3025aa4852..501e3b0f3b 100644 --- a/paddle/cuda/src/hl_cuda_device.cc +++ b/paddle/legacy/cuda/src/hl_cuda_device.cc @@ -23,8 +23,8 @@ limitations under the License. */ #include #include "hl_cuda.ph" #include "hl_thread.ph" -#include "paddle/utils/Logging.h" -#include "paddle/utils/DynamicLoader.h" +#include "paddle/legacy/utils/Logging.h" +#include "paddle/legacy/utils/DynamicLoader.h" // clang-format on namespace dynload { diff --git a/paddle/cuda/src/hl_cuda_lstm.cu b/paddle/legacy/cuda/src/hl_cuda_lstm.cu similarity index 99% rename from paddle/cuda/src/hl_cuda_lstm.cu rename to paddle/legacy/cuda/src/hl_cuda_lstm.cu index e30fcddffd..9ac564fd25 100644 --- a/paddle/cuda/src/hl_cuda_lstm.cu +++ b/paddle/legacy/cuda/src/hl_cuda_lstm.cu @@ -16,7 +16,7 @@ limitations under the License. */ #include "hl_base.h" #include "hl_cuda_cublas.h" #include "hl_device_functions.cuh" -#include "paddle/utils/Logging.h" +#include "paddle/legacy/utils/Logging.h" typedef hppl::Active::forward t_forward; typedef hppl::Active::backward t_backward; @@ -30,7 +30,7 @@ bool hl_lstm_sequence_parallel(int frameSize) { } class frameValue { -public: + public: real *value_; __device__ frameValue(real *value) : value_(value) {} template diff --git a/paddle/cuda/src/hl_cuda_matrix.cu b/paddle/legacy/cuda/src/hl_cuda_matrix.cu similarity index 99% rename from paddle/cuda/src/hl_cuda_matrix.cu rename to paddle/legacy/cuda/src/hl_cuda_matrix.cu index 3e17c8090c..6fe460026b 100644 --- a/paddle/cuda/src/hl_cuda_matrix.cu +++ b/paddle/legacy/cuda/src/hl_cuda_matrix.cu @@ -20,7 +20,7 @@ limitations under the License. */ #include "hl_matrix_ops.cuh" #include "hl_sequence.h" #include "hl_sparse.ph" -#include "paddle/utils/Logging.h" +#include "paddle/legacy/utils/Logging.h" DEFINE_MATRIX_UNARY_OP(Zero, a = 0); DEFINE_MATRIX_TERNARY_PARAMETER_OP(_add, TWO_PARAMETER, c = p1 * a + p2 * b); diff --git a/paddle/cuda/src/hl_cuda_sequence.cu b/paddle/legacy/cuda/src/hl_cuda_sequence.cu similarity index 99% rename from paddle/cuda/src/hl_cuda_sequence.cu rename to paddle/legacy/cuda/src/hl_cuda_sequence.cu index a3a5f038de..1d772b5ce2 100644 --- a/paddle/cuda/src/hl_cuda_sequence.cu +++ b/paddle/legacy/cuda/src/hl_cuda_sequence.cu @@ -14,7 +14,7 @@ limitations under the License. */ #include "hl_base.h" #include "hl_device_functions.cuh" -#include "paddle/utils/Logging.h" +#include "paddle/legacy/utils/Logging.h" __global__ void KeMaxSequenceForward(real* input, const int* sequence, diff --git a/paddle/cuda/src/hl_cuda_sparse.cu b/paddle/legacy/cuda/src/hl_cuda_sparse.cu similarity index 99% rename from paddle/cuda/src/hl_cuda_sparse.cu rename to paddle/legacy/cuda/src/hl_cuda_sparse.cu index 432041fed5..8065a6f9f6 100644 --- a/paddle/cuda/src/hl_cuda_sparse.cu +++ b/paddle/legacy/cuda/src/hl_cuda_sparse.cu @@ -18,7 +18,7 @@ limitations under the License. 
*/ #include "hl_matrix_ops.cuh" #include "hl_sparse.h" #include "hl_sparse.ph" -#include "paddle/utils/Logging.h" +#include "paddle/legacy/utils/Logging.h" DEFINE_MATRIX_UNARY_PARAMETER_OP(mul_scalar, ONE_PARAMETER, a = a * p); DEFINE_MATRIX_UNARY_OP(Zero, a = 0); diff --git a/paddle/cuda/src/hl_cuda_sparse.cuh b/paddle/legacy/cuda/src/hl_cuda_sparse.cuh similarity index 100% rename from paddle/cuda/src/hl_cuda_sparse.cuh rename to paddle/legacy/cuda/src/hl_cuda_sparse.cuh diff --git a/paddle/cuda/src/hl_math.cc b/paddle/legacy/cuda/src/hl_math.cc similarity index 100% rename from paddle/cuda/src/hl_math.cc rename to paddle/legacy/cuda/src/hl_math.cc diff --git a/paddle/cuda/src/hl_perturbation_util.cu b/paddle/legacy/cuda/src/hl_perturbation_util.cu similarity index 100% rename from paddle/cuda/src/hl_perturbation_util.cu rename to paddle/legacy/cuda/src/hl_perturbation_util.cu diff --git a/paddle/cuda/src/hl_table_apply.cu b/paddle/legacy/cuda/src/hl_table_apply.cu similarity index 98% rename from paddle/cuda/src/hl_table_apply.cu rename to paddle/legacy/cuda/src/hl_table_apply.cu index efa4bef02b..7411ae35d3 100644 --- a/paddle/cuda/src/hl_table_apply.cu +++ b/paddle/legacy/cuda/src/hl_table_apply.cu @@ -15,7 +15,7 @@ limitations under the License. */ #include "hl_base.h" #include "hl_cuda.h" #include "hl_device_functions.cuh" -#include "paddle/utils/Logging.h" +#include "paddle/legacy/utils/Logging.h" template __global__ void KeMatrixAddRows(real* output, diff --git a/paddle/cuda/src/hl_time.cc b/paddle/legacy/cuda/src/hl_time.cc similarity index 100% rename from paddle/cuda/src/hl_time.cc rename to paddle/legacy/cuda/src/hl_time.cc diff --git a/paddle/cuda/src/hl_top_k.cu b/paddle/legacy/cuda/src/hl_top_k.cu similarity index 98% rename from paddle/cuda/src/hl_top_k.cu rename to paddle/legacy/cuda/src/hl_top_k.cu index b17290557c..041ac419f5 100644 --- a/paddle/cuda/src/hl_top_k.cu +++ b/paddle/legacy/cuda/src/hl_top_k.cu @@ -12,10 +12,10 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/cuda/include/hl_base.h" -#include "paddle/cuda/include/hl_sparse.ph" -#include "paddle/cuda/include/hl_top_k.h" -#include "paddle/utils/Logging.h" +#include "paddle/legacy/cuda/include/hl_base.h" +#include "paddle/legacy/cuda/include/hl_sparse.ph" +#include "paddle/legacy/cuda/include/hl_top_k.h" +#include "paddle/legacy/utils/Logging.h" // using namespace hppl; diff --git a/paddle/cuda/src/hl_warpctc_wrap.cc b/paddle/legacy/cuda/src/hl_warpctc_wrap.cc similarity index 98% rename from paddle/cuda/src/hl_warpctc_wrap.cc rename to paddle/legacy/cuda/src/hl_warpctc_wrap.cc index 5111bceaff..31a8652f1f 100644 --- a/paddle/cuda/src/hl_warpctc_wrap.cc +++ b/paddle/legacy/cuda/src/hl_warpctc_wrap.cc @@ -14,8 +14,8 @@ limitations under the License. 
*/ #include "hl_warpctc_wrap.h" #include -#include "paddle/utils/DynamicLoader.h" -#include "paddle/utils/Logging.h" +#include "paddle/legacy/utils/DynamicLoader.h" +#include "paddle/legacy/utils/Logging.h" namespace dynload { diff --git a/paddle/function/BlockExpandOp.cpp b/paddle/legacy/function/BlockExpandOp.cpp similarity index 99% rename from paddle/function/BlockExpandOp.cpp rename to paddle/legacy/function/BlockExpandOp.cpp index aa53853e08..f01f89a727 100644 --- a/paddle/function/BlockExpandOp.cpp +++ b/paddle/legacy/function/BlockExpandOp.cpp @@ -33,7 +33,7 @@ namespace paddle { * \param outputs[0] Image data of NCHW format. */ class BlockExpandFunction : public FunctionBase { -public: + public: void init(const FuncConfig& config) override { // function arguments strides_ = config.get>("strides"); @@ -81,7 +81,7 @@ public: (size_t)blockW()}); } -protected: + protected: std::vector strides_; std::vector paddings_; std::vector blocks_; @@ -101,7 +101,7 @@ protected: template class BlockExpandForward : public BlockExpandFunction { -public: + public: void init(const FuncConfig& config) override { BlockExpandFunction::init(config); } @@ -149,7 +149,7 @@ public: template class BlockExpandBackward : public BlockExpandFunction { -public: + public: void init(const FuncConfig& config) override { BlockExpandFunction::init(config); } diff --git a/paddle/function/BlockExpandOpTest.cpp b/paddle/legacy/function/BlockExpandOpTest.cpp similarity index 100% rename from paddle/function/BlockExpandOpTest.cpp rename to paddle/legacy/function/BlockExpandOpTest.cpp diff --git a/paddle/function/BufferArg.cpp b/paddle/legacy/function/BufferArg.cpp similarity index 97% rename from paddle/function/BufferArg.cpp rename to paddle/legacy/function/BufferArg.cpp index 2dc931c5d7..1f3d505c31 100644 --- a/paddle/function/BufferArg.cpp +++ b/paddle/legacy/function/BufferArg.cpp @@ -15,7 +15,7 @@ limitations under the License. */ #include #include "BufferArg.h" -#include "paddle/math/SparseMatrix.h" +#include "paddle/legacy/math/SparseMatrix.h" namespace paddle { diff --git a/paddle/function/BufferArg.h b/paddle/legacy/function/BufferArg.h similarity index 98% rename from paddle/function/BufferArg.h rename to paddle/legacy/function/BufferArg.h index 89ee09837d..1f47ad556d 100644 --- a/paddle/function/BufferArg.h +++ b/paddle/legacy/function/BufferArg.h @@ -18,7 +18,7 @@ limitations under the License. 
*/ #include "TensorShape.h" #include "TensorType.h" -#include "paddle/math/Matrix.h" +#include "paddle/legacy/math/Matrix.h" namespace paddle { @@ -63,12 +63,12 @@ enum ArgType { ADD_TO = 2, }; class BufferArg { -public: + public: void setArgType(ArgType argType) { argType_ = argType; } ArgType getArgType() const { return argType_; } -public: + public: BufferArg(ValueType valueType, const TensorShape& shape, ArgType argType = UNSPECIFIED) @@ -169,7 +169,7 @@ public: const SequenceArg& sequence() const; const SparseMatrixArg& sparse() const; -protected: + protected: void* buf_; ValueType valueType_; TensorShape shape_; @@ -185,7 +185,7 @@ protected: // valueType_ = int32 // if a < b then value_.buf_[a] < value_.buf_[b] class SequenceIdArg : public BufferArg { -public: + public: SequenceIdArg(const TensorShape& shape, ArgType argType = UNSPECIFIED) : BufferArg(VALUE_TYPE_INT32, shape, argType) { bufferType_ = TENSOR_SEQUENCE_ID; @@ -212,7 +212,7 @@ public: size_t numSeqs() const { return numSeqs_; } -private: + private: size_t numSeqs_; }; @@ -222,7 +222,7 @@ private: // SequenceArg can be used to represent sequences that contain multiple // unequal lengths. class SequenceArg : public BufferArg { -public: + public: SequenceArg(ValueType valueType, const TensorShape& shape, ArgType argType = UNSPECIFIED) @@ -255,7 +255,7 @@ public: SequenceIdArg& getSequenceId() { return startPositions_; } const SequenceIdArg& getSequenceId() const { return startPositions_; } -private: + private: SequenceIdArg startPositions_; }; @@ -263,7 +263,7 @@ private: // valueType_ == float or double // shape_.ndims() == 2 class SparseMatrixArg : public BufferArg { -public: + public: SparseMatrixArg(void* buf, ValueType valueType, const TensorShape& shape, @@ -353,7 +353,7 @@ public: SparseDataType dataType() const { return type_; } -private: + private: BufferArg row_; BufferArg col_; size_t nnz_; diff --git a/paddle/function/BufferArgTest.cpp b/paddle/legacy/function/BufferArgTest.cpp similarity index 96% rename from paddle/function/BufferArgTest.cpp rename to paddle/legacy/function/BufferArgTest.cpp index 1a6e0110af..1ec153bea8 100644 --- a/paddle/function/BufferArgTest.cpp +++ b/paddle/legacy/function/BufferArgTest.cpp @@ -14,7 +14,7 @@ limitations under the License. */ #include "BufferArg.h" #include -#include "paddle/math/MemoryHandle.h" +#include "paddle/legacy/math/MemoryHandle.h" namespace paddle { diff --git a/paddle/function/CMakeLists.txt b/paddle/legacy/function/CMakeLists.txt similarity index 89% rename from paddle/function/CMakeLists.txt rename to paddle/legacy/function/CMakeLists.txt index 9b2779b42c..29b4ac098e 100644 --- a/paddle/function/CMakeLists.txt +++ b/paddle/legacy/function/CMakeLists.txt @@ -52,9 +52,3 @@ add_simple_unittest(Im2ColTest) add_simple_unittest(GemmConvOpTest) add_simple_unittest(DepthwiseConvOpTest) endif() - -add_style_check_target(paddle_function ${h_files}) -add_style_check_target(paddle_function ${cpp_files}) -if(WITH_GPU) - add_style_check_target(paddle_function ${cu_files}) -endif() diff --git a/paddle/function/ContextProjectionOp.cpp b/paddle/legacy/function/ContextProjectionOp.cpp similarity index 99% rename from paddle/function/ContextProjectionOp.cpp rename to paddle/legacy/function/ContextProjectionOp.cpp index 904b0958e6..05a3f91586 100644 --- a/paddle/function/ContextProjectionOp.cpp +++ b/paddle/legacy/function/ContextProjectionOp.cpp @@ -13,8 +13,8 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ #include "ContextProjectionOp.h" -#include "paddle/math/Matrix.h" -#include "paddle/math/Vector.h" +#include "paddle/legacy/math/Matrix.h" +#include "paddle/legacy/math/Vector.h" namespace paddle { /** @@ -100,7 +100,7 @@ void ContextProjectionForward(CpuMatrix& out_mat, */ template class ContextProjectionForwardFunc : public FunctionBase { -public: + public: void init(const FuncConfig& config) override { context_length_ = config.get("context_length"); context_start_ = config.get("context_start"); @@ -146,7 +146,7 @@ public: begin_pad_); } -private: + private: size_t context_length_; int context_start_; size_t begin_pad_; @@ -223,7 +223,7 @@ void ContextProjectionBackward(const CpuMatrix& out_grad_mat, */ template class ContextProjectionBackwardFunc : public FunctionBase { -public: + public: void init(const FuncConfig& config) override { context_length_ = config.get("context_length"); context_start_ = config.get("context_start"); @@ -278,7 +278,7 @@ public: total_pad_); } -private: + private: size_t context_length_; int context_start_; size_t begin_pad_; @@ -299,7 +299,7 @@ private: */ template class ContextProjectionBackwardDataFunc : public FunctionBase { -public: + public: void init(const FuncConfig& config) override { context_length_ = config.get("context_length"); context_start_ = config.get("context_start"); @@ -331,7 +331,7 @@ public: out_grad_mat, in_grad_mat, seq_vec, context_length_, context_start_); } -private: + private: size_t context_length_; int context_start_; }; @@ -348,7 +348,7 @@ private: */ template class ContextProjectionBackwardWeightFunc : public FunctionBase { -public: + public: void init(const FuncConfig& config) override { context_length_ = config.get("context_length"); context_start_ = config.get("context_start"); @@ -382,7 +382,7 @@ public: begin_pad_); } -private: + private: size_t context_length_; int context_start_; size_t begin_pad_; diff --git a/paddle/function/ContextProjectionOp.h b/paddle/legacy/function/ContextProjectionOp.h similarity index 100% rename from paddle/function/ContextProjectionOp.h rename to paddle/legacy/function/ContextProjectionOp.h diff --git a/paddle/function/ContextProjectionOpGpu.cu b/paddle/legacy/function/ContextProjectionOpGpu.cu similarity index 100% rename from paddle/function/ContextProjectionOpGpu.cu rename to paddle/legacy/function/ContextProjectionOpGpu.cu diff --git a/paddle/function/ContextProjectionOpTest.cpp b/paddle/legacy/function/ContextProjectionOpTest.cpp similarity index 99% rename from paddle/function/ContextProjectionOpTest.cpp rename to paddle/legacy/function/ContextProjectionOpTest.cpp index d805c3ae92..3b0a34567f 100644 --- a/paddle/function/ContextProjectionOpTest.cpp +++ b/paddle/legacy/function/ContextProjectionOpTest.cpp @@ -14,7 +14,7 @@ limitations under the License. */ #include #include "FunctionTest.h" -#include "paddle/math/Matrix.h" +#include "paddle/legacy/math/Matrix.h" #include "paddle/testing/TestUtil.h" using namespace paddle; // NOLINT diff --git a/paddle/function/ConvOp.h b/paddle/legacy/function/ConvOp.h similarity index 99% rename from paddle/function/ConvOp.h rename to paddle/legacy/function/ConvOp.h index 7d23d0079c..2d8437bcfe 100644 --- a/paddle/function/ConvOp.h +++ b/paddle/legacy/function/ConvOp.h @@ -56,7 +56,7 @@ namespace paddle { * H and W is height and width of filter. 
*/ class ConvFunctionBase : public FunctionBase { -public: + public: void init(const FuncConfig& config) override { // function arguments strides_ = config.get>("strides"); @@ -101,7 +101,7 @@ public: } } -protected: + protected: size_t getFilterHeight(const TensorShape& filter) const { return filter[filter.ndims() - 2]; } diff --git a/paddle/function/ConvOpTest.h b/paddle/legacy/function/ConvOpTest.h similarity index 100% rename from paddle/function/ConvOpTest.h rename to paddle/legacy/function/ConvOpTest.h diff --git a/paddle/function/CosSimOp.cpp b/paddle/legacy/function/CosSimOp.cpp similarity index 98% rename from paddle/function/CosSimOp.cpp rename to paddle/legacy/function/CosSimOp.cpp index 81bccc1a9c..d04f4396ca 100644 --- a/paddle/function/CosSimOp.cpp +++ b/paddle/legacy/function/CosSimOp.cpp @@ -13,8 +13,8 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "CosSimOp.h" -#include "paddle/math/Matrix.h" -#include "paddle/math/Vector.h" +#include "paddle/legacy/math/Matrix.h" +#include "paddle/legacy/math/Vector.h" namespace paddle { /** @@ -97,7 +97,7 @@ class CosSimForwardFunc : public FunctionBase { CosSimForward(out_mat, in1_mat, in2_mat, scale_); } -private: + private: real scale_; }; @@ -227,7 +227,7 @@ class CosSimBackwardFunc : public FunctionBase { out_grad, out_val, in1_val, in2_val, in1_grad, in2_grad, scale_); } -private: + private: real scale_; }; diff --git a/paddle/function/CosSimOp.h b/paddle/legacy/function/CosSimOp.h similarity index 100% rename from paddle/function/CosSimOp.h rename to paddle/legacy/function/CosSimOp.h diff --git a/paddle/function/CosSimOpGpu.cu b/paddle/legacy/function/CosSimOpGpu.cu similarity index 100% rename from paddle/function/CosSimOpGpu.cu rename to paddle/legacy/function/CosSimOpGpu.cu diff --git a/paddle/function/CosSimOpTest.cpp b/paddle/legacy/function/CosSimOpTest.cpp similarity index 98% rename from paddle/function/CosSimOpTest.cpp rename to paddle/legacy/function/CosSimOpTest.cpp index 42b02da0cb..31bb43e1ba 100644 --- a/paddle/function/CosSimOpTest.cpp +++ b/paddle/legacy/function/CosSimOpTest.cpp @@ -14,7 +14,7 @@ limitations under the License. */ #include #include "FunctionTest.h" -#include "paddle/math/Matrix.h" +#include "paddle/legacy/math/Matrix.h" using namespace paddle; // NOLINT diff --git a/paddle/function/CropOp.cpp b/paddle/legacy/function/CropOp.cpp similarity index 97% rename from paddle/function/CropOp.cpp rename to paddle/legacy/function/CropOp.cpp index 7aa527d216..e22678822f 100644 --- a/paddle/function/CropOp.cpp +++ b/paddle/legacy/function/CropOp.cpp @@ -13,8 +13,8 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ #include "CropOp.h" -#include "paddle/function/TensorShape.h" -#include "paddle/math/Vector.h" +#include "paddle/legacy/function/TensorShape.h" +#include "paddle/legacy/math/Vector.h" namespace paddle { @@ -112,7 +112,7 @@ void CropGrad(const real* inGrad, */ template class CropFunc : public FunctionBase { -public: + public: void init(const FuncConfig& config) override { conf_ = config; } void calc(const BufferArgs& inputs, const BufferArgs& outputs) override { @@ -130,7 +130,7 @@ public: conf_); } -private: + private: FuncConfig conf_; }; @@ -145,7 +145,7 @@ private: template class CropGradFunc : public FunctionBase { -public: + public: void init(const FuncConfig& config) override { conf_ = config; } void calc(const BufferArgs& inputs, const BufferArgs& outputs) override { @@ -163,7 +163,7 @@ public: conf_); } -private: + private: FuncConfig conf_; }; diff --git a/paddle/function/CropOp.h b/paddle/legacy/function/CropOp.h similarity index 100% rename from paddle/function/CropOp.h rename to paddle/legacy/function/CropOp.h diff --git a/paddle/function/CropOpGpu.cu b/paddle/legacy/function/CropOpGpu.cu similarity index 100% rename from paddle/function/CropOpGpu.cu rename to paddle/legacy/function/CropOpGpu.cu diff --git a/paddle/function/CropOpTest.cpp b/paddle/legacy/function/CropOpTest.cpp similarity index 100% rename from paddle/function/CropOpTest.cpp rename to paddle/legacy/function/CropOpTest.cpp diff --git a/paddle/function/CrossMapNormalOp.cpp b/paddle/legacy/function/CrossMapNormalOp.cpp similarity index 99% rename from paddle/function/CrossMapNormalOp.cpp rename to paddle/legacy/function/CrossMapNormalOp.cpp index 75c0fc2a3d..f28703af00 100644 --- a/paddle/function/CrossMapNormalOp.cpp +++ b/paddle/legacy/function/CrossMapNormalOp.cpp @@ -13,7 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ #include "CrossMapNormalOp.h" -#include "paddle/math/Vector.h" +#include "paddle/legacy/math/Vector.h" namespace paddle { @@ -160,7 +160,7 @@ void CrossMapNormalGrad(real* inputsGrad, */ template class CrossMapNormalFunc : public FunctionBase { -public: + public: void init(const FuncConfig& config) override { // function arguments size_ = config.get("size"); @@ -220,7 +220,7 @@ public: return ops; } -private: + private: size_t size_; real scale_; real pow_; @@ -260,7 +260,7 @@ private: */ template class CrossMapNormalGradFunc : public FunctionBase { -public: + public: void init(const FuncConfig& config) override { // function arguments size_ = config.get("size"); @@ -328,7 +328,7 @@ public: return ops; } -private: + private: size_t size_; real scale_; real pow_; diff --git a/paddle/function/CrossMapNormalOp.h b/paddle/legacy/function/CrossMapNormalOp.h similarity index 100% rename from paddle/function/CrossMapNormalOp.h rename to paddle/legacy/function/CrossMapNormalOp.h diff --git a/paddle/function/CrossMapNormalOpGpu.cu b/paddle/legacy/function/CrossMapNormalOpGpu.cu similarity index 100% rename from paddle/function/CrossMapNormalOpGpu.cu rename to paddle/legacy/function/CrossMapNormalOpGpu.cu diff --git a/paddle/function/CrossMapNormalOpTest.cpp b/paddle/legacy/function/CrossMapNormalOpTest.cpp similarity index 100% rename from paddle/function/CrossMapNormalOpTest.cpp rename to paddle/legacy/function/CrossMapNormalOpTest.cpp diff --git a/paddle/function/DepthwiseConvOp.cpp b/paddle/legacy/function/DepthwiseConvOp.cpp similarity index 99% rename from paddle/function/DepthwiseConvOp.cpp rename to paddle/legacy/function/DepthwiseConvOp.cpp index 46651345b4..958034e08e 100644 --- a/paddle/function/DepthwiseConvOp.cpp +++ b/paddle/legacy/function/DepthwiseConvOp.cpp @@ -19,7 +19,7 @@ namespace paddle { template class DepthwiseConvFunctor { -public: + public: void operator()(const T* inputData, const T* filterData, int batchSize, @@ -43,7 +43,7 @@ public: template class DepthwiseConvGradInputFunctor { -public: + public: void operator()(const T* outputGrad, const T* filterData, int batchSize, @@ -66,7 +66,7 @@ public: template class DepthwiseConvGradFilterFunctor { -public: + public: void operator()(const T* outputGrad, const T* inputData, int batchSize, @@ -93,7 +93,7 @@ public: */ template class DepthwiseConvFunction : public ConvFunctionBase { -public: + public: void init(const FuncConfig& config) override { ConvFunctionBase::init(config); } @@ -156,7 +156,7 @@ public: */ template class DepthwiseConvGradInputFunction : public ConvFunctionBase { -public: + public: void init(const FuncConfig& config) override { ConvFunctionBase::init(config); } @@ -220,7 +220,7 @@ public: */ template class DepthwiseConvGradFilterFunction : public ConvFunctionBase { -public: + public: void init(const FuncConfig& config) override { ConvFunctionBase::init(config); } diff --git a/paddle/function/DepthwiseConvOp.h b/paddle/legacy/function/DepthwiseConvOp.h similarity index 99% rename from paddle/function/DepthwiseConvOp.h rename to paddle/legacy/function/DepthwiseConvOp.h index 6700747314..7837edd1c0 100644 --- a/paddle/function/DepthwiseConvOp.h +++ b/paddle/legacy/function/DepthwiseConvOp.h @@ -44,7 +44,7 @@ namespace paddle { */ template class DepthwiseConvFunctor { -public: + public: void operator()(const T* inputData, const T* filterData, int batchSize, @@ -89,7 +89,7 @@ public: */ template class DepthwiseConvGradInputFunctor { -public: + public: void operator()(const T* outputGrad, const T* filterData, 
int batchSize, @@ -135,7 +135,7 @@ public: */ template class DepthwiseConvGradFilterFunctor { -public: + public: void operator()(const T* outputGrad, const T* inputData, int batchSize, diff --git a/paddle/function/DepthwiseConvOpGpu.cu b/paddle/legacy/function/DepthwiseConvOpGpu.cu similarity index 99% rename from paddle/function/DepthwiseConvOpGpu.cu rename to paddle/legacy/function/DepthwiseConvOpGpu.cu index cd1d55a416..17138cc563 100644 --- a/paddle/function/DepthwiseConvOpGpu.cu +++ b/paddle/legacy/function/DepthwiseConvOpGpu.cu @@ -13,7 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "DepthwiseConvOp.h" -#include "paddle/math/BaseMatrix.h" +#include "paddle/legacy/math/BaseMatrix.h" namespace paddle { @@ -199,7 +199,7 @@ __global__ void ConvolutionDepthwiseFilterBackward(const int num_i, template class DepthwiseConvFunctor { -public: + public: void operator()(const T* inputData, const T* filterData, int batchSize, @@ -249,7 +249,7 @@ public: template class DepthwiseConvGradInputFunctor { -public: + public: void operator()(const T* outputGrad, const T* filterData, int batchSize, @@ -300,7 +300,7 @@ public: template class DepthwiseConvGradFilterFunctor { -public: + public: void operator()(const T* outputGrad, const T* inputData, int batchSize, diff --git a/paddle/function/DepthwiseConvOpTest.cpp b/paddle/legacy/function/DepthwiseConvOpTest.cpp similarity index 100% rename from paddle/function/DepthwiseConvOpTest.cpp rename to paddle/legacy/function/DepthwiseConvOpTest.cpp diff --git a/paddle/function/EigenGemm.cpp b/paddle/legacy/function/EigenGemm.cpp similarity index 84% rename from paddle/function/EigenGemm.cpp rename to paddle/legacy/function/EigenGemm.cpp index bac4659e62..5929c5c68e 100644 --- a/paddle/function/EigenGemm.cpp +++ b/paddle/legacy/function/EigenGemm.cpp @@ -13,7 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #include -#include "unsupported/Eigen/CXX11/Tensor" +#include "paddle/legacy/function/EigenThreadDevice.h" namespace paddle { @@ -70,25 +70,26 @@ struct EigenBlasGemm { dims[0].first = transA ? 0 : 1; dims[0].second = transB ? 
1 : 0; - Eigen::DefaultDevice device; + auto* device = EigenDeviceWarpper::device(); if (N == ldc) { if (alpha == T(1) && beta == T(0)) { - c.device(device) = a.contract(b, dims); + c.device(*device) = a.contract(b, dims); } else if (alpha == T(1) && beta == T(1)) { - c.device(device) += a.contract(b, dims); + c.device(*device) += a.contract(b, dims); } else { - c.device(device) = alpha * a.contract(b, dims) + beta * c; + c.device(*device) = alpha * a.contract(b, dims) + beta * c; } } else { if (alpha == T(1) && beta == T(0)) { - c.slice(offsetC, extentC).device(device) = a.contract(b, dims); + c.slice(offsetC, extentC).device(*device) = a.contract(b, dims); } else if (alpha == T(1) && beta == T(1)) { - c.slice(offsetC, extentC).device(device) += a.contract(b, dims); + c.slice(offsetC, extentC).device(*device) += a.contract(b, dims); } else { - c.slice(offsetC, extentC).device(device) = + c.slice(offsetC, extentC).device(*device) = alpha * a.contract(b, dims) + beta * c.slice(offsetC, extentC); } } + EigenDeviceWarpper::free_device(device); } }; diff --git a/paddle/legacy/function/EigenThreadDevice.h b/paddle/legacy/function/EigenThreadDevice.h new file mode 100644 index 0000000000..eb92251c82 --- /dev/null +++ b/paddle/legacy/function/EigenThreadDevice.h @@ -0,0 +1,73 @@ +/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. */ + +#pragma once + +#if defined(__OSX__) || defined(__APPLE__) +#include +#include +#endif +#include "unsupported/Eigen/CXX11/Tensor" + +namespace paddle { + +#if defined(__ANDROID__) +int GetCpuCount() { + FILE* fp = fopen("/sys/devices/system/cpu/possible", "r"); + if (!fp) { + return 1; + } + int rank0, rank1; + int num = fscanf(fp, "%d-%d", &rank0, &rank1); + fclose(fp); + if (num < 2) return 1; + return rank1 + 1; +} +#elif defined(__OSX__) || defined(__APPLE__) +int GetCpuCount() { + int count = 0; + size_t len = sizeof(int); + sysctlbyname("hw.ncpu", &count, &len, NULL, 0); + return count > 0 ? count : 1; +} +#else +int GetCpuCount() { return 1; } +#endif + +class EigenDeviceWarpper { + public: // NOLINT +#if EIGEN_USE_THREADS + static Eigen::ThreadPoolDevice* device() { + const int num_cpus = GetCpuCount(); + const int num_threads = (num_cpus > 2) ? 
2 : num_cpus; + static Eigen::ThreadPool tp(num_threads); + static Eigen::ThreadPoolDevice* device = + new Eigen::ThreadPoolDevice(&tp, num_threads); + return device; + } + + static void free_device(Eigen::ThreadPoolDevice* device) { + // do nothing + } +#else + static Eigen::DefaultDevice* device() { + Eigen::DefaultDevice* device = new Eigen::DefaultDevice; + return device; + } + + static void free_device(Eigen::DefaultDevice* device) { delete device; } +#endif +}; + +} // namespace paddle diff --git a/paddle/function/Function.cpp b/paddle/legacy/function/Function.cpp similarity index 100% rename from paddle/function/Function.cpp rename to paddle/legacy/function/Function.cpp diff --git a/paddle/function/Function.h b/paddle/legacy/function/Function.h similarity index 96% rename from paddle/function/Function.h rename to paddle/legacy/function/Function.h index 01288ef92e..bc5ef7e6f2 100644 --- a/paddle/function/Function.h +++ b/paddle/legacy/function/Function.h @@ -17,10 +17,10 @@ limitations under the License. */ #include #include #include "BufferArg.h" -#include "paddle/math/Matrix.h" -#include "paddle/utils/Any.h" -#include "paddle/utils/ClassRegistrar.h" -#include "paddle/utils/Error.h" +#include "paddle/legacy/math/Matrix.h" +#include "paddle/legacy/utils/Any.h" +#include "paddle/legacy/utils/ClassRegistrar.h" +#include "paddle/legacy/utils/Error.h" namespace paddle { @@ -29,7 +29,7 @@ namespace paddle { * The argument type of Function::init. */ class FuncConfig { -public: + public: template T get(const std::string& key, Error* err = nullptr) const { try { @@ -59,7 +59,7 @@ public: return *this; } -protected: + protected: mutable std::unordered_map valueMap_; }; @@ -77,7 +77,7 @@ protected: * in the BufferArgs life time. */ class BufferArgs { -public: + public: BufferArgs() {} ~BufferArgs() { @@ -137,7 +137,7 @@ public: void addArg(SparseMatrixArg& arg) { args_.push_back(&arg); } -private: + private: std::vector args_; // The BufferArg object is constructed and freed by BufferArgs. std::vector _args_; @@ -163,7 +163,7 @@ private: * If Function has more than one output, each output can have different modes. */ class FunctionBase { -public: + public: virtual ~FunctionBase() {} virtual void init(const FuncConfig& config) {} @@ -192,7 +192,7 @@ public: static ClassRegistrar funcRegistrar_; -protected: + protected: // numInputs_ and numOutputs_ represents the maximum // input and output supported by Function. // Some functions are optimized for input and output, diff --git a/paddle/function/FunctionTest.cpp b/paddle/legacy/function/FunctionTest.cpp similarity index 99% rename from paddle/function/FunctionTest.cpp rename to paddle/legacy/function/FunctionTest.cpp index f5e6ca3f51..1a0993e313 100644 --- a/paddle/function/FunctionTest.cpp +++ b/paddle/legacy/function/FunctionTest.cpp @@ -14,7 +14,7 @@ limitations under the License. */ #include "Function.h" #include -#include "paddle/math/SparseMatrix.h" +#include "paddle/legacy/math/SparseMatrix.h" namespace paddle { diff --git a/paddle/function/FunctionTest.h b/paddle/legacy/function/FunctionTest.h similarity index 98% rename from paddle/function/FunctionTest.h rename to paddle/legacy/function/FunctionTest.h index 56c3537b6a..6f01981a34 100644 --- a/paddle/function/FunctionTest.h +++ b/paddle/legacy/function/FunctionTest.h @@ -13,9 +13,9 @@ See the License for the specific language governing permissions and limitations under the License. 
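The new EigenThreadDevice.h above replaces the inline Eigen::DefaultDevice in EigenBlasGemm with a borrowed device, so the same contraction code runs on a shared Eigen::ThreadPoolDevice when EIGEN_USE_THREADS is defined. A minimal sketch of that call pattern, assuming only Eigen's unsupported Tensor module (tensor names and sizes are illustrative, not from the patch):

#include "paddle/legacy/function/EigenThreadDevice.h"

void ContractionExample() {
  Eigen::Tensor<float, 2> a(2, 3), b(3, 4), c(2, 4);
  a.setRandom();
  b.setRandom();
  Eigen::array<Eigen::IndexPair<int>, 1> dims = {Eigen::IndexPair<int>(1, 0)};
  // Thread-pool device when EIGEN_USE_THREADS is on, DefaultDevice otherwise.
  auto* device = paddle::EigenDeviceWarpper::device();
  c.device(*device) = a.contract(b, dims);
  // No-op for the shared pool; deletes the DefaultDevice otherwise.
  paddle::EigenDeviceWarpper::free_device(device);
}
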
*/ #include "Function.h" -#include "paddle/math/Matrix.h" -#include "paddle/math/SparseMatrix.h" -#include "paddle/math/tests/TensorCheck.h" +#include "paddle/legacy/math/Matrix.h" +#include "paddle/legacy/math/SparseMatrix.h" +#include "paddle/legacy/math/tests/TensorCheck.h" #include "paddle/testing/TestUtil.h" namespace paddle { @@ -39,7 +39,7 @@ struct Allocator { // Copy argument1 to argument2 template class CopyArgument { -public: + public: void operator()(const BufferArg& arg1, BufferArg& arg2) { CHECK_EQ(arg1.valueType(), arg2.valueType()); CHECK_LE(arg1.shape().getElements(), arg2.shape().getElements()); @@ -95,7 +95,7 @@ public: */ template class Compare2Function { -public: + public: typedef typename test::Allocator::type Allocator1; typedef typename test::Allocator::type Allocator2; typedef typename Tensor::Vector Vector1; @@ -305,7 +305,7 @@ public: std::shared_ptr getFunction2() const { return function2_; } -protected: + protected: // only init cpu argument, gpu argument copy from cpu argument. void initArg(BufferArg& arg) { Vector1 vector(arg.shape().getElements(), (real*)arg.data()); @@ -381,7 +381,7 @@ protected: } } -protected: + protected: std::shared_ptr function1_; std::shared_ptr function2_; std::vector> func1Memory_; @@ -400,7 +400,7 @@ protected: class CpuGpuFuncCompare : public Compare2Function { -public: + public: CpuGpuFuncCompare(const std::string& name, const FuncConfig& config) : Compare2Function(name + "-CPU", name + "-GPU", config) {} diff --git a/paddle/function/GemmConvOp.cpp b/paddle/legacy/function/GemmConvOp.cpp similarity index 99% rename from paddle/function/GemmConvOp.cpp rename to paddle/legacy/function/GemmConvOp.cpp index 2b7c6f9eab..5a81315661 100644 --- a/paddle/function/GemmConvOp.cpp +++ b/paddle/legacy/function/GemmConvOp.cpp @@ -15,7 +15,7 @@ limitations under the License. */ #include "ConvOp.h" #include "GemmFunctor.h" #include "Im2Col.h" -#include "paddle/math/MemoryHandle.h" +#include "paddle/legacy/math/MemoryHandle.h" namespace paddle { @@ -24,7 +24,7 @@ namespace paddle { */ template class GemmConvFunction : public ConvFunctionBase { -public: + public: void init(const FuncConfig& config) override { ConvFunctionBase::init(config); } @@ -136,7 +136,7 @@ public: */ template class GemmConvMobileFunction : public ConvFunctionBase { -public: + public: void init(const FuncConfig& config) override { ConvFunctionBase::init(config); } @@ -297,7 +297,7 @@ public: */ template class GemmConvGradInputFunction : public ConvFunctionBase { -public: + public: void init(const FuncConfig& config) override { ConvFunctionBase::init(config); } @@ -404,7 +404,7 @@ public: */ template class GemmConvGradFilterFunction : public ConvFunctionBase { -public: + public: void init(const FuncConfig& config) override { ConvFunctionBase::init(config); } diff --git a/paddle/function/GemmConvOpTest.cpp b/paddle/legacy/function/GemmConvOpTest.cpp similarity index 100% rename from paddle/function/GemmConvOpTest.cpp rename to paddle/legacy/function/GemmConvOpTest.cpp diff --git a/paddle/function/GemmFunctor.cpp b/paddle/legacy/function/GemmFunctor.cpp similarity index 98% rename from paddle/function/GemmFunctor.cpp rename to paddle/legacy/function/GemmFunctor.cpp index 0b1fe1b67d..450293dfee 100644 --- a/paddle/function/GemmFunctor.cpp +++ b/paddle/legacy/function/GemmFunctor.cpp @@ -13,7 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ #include "GemmFunctor.h" -#include "paddle/math/MathFunctions.h" +#include "paddle/legacy/math/MathFunctions.h" namespace paddle { diff --git a/paddle/function/GemmFunctor.h b/paddle/legacy/function/GemmFunctor.h similarity index 100% rename from paddle/function/GemmFunctor.h rename to paddle/legacy/function/GemmFunctor.h diff --git a/paddle/function/GruFunctor.h b/paddle/legacy/function/GruFunctor.h similarity index 100% rename from paddle/function/GruFunctor.h rename to paddle/legacy/function/GruFunctor.h diff --git a/paddle/function/Im2Col.h b/paddle/legacy/function/Im2Col.h similarity index 99% rename from paddle/function/Im2Col.h rename to paddle/legacy/function/Im2Col.h index 6a07787000..e0ce6918a2 100644 --- a/paddle/function/Im2Col.h +++ b/paddle/legacy/function/Im2Col.h @@ -70,7 +70,7 @@ enum ColFormat { kCFO = 0, kOCF = 1 }; */ template class Im2ColFunctor { -public: + public: void operator()(const T* imData, const TensorShape& imShape, T* colData, @@ -85,7 +85,7 @@ public: template class Col2ImFunctor { -public: + public: void operator()(T* imData, const TensorShape& imShape, const T* colData, @@ -100,7 +100,7 @@ public: template class Im2ColMobileFunctor { -public: + public: void operator()(const T* imData, const TensorShape& imShape, T* colData, diff --git a/paddle/function/Im2ColOp.cpp b/paddle/legacy/function/Im2ColOp.cpp similarity index 99% rename from paddle/function/Im2ColOp.cpp rename to paddle/legacy/function/Im2ColOp.cpp index ad2aed8f3c..55a3ff98db 100644 --- a/paddle/function/Im2ColOp.cpp +++ b/paddle/legacy/function/Im2ColOp.cpp @@ -23,7 +23,7 @@ namespace paddle { */ template class Im2ColFunctor { -public: + public: void operator()(const T* imData, const TensorShape& imShape, T* colData, @@ -75,7 +75,7 @@ public: */ template class Col2ImFunctor { -public: + public: void operator()(T* imData, const TensorShape& imShape, const T* colData, @@ -130,7 +130,7 @@ template class Col2ImFunctor; */ template class Im2ColFunctor { -public: + public: void operator()(const T* imData, const TensorShape& imShape, T* colData, @@ -188,7 +188,7 @@ public: */ template class Col2ImFunctor { -public: + public: void operator()(T* imData, const TensorShape& imShape, const T* colData, diff --git a/paddle/function/Im2ColOpGpu.cu b/paddle/legacy/function/Im2ColOpGpu.cu similarity index 99% rename from paddle/function/Im2ColOpGpu.cu rename to paddle/legacy/function/Im2ColOpGpu.cu index a944a0ee68..96dd8f528e 100644 --- a/paddle/function/Im2ColOpGpu.cu +++ b/paddle/legacy/function/Im2ColOpGpu.cu @@ -71,7 +71,7 @@ __global__ void im2col(const T* data_im, */ template class Im2ColFunctor { -public: + public: void operator()(const T* imData, const TensorShape& imShape, T* colData, @@ -184,7 +184,7 @@ __global__ void col2im(size_t n, */ template class Col2ImFunctor { -public: + public: void operator()(T* imData, const TensorShape& imShape, const T* colData, @@ -292,7 +292,7 @@ __global__ void im2colOCF(const T* imData, */ template class Im2ColFunctor { -public: + public: void operator()(const T* imData, const TensorShape& imShape, T* colData, @@ -399,7 +399,7 @@ __global__ void col2imOCF(T* imData, */ template class Col2ImFunctor { -public: + public: void operator()(T* imData, const TensorShape& imShape, const T* colData, diff --git a/paddle/function/Im2ColTest.cpp b/paddle/legacy/function/Im2ColTest.cpp similarity index 99% rename from paddle/function/Im2ColTest.cpp rename to paddle/legacy/function/Im2ColTest.cpp index 967c5b9153..2c5f06f389 100644 --- a/paddle/function/Im2ColTest.cpp +++ 
b/paddle/legacy/function/Im2ColTest.cpp @@ -15,8 +15,8 @@ limitations under the License. */ #include "Im2Col.h" #include #include "Function.h" -#include "paddle/math/Matrix.h" -#include "paddle/math/tests/TensorCheck.h" +#include "paddle/legacy/math/Matrix.h" +#include "paddle/legacy/math/tests/TensorCheck.h" namespace paddle { diff --git a/paddle/function/MulOp.cpp b/paddle/legacy/function/MulOp.cpp similarity index 99% rename from paddle/function/MulOp.cpp rename to paddle/legacy/function/MulOp.cpp index 90cd4a2b6d..750978fc90 100644 --- a/paddle/function/MulOp.cpp +++ b/paddle/legacy/function/MulOp.cpp @@ -14,8 +14,8 @@ limitations under the License. */ #include "MulOp.h" #include "GemmFunctor.h" -#include "paddle/math/SIMDFunctions.h" -#include "paddle/utils/ThreadLocal.h" +#include "paddle/legacy/math/SIMDFunctions.h" +#include "paddle/legacy/utils/ThreadLocal.h" namespace { inline void vecAddTo(real* a, const real* b, real scaleB, size_t len) { @@ -240,7 +240,7 @@ void MulOp(CpuMatrix& out, */ template class MulFunc : public FunctionBase { -public: + public: void init(const FuncConfig& config) override { aTrans_ = config.get("aTrans"); bTrans_ = config.get("bTrans"); @@ -335,7 +335,7 @@ public: } } -private: + private: bool aTrans_; bool bTrans_; }; diff --git a/paddle/function/MulOp.h b/paddle/legacy/function/MulOp.h similarity index 97% rename from paddle/function/MulOp.h rename to paddle/legacy/function/MulOp.h index e6057be4e5..ab33bde172 100644 --- a/paddle/function/MulOp.h +++ b/paddle/legacy/function/MulOp.h @@ -15,8 +15,8 @@ limitations under the License. */ #pragma once #include "Function.h" -#include "paddle/math/Matrix.h" -#include "paddle/math/SparseMatrix.h" +#include "paddle/legacy/math/Matrix.h" +#include "paddle/legacy/math/SparseMatrix.h" namespace paddle { /// CPU, dense matrix (+)= dense matrix * dense matrix diff --git a/paddle/function/MulOpGpu.cu b/paddle/legacy/function/MulOpGpu.cu similarity index 98% rename from paddle/function/MulOpGpu.cu rename to paddle/legacy/function/MulOpGpu.cu index d63416a8e4..217c983cb7 100644 --- a/paddle/function/MulOpGpu.cu +++ b/paddle/legacy/function/MulOpGpu.cu @@ -14,8 +14,8 @@ limitations under the License. */ #include "MulOp.h" #include "hl_base.h" -#include "paddle/math/Matrix.h" -#include "paddle/math/SparseMatrix.h" +#include "paddle/legacy/math/Matrix.h" +#include "paddle/legacy/math/SparseMatrix.h" namespace paddle { /// dense matrix (+)= dense matrix * dense matrix diff --git a/paddle/function/MulOpTest.cpp b/paddle/legacy/function/MulOpTest.cpp similarity index 98% rename from paddle/function/MulOpTest.cpp rename to paddle/legacy/function/MulOpTest.cpp index 4e1ebd749c..ab08b6f869 100644 --- a/paddle/function/MulOpTest.cpp +++ b/paddle/legacy/function/MulOpTest.cpp @@ -14,9 +14,9 @@ limitations under the License. 
*/ #include #include "FunctionTest.h" -#include "paddle/math/Matrix.h" -#include "paddle/math/SparseMatrix.h" -#include "paddle/math/tests/test_matrixUtil.h" +#include "paddle/legacy/math/Matrix.h" +#include "paddle/legacy/math/SparseMatrix.h" +#include "paddle/legacy/math/tests/test_matrixUtil.h" #include "paddle/testing/TestUtil.h" using namespace paddle; // NOLINT diff --git a/paddle/function/NaiveConvOp.cpp b/paddle/legacy/function/NaiveConvOp.cpp similarity index 99% rename from paddle/function/NaiveConvOp.cpp rename to paddle/legacy/function/NaiveConvOp.cpp index 22d3b33d0f..99c8b81acb 100644 --- a/paddle/function/NaiveConvOp.cpp +++ b/paddle/legacy/function/NaiveConvOp.cpp @@ -24,7 +24,7 @@ namespace paddle { */ template class NaiveConvFunctor { -public: + public: void operator()(const T* inputData, size_t batchSize, size_t inputChannels, @@ -85,7 +85,7 @@ public: template class NaiveConvFunction : public ConvFunctionBase { -public: + public: void init(const FuncConfig& config) override { ConvFunctionBase::init(config); } diff --git a/paddle/function/PadOp.cpp b/paddle/legacy/function/PadOp.cpp similarity index 98% rename from paddle/function/PadOp.cpp rename to paddle/legacy/function/PadOp.cpp index db6dd518ca..9d011d28e6 100644 --- a/paddle/function/PadOp.cpp +++ b/paddle/legacy/function/PadOp.cpp @@ -13,7 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "PadOp.h" -#include "paddle/math/Vector.h" +#include "paddle/legacy/math/Vector.h" namespace paddle { @@ -132,7 +132,7 @@ static inline PadConf castToPadConf(const FuncConfig& conf) { template class PadFunc : public FunctionBase { -public: + public: void init(const FuncConfig& config) override { pad_ = castToPadConf(config); } void calc(const BufferArgs& inputs, const BufferArgs& outputs) override { @@ -157,7 +157,7 @@ public: pad_); } -private: + private: PadConf pad_; }; @@ -173,7 +173,7 @@ private: template class PadGradFunc : public FunctionBase { -public: + public: void init(const FuncConfig& config) override { pad_ = castToPadConf(config); } void calc(const BufferArgs& inputs, const BufferArgs& outputs) override { @@ -201,7 +201,7 @@ public: pad_); } -private: + private: PadConf pad_; }; diff --git a/paddle/function/PadOp.h b/paddle/legacy/function/PadOp.h similarity index 100% rename from paddle/function/PadOp.h rename to paddle/legacy/function/PadOp.h diff --git a/paddle/function/PadOpGpu.cu b/paddle/legacy/function/PadOpGpu.cu similarity index 100% rename from paddle/function/PadOpGpu.cu rename to paddle/legacy/function/PadOpGpu.cu diff --git a/paddle/function/PadOpTest.cpp b/paddle/legacy/function/PadOpTest.cpp similarity index 100% rename from paddle/function/PadOpTest.cpp rename to paddle/legacy/function/PadOpTest.cpp diff --git a/paddle/function/RowConvOp.cpp b/paddle/legacy/function/RowConvOp.cpp similarity index 99% rename from paddle/function/RowConvOp.cpp rename to paddle/legacy/function/RowConvOp.cpp index 925860346e..3be50e80d7 100644 --- a/paddle/function/RowConvOp.cpp +++ b/paddle/legacy/function/RowConvOp.cpp @@ -14,7 +14,7 @@ limitations under the License. 
*/ #include "RowConvOp.h" #include -#include "paddle/math/Vector.h" +#include "paddle/legacy/math/Vector.h" namespace paddle { @@ -129,7 +129,7 @@ void RowConvGrad(const CpuMatrix& outG, template class RowConvFunc : public FunctionBase { -public: + public: void init(const FuncConfig& config) override {} void calc(const BufferArgs& inputs, const BufferArgs& outputs) override { @@ -176,7 +176,7 @@ public: template class RowConvGradFunc : public FunctionBase { // TODO(qingqing): split into RowConvDataFunc and RowConvWeightFunc -public: + public: void init(const FuncConfig& config) override {} void calc(const BufferArgs& inputs, const BufferArgs& outputs) override { diff --git a/paddle/function/RowConvOp.h b/paddle/legacy/function/RowConvOp.h similarity index 100% rename from paddle/function/RowConvOp.h rename to paddle/legacy/function/RowConvOp.h diff --git a/paddle/function/RowConvOpGpu.cu b/paddle/legacy/function/RowConvOpGpu.cu similarity index 99% rename from paddle/function/RowConvOpGpu.cu rename to paddle/legacy/function/RowConvOpGpu.cu index f820ee9a97..a6d2e4c7e3 100644 --- a/paddle/function/RowConvOpGpu.cu +++ b/paddle/legacy/function/RowConvOpGpu.cu @@ -12,8 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/cuda/include/hl_base.h" -#include "paddle/function/RowConvOp.h" +#include "paddle/legacy/cuda/include/hl_base.h" +#include "paddle/legacy/function/RowConvOp.h" namespace paddle { diff --git a/paddle/function/RowConvOpTest.cpp b/paddle/legacy/function/RowConvOpTest.cpp similarity index 100% rename from paddle/function/RowConvOpTest.cpp rename to paddle/legacy/function/RowConvOpTest.cpp diff --git a/paddle/function/ScaleSubRegionOp.cpp b/paddle/legacy/function/ScaleSubRegionOp.cpp similarity index 98% rename from paddle/function/ScaleSubRegionOp.cpp rename to paddle/legacy/function/ScaleSubRegionOp.cpp index 6ed6eb2dba..03a422a740 100644 --- a/paddle/function/ScaleSubRegionOp.cpp +++ b/paddle/legacy/function/ScaleSubRegionOp.cpp @@ -13,7 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ #include "ScaleSubRegionOp.h" -#include "paddle/function/TensorShape.h" +#include "paddle/legacy/function/TensorShape.h" namespace paddle { @@ -92,7 +92,7 @@ void ScaleSubRegionGrad(const real* inGrad, */ template class ScaleSubRegionFunc : public FunctionBase { -public: + public: void init(const FuncConfig& config) override { conf_ = config; } void calc(const BufferArgs& inputs, const BufferArgs& outputs) override { @@ -109,7 +109,7 @@ public: conf_); } -private: + private: FuncConfig conf_; }; @@ -124,7 +124,7 @@ private: template class ScaleSubRegionGradFunc : public FunctionBase { -public: + public: void init(const FuncConfig& config) override { conf_ = config; } void calc(const BufferArgs& inputs, const BufferArgs& outputs) override { @@ -141,7 +141,7 @@ public: conf_); } -private: + private: FuncConfig conf_; }; diff --git a/paddle/function/ScaleSubRegionOp.h b/paddle/legacy/function/ScaleSubRegionOp.h similarity index 100% rename from paddle/function/ScaleSubRegionOp.h rename to paddle/legacy/function/ScaleSubRegionOp.h diff --git a/paddle/function/ScaleSubRegionOpGpu.cu b/paddle/legacy/function/ScaleSubRegionOpGpu.cu similarity index 100% rename from paddle/function/ScaleSubRegionOpGpu.cu rename to paddle/legacy/function/ScaleSubRegionOpGpu.cu diff --git a/paddle/function/ScaleSubRegionOpTest.cpp b/paddle/legacy/function/ScaleSubRegionOpTest.cpp similarity index 100% rename from paddle/function/ScaleSubRegionOpTest.cpp rename to paddle/legacy/function/ScaleSubRegionOpTest.cpp diff --git a/paddle/function/SwitchOp.cpp b/paddle/legacy/function/SwitchOp.cpp similarity index 98% rename from paddle/function/SwitchOp.cpp rename to paddle/legacy/function/SwitchOp.cpp index 50e1d6c04c..c6accd1803 100644 --- a/paddle/function/SwitchOp.cpp +++ b/paddle/legacy/function/SwitchOp.cpp @@ -13,7 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "SwitchOp.h" -#include "paddle/math/Vector.h" +#include "paddle/legacy/math/Vector.h" namespace paddle { @@ -75,7 +75,7 @@ void NHWC2NCHW(real* outputs, */ template class NCHW2NHWCFunc : public FunctionBase { -public: + public: void init(const FuncConfig& config) override {} void calc(const BufferArgs& inputs, const BufferArgs& outputs) override { @@ -108,7 +108,7 @@ public: */ template class NHWC2NCHWFunc : public FunctionBase { -public: + public: void init(const FuncConfig& config) override {} void calc(const BufferArgs& inputs, const BufferArgs& outputs) override { diff --git a/paddle/function/SwitchOp.h b/paddle/legacy/function/SwitchOp.h similarity index 100% rename from paddle/function/SwitchOp.h rename to paddle/legacy/function/SwitchOp.h diff --git a/paddle/function/SwitchOpGpu.cu b/paddle/legacy/function/SwitchOpGpu.cu similarity index 100% rename from paddle/function/SwitchOpGpu.cu rename to paddle/legacy/function/SwitchOpGpu.cu diff --git a/paddle/function/SwitchOpTest.cpp b/paddle/legacy/function/SwitchOpTest.cpp similarity index 100% rename from paddle/function/SwitchOpTest.cpp rename to paddle/legacy/function/SwitchOpTest.cpp diff --git a/paddle/function/TensorShape.h b/paddle/legacy/function/TensorShape.h similarity index 99% rename from paddle/function/TensorShape.h rename to paddle/legacy/function/TensorShape.h index 02d38c32c0..d4d1eae396 100644 --- a/paddle/function/TensorShape.h +++ b/paddle/legacy/function/TensorShape.h @@ -22,7 +22,7 @@ namespace paddle { * TensorShape used to represent shape of normal tensor. 
*/ class TensorShape { -public: + public: TensorShape() : ndims_(0), nelements_(0) { initDims(0); } TensorShape(size_t ndims) : ndims_(ndims), nelements_(1) { initDims(ndims); }; @@ -80,7 +80,7 @@ public: bool operator!=(const TensorShape& t) const { return !(*this == t); } -private: + private: // compute number of elements void numElements() { nelements_ = 1; diff --git a/paddle/function/TensorShapeTest.cpp b/paddle/legacy/function/TensorShapeTest.cpp similarity index 100% rename from paddle/function/TensorShapeTest.cpp rename to paddle/legacy/function/TensorShapeTest.cpp diff --git a/paddle/function/TensorType.h b/paddle/legacy/function/TensorType.h similarity index 98% rename from paddle/function/TensorType.h rename to paddle/legacy/function/TensorType.h index b384591bd8..13994821be 100644 --- a/paddle/function/TensorType.h +++ b/paddle/legacy/function/TensorType.h @@ -14,7 +14,7 @@ limitations under the License. */ #pragma once -#include "paddle/math/Matrix.h" +#include "paddle/legacy/math/Matrix.h" namespace paddle { diff --git a/paddle/function/TensorTypeTest.cpp b/paddle/legacy/function/TensorTypeTest.cpp similarity index 100% rename from paddle/function/TensorTypeTest.cpp rename to paddle/legacy/function/TensorTypeTest.cpp diff --git a/paddle/function/neon/NeonDepthwiseConv.cpp b/paddle/legacy/function/neon/NeonDepthwiseConv.cpp similarity index 85% rename from paddle/function/neon/NeonDepthwiseConv.cpp rename to paddle/legacy/function/neon/NeonDepthwiseConv.cpp index d3298c7538..6179635a9f 100644 --- a/paddle/function/neon/NeonDepthwiseConv.cpp +++ b/paddle/legacy/function/neon/NeonDepthwiseConv.cpp @@ -13,7 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "NeonDepthwiseConv.h" -#include "paddle/function/ConvOp.h" +#include "paddle/legacy/function/ConvOp.h" namespace paddle { @@ -21,7 +21,7 @@ namespace paddle { template class NeonDepthwiseConvFunction : public ConvFunctionBase { -public: + public: void init(const FuncConfig& config) override { ConvFunctionBase::init(config); } @@ -66,18 +66,18 @@ public: float* inputPadding = inputData; int padInputHeight = inputHeight + 2 * paddingH(); int padInputWidth = inputWidth + 2 * paddingW(); - if (paddingH() > 0 || paddingW() > 0) { - int newSize = batchSize * inputChannels * padInputHeight * padInputWidth; - resizeBuffer(newSize); - inputPadding = reinterpret_cast(memory_->getBuf()); - neon::Padding::run(inputData, - inputPadding, - batchSize * inputChannels, - inputHeight, - inputWidth, - padInputHeight, - padInputWidth); - } + int newSize = + batchSize * (inputChannels + 1) * padInputHeight * padInputWidth; + + resizeBuffer(newSize); + inputPadding = reinterpret_cast(memory_->getBuf()); + neon::Padding::run(inputData, + inputPadding, + batchSize * inputChannels, + inputHeight, + inputWidth, + padInputHeight, + padInputWidth); std::function diff --git a/paddle/function/neon/NeonDepthwiseConv.h b/paddle/legacy/function/neon/NeonDepthwiseConv.h similarity index 100% rename from paddle/function/neon/NeonDepthwiseConv.h rename to paddle/legacy/function/neon/NeonDepthwiseConv.h diff --git a/paddle/function/neon/NeonDepthwiseConvTranspose.cpp b/paddle/legacy/function/neon/NeonDepthwiseConvTranspose.cpp similarity index 98% rename from paddle/function/neon/NeonDepthwiseConvTranspose.cpp rename to paddle/legacy/function/neon/NeonDepthwiseConvTranspose.cpp index d443d3fa49..feb77e1ff9 100644 --- a/paddle/function/neon/NeonDepthwiseConvTranspose.cpp +++ 
b/paddle/legacy/function/neon/NeonDepthwiseConvTranspose.cpp @@ -13,7 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "NeonDepthwiseConv.h" -#include "paddle/function/ConvOp.h" +#include "paddle/legacy/function/ConvOp.h" namespace paddle { @@ -21,7 +21,7 @@ namespace paddle { template class NeonDepthwiseConvTransposeFunction : public ConvFunctionBase { -public: + public: void init(const FuncConfig& config) override { ConvFunctionBase::init(config); } diff --git a/paddle/function/neon/neon_util.h b/paddle/legacy/function/neon/neon_util.h similarity index 100% rename from paddle/function/neon/neon_util.h rename to paddle/legacy/function/neon/neon_util.h diff --git a/paddle/function/nnpack/NNPACKConvOp.cpp b/paddle/legacy/function/nnpack/NNPACKConvOp.cpp similarity index 99% rename from paddle/function/nnpack/NNPACKConvOp.cpp rename to paddle/legacy/function/nnpack/NNPACKConvOp.cpp index 3cdba4f2ed..81c832e774 100644 --- a/paddle/function/nnpack/NNPACKConvOp.cpp +++ b/paddle/legacy/function/nnpack/NNPACKConvOp.cpp @@ -13,7 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "nnpack.h" -#include "paddle/function/ConvOp.h" +#include "paddle/legacy/function/ConvOp.h" DEFINE_bool(nnpack_allocate_outside, true, @@ -46,7 +46,7 @@ nnp_convolution_algorithm get_nnp_convolution_algorithm( template class NNPACKConvFunction : public ConvFunctionBase { -public: + public: void init(const FuncConfig& config) override { ConvFunctionBase::init(config); algorithm_ = get_nnp_convolution_algorithm(config.get("algo")); @@ -231,7 +231,7 @@ public: } } -private: + private: nnp_convolution_algorithm algorithm_; nnp_convolution_transform_strategy transform_strategy_; void* workspaceBuffer_; diff --git a/paddle/function/nnpack/NNPACKConvOpTest.cpp b/paddle/legacy/function/nnpack/NNPACKConvOpTest.cpp similarity index 95% rename from paddle/function/nnpack/NNPACKConvOpTest.cpp rename to paddle/legacy/function/nnpack/NNPACKConvOpTest.cpp index c80ffb5d5d..a2db83f5a3 100644 --- a/paddle/function/nnpack/NNPACKConvOpTest.cpp +++ b/paddle/legacy/function/nnpack/NNPACKConvOpTest.cpp @@ -13,7 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #include -#include "paddle/function/ConvOpTest.h" +#include "paddle/legacy/function/ConvOpTest.h" namespace paddle { diff --git a/paddle/gserver/CMakeLists.txt b/paddle/legacy/gserver/CMakeLists.txt similarity index 97% rename from paddle/gserver/CMakeLists.txt rename to paddle/legacy/gserver/CMakeLists.txt index 3d6ced713f..6dc877dd90 100644 --- a/paddle/gserver/CMakeLists.txt +++ b/paddle/legacy/gserver/CMakeLists.txt @@ -146,8 +146,6 @@ else() ${GSERVER_SOURCES}) endif() -add_style_check_target(paddle_gserver ${GSERVER_SOURCES}) -add_style_check_target(paddle_gserver ${GSERVER_HEADER}) add_dependencies(paddle_gserver paddle_proto ${external_project_dependencies}) if(WITH_TESTING) add_subdirectory(tests) diff --git a/paddle/gserver/activations/ActivationFunction.cpp b/paddle/legacy/gserver/activations/ActivationFunction.cpp similarity index 97% rename from paddle/gserver/activations/ActivationFunction.cpp rename to paddle/legacy/gserver/activations/ActivationFunction.cpp index 8d8f01234f..ae07c7e6d7 100644 --- a/paddle/gserver/activations/ActivationFunction.cpp +++ b/paddle/legacy/gserver/activations/ActivationFunction.cpp @@ -20,9 +20,9 @@ limitations under the License. 
*/ #include #include #include -#include "paddle/parameter/Argument.h" -#include "paddle/utils/ClassRegistrar.h" -#include "paddle/utils/Logging.h" +#include "paddle/legacy/parameter/Argument.h" +#include "paddle/legacy/utils/ClassRegistrar.h" +#include "paddle/legacy/utils/Logging.h" #ifdef PADDLE_WITH_MKLDNN #include "MKLDNNActivation.h" @@ -44,10 +44,10 @@ static ClassRegistrar gActivationRegistrar; */ #define BEGIN_DEFINE_ACTIVATION(ACTIVATION_NAME) \ class ACTIVATION_CLASS_NAME(ACTIVATION_NAME) : public ActivationFunction { \ - private: \ + private: \ static const std::string name; \ \ - public: \ + public: \ const std::string& getName() const { return name; } /** * @def END_DEFINE_ACTIVATION @@ -70,7 +70,7 @@ static ClassRegistrar gActivationRegistrar; * Do nothing when forward/backward. */ class IdentityActivation : public ActivationFunction { -public: + public: static const std::string name; Error __must_check forward(Argument& act) { (void)act; diff --git a/paddle/gserver/activations/ActivationFunction.h b/paddle/legacy/gserver/activations/ActivationFunction.h similarity index 97% rename from paddle/gserver/activations/ActivationFunction.h rename to paddle/legacy/gserver/activations/ActivationFunction.h index 0f4b0fe0ab..8bc5b0f529 100644 --- a/paddle/gserver/activations/ActivationFunction.h +++ b/paddle/legacy/gserver/activations/ActivationFunction.h @@ -15,7 +15,7 @@ limitations under the License. */ #pragma once #include #include -#include "paddle/utils/Error.h" +#include "paddle/legacy/utils/Error.h" namespace paddle { @@ -31,7 +31,7 @@ struct Argument; * */ class ActivationFunction { -public: + public: static ActivationFunction* create(const std::string& type); static std::vector getAllRegisteredTypes(); diff --git a/paddle/gserver/activations/MKLDNNActivation.cpp b/paddle/legacy/gserver/activations/MKLDNNActivation.cpp similarity index 96% rename from paddle/gserver/activations/MKLDNNActivation.cpp rename to paddle/legacy/gserver/activations/MKLDNNActivation.cpp index 56ffb83934..2eed7af70a 100644 --- a/paddle/gserver/activations/MKLDNNActivation.cpp +++ b/paddle/legacy/gserver/activations/MKLDNNActivation.cpp @@ -14,7 +14,7 @@ limitations under the License. */ #include "MKLDNNActivation.h" #include "mkldnn.hpp" -#include "paddle/utils/ClassRegistrar.h" +#include "paddle/legacy/utils/ClassRegistrar.h" namespace paddle { @@ -35,10 +35,10 @@ static ClassRegistrar gMKLDNNActivationRegistrar; * @def END_MKLDNN_ACTIVATION */ #define END_MKLDNN_ACTIVATION(ACT_TYPE) \ -private: \ + private: \ static const std::string name; \ \ -public: \ + public: \ const std::string& getName() const { return name; } \ } \ ; \ @@ -63,11 +63,11 @@ public: \ #define DEFINE_MKLDNN_ELTWISE_ACTIVATION( \ ACT_TYPE, BASE_CLASS, ALPHA, BWD_ALPHA) \ BEGIN_MKLDNN_ACTIVATION(ACT_TYPE, BASE_CLASS) \ -private: \ + private: \ static const float alpha; \ static const float bwdAlpha; \ \ -public: \ + public: \ float getAlpha() const { return alpha; } \ float getBwdAlpha() const { return bwdAlpha; } \ END_MKLDNN_ACTIVATION(ACT_TYPE) \ diff --git a/paddle/gserver/activations/MKLDNNActivation.h b/paddle/legacy/gserver/activations/MKLDNNActivation.h similarity index 94% rename from paddle/gserver/activations/MKLDNNActivation.h rename to paddle/legacy/gserver/activations/MKLDNNActivation.h index 392b32c70d..59c447ad07 100644 --- a/paddle/gserver/activations/MKLDNNActivation.h +++ b/paddle/legacy/gserver/activations/MKLDNNActivation.h @@ -15,9 +15,9 @@ limitations under the License. 
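The BEGIN_DEFINE_ACTIVATION / END_DEFINE_ACTIVATION pair above wraps an ActivationFunction subclass around a forward/backward method pair. A schematic use of the macros, modeled on the IdentityActivation shown earlier (the bodies are illustrative, not copied from the file):

BEGIN_DEFINE_ACTIVATION(identity_example)
Error __must_check forward(Argument& act) {
  (void)act;  // leave the activation value untouched
  return Error();
}
Error __must_check backward(Argument& act) {
  (void)act;  // gradient likewise passes through
  return Error();
}
END_DEFINE_ACTIVATION(identity_example)
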
*/ #pragma once #include "ActivationFunction.h" #include "mkldnn.hpp" -#include "paddle/gserver/layers/MKLDNNBase.h" -#include "paddle/math/MKLDNNMatrix.h" -#include "paddle/parameter/Argument.h" +#include "paddle/legacy/gserver/layers/MKLDNNBase.h" +#include "paddle/legacy/math/MKLDNNMatrix.h" +#include "paddle/legacy/parameter/Argument.h" namespace paddle { @@ -27,7 +27,7 @@ namespace paddle { * including mkldnn_relu, mkldnn_elu, mkldnn_tanh, mkldnn_softmax */ class MKLDNNActivation : public ActivationFunction { -protected: + protected: // input value element count size_t cnt_; // should not merge the resetBwd into resetFwd, @@ -43,7 +43,7 @@ protected: std::vector pipelineFwd_; std::vector pipelineBwd_; -public: + public: MKLDNNActivation() : cnt_(0), needResetBwd_(true) {} ~MKLDNNActivation() {} static ActivationFunction* create(const std::string& type); @@ -72,7 +72,7 @@ class MKLDNNEltwiseActivation : public MKLDNNActivation { typedef mkldnn::eltwise_backward eltwise_bwd; typedef mkldnn::algorithm algorithm; -protected: + protected: // save the forward primitive desc, which can be used backward std::shared_ptr fwdPD_; // eltwise_bwd need src input value @@ -80,7 +80,7 @@ protected: // use for copy data std::shared_ptr copyInVal_; -public: + public: MKLDNNEltwiseActivation() {} ~MKLDNNEltwiseActivation() {} virtual const std::string& getName() const = 0; @@ -102,12 +102,12 @@ public: class MKLDNNSoftmaxActivation : public MKLDNNActivation { typedef mkldnn::softmax_forward softmax_fwd; -private: + private: // for backward MatrixPtr sftMaxSum_; MatrixPtr sftMaxDot_; -public: + public: MKLDNNSoftmaxActivation() {} ~MKLDNNSoftmaxActivation() {} virtual const std::string& getName() const = 0; diff --git a/paddle/gserver/dataproviders/DataProvider.cpp b/paddle/legacy/gserver/dataproviders/DataProvider.cpp similarity index 98% rename from paddle/gserver/dataproviders/DataProvider.cpp rename to paddle/legacy/gserver/dataproviders/DataProvider.cpp index 580cf821c6..b67af8a326 100644 --- a/paddle/gserver/dataproviders/DataProvider.cpp +++ b/paddle/legacy/gserver/dataproviders/DataProvider.cpp @@ -16,10 +16,10 @@ limitations under the License. */ #include #include -#include "paddle/utils/Logging.h" -#include "paddle/utils/Stat.h" -#include "paddle/utils/StringUtil.h" -#include "paddle/utils/Util.h" +#include "paddle/legacy/utils/Logging.h" +#include "paddle/legacy/utils/Stat.h" +#include "paddle/legacy/utils/StringUtil.h" +#include "paddle/legacy/utils/Util.h" namespace paddle { diff --git a/paddle/gserver/dataproviders/DataProvider.h b/paddle/legacy/gserver/dataproviders/DataProvider.h similarity index 95% rename from paddle/gserver/dataproviders/DataProvider.h rename to paddle/legacy/gserver/dataproviders/DataProvider.h index 4851168aba..c2e1c5fdd6 100644 --- a/paddle/gserver/dataproviders/DataProvider.h +++ b/paddle/legacy/gserver/dataproviders/DataProvider.h @@ -25,17 +25,17 @@ limitations under the License. 
*/ #include #include "DataConfig.pb.h" -#include "paddle/math/Matrix.h" -#include "paddle/math/SparseMatrix.h" -#include "paddle/math/Vector.h" -#include "paddle/parameter/Argument.h" -#include "paddle/utils/ClassRegistrar.h" -#include "paddle/utils/Common.h" -#include "paddle/utils/Locks.h" -#include "paddle/utils/Logging.h" -#include "paddle/utils/Queue.h" -#include "paddle/utils/ThreadLocal.h" -#include "paddle/utils/Util.h" +#include "paddle/legacy/math/Matrix.h" +#include "paddle/legacy/math/SparseMatrix.h" +#include "paddle/legacy/math/Vector.h" +#include "paddle/legacy/parameter/Argument.h" +#include "paddle/legacy/utils/ClassRegistrar.h" +#include "paddle/legacy/utils/Common.h" +#include "paddle/legacy/utils/Locks.h" +#include "paddle/legacy/utils/Logging.h" +#include "paddle/legacy/utils/Queue.h" +#include "paddle/legacy/utils/ThreadLocal.h" +#include "paddle/legacy/utils/Util.h" namespace paddle { /** @@ -71,7 +71,7 @@ typedef std::shared_ptr BufferBatchPtr; * @brief Data for batch training a neural network */ class DataBatch { -public: + public: DataBatch() : size_(0) { data_.clear(); } /** * @brief Get batch size @@ -181,7 +181,7 @@ public: } } -protected: + protected: /** * @brief batch size */ @@ -194,7 +194,7 @@ protected: }; class BufferBatch { -public: + public: BufferBatch() { hlStream_ = HPPL_STREAM_DEFAULT; hlEvent_ = NULL; @@ -235,7 +235,7 @@ public: void swap(BufferBatch* bufBatch); void clone(DataBatch* srcBatch, bool useGpu); -protected: + protected: DataBatch* batchData_; hl_stream_t hlStream_; hl_event_t hlEvent_; @@ -247,7 +247,7 @@ typedef std::shared_ptr DataProviderPtr; typedef Queue BufferBatchQueue; class DoubleBuffer { -public: + public: DoubleBuffer(DataProvider* dataPool, bool useGpu, int64_t batchSize = 0); virtual ~DoubleBuffer(); void removeOneBatch(DataBatch* dataBatch); @@ -267,7 +267,7 @@ public: void setPending(bool pending) { pending_ = pending; } -protected: + protected: virtual void asyncLoadBatch(); void insertOneBatch(DataBatch* batch); @@ -290,7 +290,7 @@ protected: * one is for input, one is for label. */ class DataProvider { -public: + public: static ClassRegistrar registrar_; static DataProvider* create(const DataConfig& config, const ModelConfig& modelConfig, @@ -359,7 +359,7 @@ public: */ virtual int64_t getNextBatchInternal(int64_t size, DataBatch* batch) = 0; -protected: + protected: DataConfig config_; bool skipShuffle_; float usageRatio_; @@ -382,7 +382,7 @@ protected: * necessary configurations such as stream_names */ class DummyDataProvider : public DataProvider { -public: + public: DummyDataProvider(const DataConfig& config, bool useGpu) : DataProvider(config, useGpu) {} virtual void shuffle() {} @@ -399,7 +399,7 @@ public: * Data provider for one input and one integer label. */ class SimpleDataProviderBase : public DataProvider { -protected: + protected: /// sample feature dimension int64_t sampleDim_; /// the number of samples @@ -425,7 +425,7 @@ protected: RWLock lock_; -public: + public: SimpleDataProviderBase(const DataConfig& config, bool useGpu, bool withInfo); ~SimpleDataProviderBase() {} @@ -440,7 +440,7 @@ public: /// return the number of samples in the buffer int64_t fillBuffer(); -protected: + protected: /** * @brief Fill at most size samples into data and label. 
@@ -440,7 +440,7 @@ public:
  /// return the number of samples in the buffer
  int64_t fillBuffer();

-protected:
+ protected:
  /**
   * @brief Fill at most size samples into data and label.
   *
@@ -458,12 +458,12 @@ protected:
};

class SimpleDataProvider : public SimpleDataProviderBase {
-public:
+ public:
  SimpleDataProvider(const DataConfig& config, bool useGpu);
  ~SimpleDataProvider();
  virtual void reset();

-protected:
+ protected:
  void loadData(const std::string& fileName);
  void loadDataFile(const std::string& fileName);
  virtual int64_t fillBufferImp(real* data,
@@ -471,7 +471,7 @@ protected:
                                int* info,
                                int64_t size);

-protected:
+ protected:
  size_t currentSampleIndex_;
  std::vector labels_;
  std::vector data_;

diff --git a/paddle/gserver/dataproviders/DataProviderGroup.h b/paddle/legacy/gserver/dataproviders/DataProviderGroup.h
similarity index 99%
rename from paddle/gserver/dataproviders/DataProviderGroup.h
rename to paddle/legacy/gserver/dataproviders/DataProviderGroup.h
index 768e54fe82..91c94dc986 100644
--- a/paddle/gserver/dataproviders/DataProviderGroup.h
+++ b/paddle/legacy/gserver/dataproviders/DataProviderGroup.h
@@ -20,7 +20,7 @@ namespace paddle {

template <class T>
class DataProviderGroup : public DataProvider {
-protected:
+ protected:
  typedef T ProviderType;
  typedef std::shared_ptr<ProviderType> ProviderPtrType;
  ProviderPtrType provider_;
@@ -29,7 +29,7 @@ protected:
  std::mutex lock_;
  std::unique_ptr> loader_;

-public:
+ public:
  DataProviderGroup(const DataConfig& config, bool useGpu);
  ~DataProviderGroup() {}
@@ -38,7 +38,7 @@ public:
  virtual int64_t getSize() { return -1; }
  virtual int64_t getNextBatchInternal(int64_t size, DataBatch* batch);

-private:
+ private:
  void startLoader();
  void stopLoader();
  void forceStopLoader();

diff --git a/paddle/gserver/dataproviders/MultiDataProvider.cpp b/paddle/legacy/gserver/dataproviders/MultiDataProvider.cpp
similarity index 98%
rename from paddle/gserver/dataproviders/MultiDataProvider.cpp
rename to paddle/legacy/gserver/dataproviders/MultiDataProvider.cpp
index f71947ef39..e5fc6d8a88 100644
--- a/paddle/gserver/dataproviders/MultiDataProvider.cpp
+++ b/paddle/legacy/gserver/dataproviders/MultiDataProvider.cpp
@@ -14,8 +14,8 @@ limitations under the License. */

#include "MultiDataProvider.h"
#include
-#include "paddle/utils/Logging.h"
-#include "paddle/utils/Util.h"
+#include "paddle/legacy/utils/Logging.h"
+#include "paddle/legacy/utils/Util.h"

namespace paddle {

diff --git a/paddle/gserver/dataproviders/MultiDataProvider.h b/paddle/legacy/gserver/dataproviders/MultiDataProvider.h
similarity index 97%
rename from paddle/gserver/dataproviders/MultiDataProvider.h
rename to paddle/legacy/gserver/dataproviders/MultiDataProvider.h
index 9a863c8967..baa1fc0190 100644
--- a/paddle/gserver/dataproviders/MultiDataProvider.h
+++ b/paddle/legacy/gserver/dataproviders/MultiDataProvider.h
@@ -19,10 +19,10 @@ limitations under the License.
 */

namespace paddle {

class MultiDataProvider : public DataProvider {
-protected:
+ protected:
  std::vector> subDataProviders_;

-public:
+ public:
  MultiDataProvider(const DataConfig& config,
                    const ModelConfig& modelConfig,
                    bool useGpu);
@@ -33,7 +33,7 @@ public:
  virtual int64_t getNextBatchInternal(int64_t size, DataBatch* batch);
  bool isTestMode() const { return isTestMode_; }

-private:
+ private:
  int totalDataRatio_;
  bool isTestMode_;
};

diff --git a/paddle/gserver/dataproviders/ProtoReader.h b/paddle/legacy/gserver/dataproviders/ProtoReader.h
similarity index 99%
rename from paddle/gserver/dataproviders/ProtoReader.h
rename to paddle/legacy/gserver/dataproviders/ProtoReader.h
index 786703f4de..08d045226e 100644
--- a/paddle/gserver/dataproviders/ProtoReader.h
+++ b/paddle/legacy/gserver/dataproviders/ProtoReader.h
@@ -28,7 +28,7 @@ namespace paddle {
 * messages from/to i/ostream.
 */
class ProtoReader {
-public:
+ public:
  explicit ProtoReader(std::istream* s, bool dataCompression = false) {
    CHECK(s) << "istream pointer is nullptr";
    istreamInput_.reset(new google::protobuf::io::IstreamInputStream(s));
@@ -109,7 +109,7 @@ public:
    return true;
  }

-protected:
+ protected:
  std::unique_ptr<google::protobuf::io::IstreamInputStream> istreamInput_;
  std::unique_ptr<google::protobuf::io::GzipInputStream> gzipInput_;
  std::unique_ptr<google::protobuf::io::CodedInputStream> codedInput_;
@@ -144,7 +144,7 @@ protected:
};

class ProtoWriter {
-public:
+ public:
  explicit ProtoWriter(std::ostream* s, bool dataCompression = false) {
    CHECK(s) << "ostream pointer is nullptr";
    ostreamOutput_.reset(new google::protobuf::io::OstreamOutputStream(s));
@@ -168,7 +168,7 @@ public:
    return ret;
  }

-protected:
+ protected:
  std::unique_ptr<google::protobuf::io::OstreamOutputStream> ostreamOutput_;
  std::unique_ptr<google::protobuf::io::GzipOutputStream> gzipOutput_;
  std::unique_ptr<google::protobuf::io::CodedOutputStream> codedOutput_;
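The reader/writer pair above wraps protobuf's stream classes around a std::istream/std::ostream. A hypothetical round trip; only the constructors appear in these hunks, so the read()/write() method names are assumptions:

    #include <fstream>
    #include <string>

    void copyRecords(const std::string& src, const std::string& dst,
                     google::protobuf::Message* msg) {
      std::ifstream in(src, std::ios::binary);
      std::ofstream out(dst, std::ios::binary);
      paddle::ProtoReader reader(&in, /*dataCompression=*/true);
      paddle::ProtoWriter writer(&out, /*dataCompression=*/true);
      while (reader.read(msg)) {  // assumed API: false at end of stream
        writer.write(*msg);       // assumed API
      }
    }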
diff --git a/paddle/gserver/dataproviders/PyDataProvider.cpp b/paddle/legacy/gserver/dataproviders/PyDataProvider.cpp
similarity index 99%
rename from paddle/gserver/dataproviders/PyDataProvider.cpp
rename to paddle/legacy/gserver/dataproviders/PyDataProvider.cpp
index dadf1b4cf2..0827bd39d4 100644
--- a/paddle/gserver/dataproviders/PyDataProvider.cpp
+++ b/paddle/legacy/gserver/dataproviders/PyDataProvider.cpp
@@ -13,9 +13,9 @@ See the License for the specific language governing permissions and
limitations under the License. */

#include "PyDataProvider.h"
-#include "paddle/utils/Common.h"
-#include "paddle/utils/PythonUtil.h"
-#include "paddle/utils/Util.h"
+#include "paddle/legacy/utils/Common.h"
+#include "paddle/legacy/utils/PythonUtil.h"
+#include "paddle/legacy/utils/Util.h"

namespace paddle {

diff --git a/paddle/gserver/dataproviders/PyDataProvider.h b/paddle/legacy/gserver/dataproviders/PyDataProvider.h
similarity index 98%
rename from paddle/gserver/dataproviders/PyDataProvider.h
rename to paddle/legacy/gserver/dataproviders/PyDataProvider.h
index e53354c9e4..4b8bea04a1 100644
--- a/paddle/gserver/dataproviders/PyDataProvider.h
+++ b/paddle/legacy/gserver/dataproviders/PyDataProvider.h
@@ -14,7 +14,7 @@ limitations under the License. */

#pragma once

-#include
+#include

#include "DataFormat.pb.h"
#include "DataProvider.h"
@@ -23,7 +23,7 @@ limitations under the License. */
namespace paddle {

class PyDataProvider : public DataProvider {
-public:
+ public:
  PyDataProvider(const DataConfig& config,
                 bool useGpu,
                 bool loadDataAll = true);
@@ -40,7 +40,7 @@ public:

  virtual int64_t getNextBatchInternal(int64_t size, DataBatch* batch);

-protected:
+ protected:
  struct ProtoSlot;
  // return false if each sample is one sequence, i.e., independent
  // of other samples.
@@ -73,7 +73,7 @@ protected:
  void resetSlots();
  void loadData(const std::vector& fileList);

-protected:
+ protected:
  struct ProtoSlot {
    SlotDef::SlotType type;
    int dim;

diff --git a/paddle/gserver/dataproviders/PyDataProvider2.cpp b/paddle/legacy/gserver/dataproviders/PyDataProvider2.cpp
similarity index 98%
rename from paddle/gserver/dataproviders/PyDataProvider2.cpp
rename to paddle/legacy/gserver/dataproviders/PyDataProvider2.cpp
index b4215bb307..8e931e4061 100644
--- a/paddle/gserver/dataproviders/PyDataProvider2.cpp
+++ b/paddle/legacy/gserver/dataproviders/PyDataProvider2.cpp
@@ -25,9 +25,9 @@ limitations under the License. */

#include "DataProvider.h"

-#include "paddle/utils/Locks.h"
-#include "paddle/utils/PythonUtil.h"
-#include "paddle/utils/Stat.h"
+#include "paddle/legacy/utils/Locks.h"
+#include "paddle/legacy/utils/PythonUtil.h"
+#include "paddle/legacy/utils/Stat.h"

namespace paddle {

@@ -93,7 +93,7 @@ inline std::ostream& operator<<(std::ostream& os, const SlotHeader& header) {
 * prepare step, fill data into argument during fill step.
 */
class IFieldScanner {
-public:
+ public:
  DISABLE_COPY(IFieldScanner);
  /**
   * Ctor.
@@ -146,7 +146,7 @@ public:
   */
  static IFieldScanner* create(SlotHeader* header);

-protected:
+ protected:
  SlotHeader* headerPtr_;
};

@@ -154,7 +154,7 @@ protected:
 * Py Data Provider Cache Interface.
 */
class IPyDataProviderCache {
-public:
+ public:
  virtual ~IPyDataProviderCache() {}

  /**
@@ -193,7 +193,7 @@ public:
 * data. And it support cache strategies.
 */
class PyDataProvider2 : public DataProvider {
-public:
+ public:
  /**
   * Ctor
   */
@@ -234,7 +234,7 @@ public:
   */
  virtual ~PyDataProvider2() { resetImpl(false); }

-private:
+ private:
  void createPyDataObj(const std::string& model,
                       const std::string& className,
                       const std::string& fileListName,
@@ -435,7 +435,7 @@ private:
    exit_ = false;
  }

-private:
+ private:
  std::unique_ptr loadThread_;
  std::atomic exit_;
  std::deque callingContexts_;
@@ -461,7 +461,7 @@ private:
  static PyObjectPtr zeroTuple_;

  class PositionRandom {
-  public:
+   public:
    inline explicit PositionRandom(bool skipRand)
        : eng_(ThreadLocalRandomEngine::get()), skipRand_(skipRand) {}
@@ -476,14 +476,14 @@ private:
      }
    }

-  private:
+   private:
    std::default_random_engine& eng_;
    std::unique_ptr> dist_;
    bool skipRand_;
  };

  // DataProvider interface
-public:
+ public:
  /**
   * Resetting the PyDataProvider. May start reading thread here.
   */
@@ -666,7 +666,7 @@ REGISTER_DATA_PROVIDER_EX(py2, PyDataProvider2);
 * Scanner for dense slot.
 */
class DenseScanner : public IFieldScanner {
-public:
+ public:
  explicit DenseScanner(SlotHeader* ptr) : IFieldScanner(ptr), height_(0) {}

  /**
@@ -708,7 +708,7 @@ public:
    ++height_;
  }

-private:
+ private:
  size_t height_;
};

@@ -716,7 +716,7 @@ private:
 * Scanner for index slot
 */
class IndexScanner : public IFieldScanner {
-public:
+ public:
  explicit IndexScanner(SlotHeader* ptr) : IFieldScanner(ptr), cnt_(0) {}

  /**
@@ -740,12 +740,12 @@ public:
    CHECK(ok) << "Cannot cast int " << py::repr(obj);
  }

-private:
+ private:
  size_t cnt_;
};

class SparseNonValueScanner : public IFieldScanner {
-public:
+ public:
  explicit SparseNonValueScanner(SlotHeader* ptr)
      : IFieldScanner(ptr), nnz_(0), height_(0) {}

@@ -790,7 +790,7 @@ public:
    ++height_;
  }

-protected:
+ protected:
  /**
   * Set a single sparse index and value.
   * @param [out] col sparse index
@@ -809,7 +809,7 @@ protected:
};

class SparseValueScanner : public SparseNonValueScanner {
-public:
+ public:
  explicit SparseValueScanner(SlotHeader* ptr) : SparseNonValueScanner(ptr) {}

  virtual void finishPrepare(Argument& argument) {
@@ -817,7 +817,7 @@ public:
        argument.value, height_, headerPtr_->dim, nnz_, FLOAT_VALUE);
  }

-protected:
+ protected:
  virtual void setData(int* col, real* dat, PyObject* obj) {
    py::SequenceHelper s(obj);
    SparseNonValueScanner::setData(col, dat, s[0]);
@@ -829,7 +829,7 @@ protected:
 * Sequence Scanner. Scanner for sequence or sub-sequence.
 */
class SequenceScanner : public IFieldScanner {
-public:
+ public:
  /**
   * Ctor
   * @param innerScanner inner scanner for each timestep or sub-sequence.
@@ -902,7 +902,7 @@ public:
   */
  virtual void finishFill(Argument& argument) { inner_->finishFill(argument); }

-protected:
+ protected:
  size_t getSize(PyObject* obj) {
    py::SequenceHelper s(obj);
    auto sc = dynamic_cast(inner_.get());
@@ -917,7 +917,7 @@ protected:
    }
  }

-private:
+ private:
  std::unique_ptr inner_;
  size_t cnt_;
  std::function getSeqStartPos_;
@@ -969,7 +969,7 @@ IFieldScanner* IFieldScanner::create(SlotHeader* header) {
 * python every pass.
 */
class NoCacheStrategy : public IPyDataProviderCache {
-public:
+ public:
  virtual bool reset() { return true; }
  virtual void drop(std::deque* data) { data->clear(); }
@@ -984,7 +984,7 @@ public:
 * The rest passes, will load data from memory.
 */
class CacheOnePassInMemory : public IPyDataProviderCache {
-public:
+ public:
  CacheOnePassInMemory()
      : objPool_(new std::deque()),
        droppedPool_(new std::deque()) {}

@@ -1011,7 +1011,7 @@ public:
  virtual std::deque* load() { return objPool_.get(); }

-private:
+ private:
  std::unique_ptr> objPool_;
  std::unique_ptr> droppedPool_;
};

diff --git a/paddle/gserver/evaluators/CTCErrorEvaluator.cpp b/paddle/legacy/gserver/evaluators/CTCErrorEvaluator.cpp
similarity index 98%
rename from paddle/gserver/evaluators/CTCErrorEvaluator.cpp
rename to paddle/legacy/gserver/evaluators/CTCErrorEvaluator.cpp
index 0f680de776..c145adda5e 100644
--- a/paddle/gserver/evaluators/CTCErrorEvaluator.cpp
+++ b/paddle/legacy/gserver/evaluators/CTCErrorEvaluator.cpp
@@ -13,8 +13,8 @@ See the License for the specific language governing permissions and
limitations under the License. */

#include "Evaluator.h"
-#include "paddle/gserver/gradientmachines/NeuralNetwork.h"
-#include "paddle/utils/StringUtil.h"
+#include "paddle/legacy/gserver/gradientmachines/NeuralNetwork.h"
+#include "paddle/legacy/utils/StringUtil.h"

namespace paddle {

@@ -22,7 +22,7 @@ namespace paddle {
 * calculate sequence-to-sequence edit distance
 */
class CTCErrorEvaluator : public Evaluator {
-private:
+ private:
  MatrixPtr outActivations_;
  int numTimes_, numClasses_, numSequences_, blank_;
  real deletions_, insertions_, substitutions_;
@@ -197,7 +197,7 @@ private:
        (real)seqClassficationError_ / numSequences_;
  }

-public:
+ public:
  CTCErrorEvaluator()
      : numTimes_(0),
        numClasses_(0),

diff --git a/paddle/gserver/evaluators/ChunkEvaluator.cpp b/paddle/legacy/gserver/evaluators/ChunkEvaluator.cpp
similarity index 98%
rename from paddle/gserver/evaluators/ChunkEvaluator.cpp
rename to paddle/legacy/gserver/evaluators/ChunkEvaluator.cpp
index 755b91d05c..0ff3f2fa8c 100644
--- a/paddle/gserver/evaluators/ChunkEvaluator.cpp
+++ b/paddle/legacy/gserver/evaluators/ChunkEvaluator.cpp
@@ -15,8 +15,8 @@ limitations under the License.
 */
#include
#include

-#include "paddle/math/Vector.h"
-#include "paddle/utils/StringUtil.h"
+#include "paddle/legacy/math/Vector.h"
+#include "paddle/legacy/utils/StringUtil.h"

#include "Evaluator.h"

@@ -77,7 +77,7 @@ class ChunkEvaluator : public Evaluator {
  std::set excludedChunkTypes_;
  mutable std::unordered_map values_;

-public:
+ public:
  virtual void init(const EvaluatorConfig& config) {
    Evaluator::init(config);
    if (config.chunk_scheme() == "IOB") {
@@ -276,7 +276,7 @@ public:
    return "chunk";
  }

-private:
+ private:
  void storeLocalValues() const {
    CHECK_GE(numOutputSegments_, 0);
    CHECK_GE(numLabelSegments_, 0);
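For readers unfamiliar with the "IOB" chunk scheme that ChunkEvaluator configures above, a standalone illustration of the segment counting it performs (a simplified sketch, not the evaluator's actual member code):

    #include <string>
    #include <vector>

    // A new chunk starts at every "B-" tag, and at an "I-" tag that does
    // not continue a previous chunk.
    size_t countIobChunks(const std::vector<std::string>& tags) {
      size_t chunks = 0;
      bool inChunk = false;
      for (const std::string& tag : tags) {
        char head = tag.empty() ? 'O' : tag[0];
        if (head == 'B' || (head == 'I' && !inChunk)) ++chunks;
        inChunk = (head != 'O');
      }
      return chunks;
    }
    // countIobChunks({"B-PER", "I-PER", "O", "B-LOC"}) == 2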
diff --git a/paddle/gserver/evaluators/DetectionMAPEvaluator.cpp b/paddle/legacy/gserver/evaluators/DetectionMAPEvaluator.cpp
similarity index 99%
rename from paddle/gserver/evaluators/DetectionMAPEvaluator.cpp
rename to paddle/legacy/gserver/evaluators/DetectionMAPEvaluator.cpp
index f43ef5dd51..57657241f8 100644
--- a/paddle/gserver/evaluators/DetectionMAPEvaluator.cpp
+++ b/paddle/legacy/gserver/evaluators/DetectionMAPEvaluator.cpp
@@ -13,7 +13,7 @@ See the License for the specific language governing permissions and
limitations under the License. */

#include "Evaluator.h"
-#include "paddle/gserver/layers/DetectionUtil.h"
+#include "paddle/legacy/gserver/layers/DetectionUtil.h"

using std::map;
using std::vector;

@@ -28,7 +28,7 @@ namespace paddle {
 * The config file api is detection_map_evaluator.
 */
class DetectionMAPEvaluator : public Evaluator {
-public:
+ public:
  DetectionMAPEvaluator()
      : evaluateDifficult_(false), cpuOutput_(nullptr), cpuLabel_(nullptr) {}
@@ -132,7 +132,7 @@ public:
    LOG(FATAL) << "Distribute detection evaluation not implemented.";
  }

-protected:
+ protected:
  void calcTFPos(const size_t batchSize,
                 const vector>>& allGTBBoxes,
                 const vector>>>&
@@ -287,7 +287,7 @@ protected:
  real getValueImpl() const { return calcMAP(); }

-private:
+ private:
  real overlapThreshold_;  // overlap threshold when determining whether matched
  bool evaluateDifficult_; // whether evaluate difficult ground truth
  size_t backgroundId_;    // class index of background

diff --git a/paddle/gserver/evaluators/Evaluator.cpp b/paddle/legacy/gserver/evaluators/Evaluator.cpp
similarity index 99%
rename from paddle/gserver/evaluators/Evaluator.cpp
rename to paddle/legacy/gserver/evaluators/Evaluator.cpp
index 79478e7fac..a956f40d02 100644
--- a/paddle/gserver/evaluators/Evaluator.cpp
+++ b/paddle/legacy/gserver/evaluators/Evaluator.cpp
@@ -12,10 +12,10 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

-#include "paddle/gserver/evaluators/Evaluator.h"
-#include "paddle/gserver/gradientmachines/NeuralNetwork.h"
-#include "paddle/utils/Stat.h"
-#include "paddle/utils/StringUtil.h"
+#include "paddle/legacy/gserver/evaluators/Evaluator.h"
+#include "paddle/legacy/gserver/gradientmachines/NeuralNetwork.h"
+#include "paddle/legacy/utils/Stat.h"
+#include "paddle/legacy/utils/StringUtil.h"

DECLARE_int32(trainer_id);

@@ -38,7 +38,7 @@ void Evaluator::eval(const NeuralNetwork& nn) {
 * The config file api is classification_error_evaluator.
 */
class ClassificationErrorEvaluator : public Evaluator {
-public:
+ public:
  /*
  ClassificationErrorEvaluator() : totalScore2_(0) {}
@@ -124,7 +124,7 @@ public:
  }

  // Evaluator interface
-protected:
+ protected:
  std::string getTypeImpl() const { return "classification_error"; }
};

@@ -135,7 +135,7 @@ protected:
 */
class SequenceClassificationErrorEvaluator
    : public ClassificationErrorEvaluator {
-public:
+ public:
  virtual void updateSamplesNum(const std::vector& arguments) {
    numSamples_ += arguments[0].getNumSequences();
  }
@@ -166,7 +166,7 @@ public:
  }

  // Evaluator interface
-protected:
+ protected:
  std::string getTypeImpl() const { return "seq_classification_error"; }
};
REGISTER_EVALUATOR(seq_classification_error,
@@ -178,7 +178,7 @@ REGISTER_EVALUATOR(seq_classification_error,
 * The config file api is sum_evaluator.
 */
class SumEvaluator : public Evaluator {
-public:
+ public:
  SumEvaluator() : cpuLabel_(nullptr), cpuWeight_(nullptr) {}

  virtual void updateSamplesNum(const std::vector& arguments) {
@@ -255,12 +255,12 @@ public:
    mergeResultsOfAllClients(client);
  }

-private:
+ private:
  IVectorPtr cpuLabel_;
  MatrixPtr cpuWeight_;

  // Evaluator interface
-protected:
+ protected:
  std::string getTypeImpl() const { return "sum"; }
};
/**
@@ -274,7 +274,7 @@ protected:
 *
 */
class ColumnSumEvaluator : public Evaluator {
-public:
+ public:
  explicit ColumnSumEvaluator(int32_t colIdx)
      : colIdx_(colIdx), colNum_(0), sum_(nullptr) {}
@@ -368,13 +368,13 @@ public:
    client->reduce(&numSamples_, &numSamples_, 1, FLAGS_trainer_id, 0);
  }

-private:
+ private:
  int32_t colIdx_;
  size_t colNum_;
  MatrixPtr sum_; /* cpu matrix */

  // Evaluator interface
-protected:
+ protected:
  std::string getTypeImpl() const {
    if (colIdx_ == -1)
      return "last-column-sum";
@@ -1018,7 +1018,7 @@ static InitFunction __reg_type_auc_sum__([]() {
 * The config file api is value_printer_evaluator.
 */
class ValuePrinter : public NotGetableEvaluator {
-public:
+ public:
  virtual void eval(const NeuralNetwork& nn) {
    for (const std::string& name : config_.input_layers()) {
      nn.getLayer(name)->getOutput().printValueString(LOG(INFO),
@@ -1038,7 +1038,7 @@ REGISTER_EVALUATOR(value_printer, ValuePrinter);
 * The config file api is gradient_printer_evaluator.
 */
class GradientPrinter : public NotGetableEvaluator {
-public:
+ public:
  virtual void eval(const NeuralNetwork& nn) {
    for (const std::string& name : config_.input_layers()) {
      const Argument& argu = nn.getLayer(name)->getOutput();
@@ -1061,11 +1061,11 @@ REGISTER_EVALUATOR(gradient_printer, GradientPrinter);
 * The config file api is maxid_printer_evaluator.
 */
class MaxIdPrinter : public NotGetableEvaluator {
-private:
+ private:
  IVectorPtr maxIds_;
  MatrixPtr maxValues_;

-public:
+ public:
  MaxIdPrinter() {}

  virtual void eval(const NeuralNetwork& nn) {
@@ -1103,12 +1103,12 @@ REGISTER_EVALUATOR(max_id_printer, MaxIdPrinter);
 * The config file api is maxframe_printer_evaluator.
 */
class MaxFramePrinter : public NotGetableEvaluator {
-private:
+ private:
  IVectorPtr maxIds_;
  MatrixPtr maxValues_;
  MatrixPtr value_;

-public:
+ public:
  MaxFramePrinter() {
    value_ =
        Matrix::create(nullptr, /* height= */ 1, 1, /* trans= */ false, false);
@@ -1190,7 +1190,7 @@ REGISTER_EVALUATOR(max_frame_printer, MaxFramePrinter);
 *
 */
class SequenceTextPrinter : public NotGetableEvaluator {
-private:
+ private:
  /// dict_file, which contains a list of tokens
  std::vector dict_;
  /// result_file, which is the output file
@@ -1203,7 +1203,7 @@ private:
  /// store the probability associated with each sequence
  std::vector cpuIn_;

-public:
+ public:
  SequenceTextPrinter() {}

  virtual void init(const EvaluatorConfig& config) {
@@ -1334,7 +1334,7 @@ REGISTER_EVALUATOR(seq_text_printer, SequenceTextPrinter);
 * The config file api is classification_error_printer_evaluator.
 */
class ClassificationErrorPrinter : public ClassificationErrorEvaluator {
-public:
+ public:
  virtual void updateSamplesNum(const std::vector& arguments) {}

  virtual real evalImp(std::vector& arguments) {

diff --git a/paddle/gserver/evaluators/Evaluator.h b/paddle/legacy/gserver/evaluators/Evaluator.h
similarity index 97%
rename from paddle/gserver/evaluators/Evaluator.h
rename to paddle/legacy/gserver/evaluators/Evaluator.h
index be2032992c..b3462819b1 100644
--- a/paddle/gserver/evaluators/Evaluator.h
+++ b/paddle/legacy/gserver/evaluators/Evaluator.h
@@ -16,10 +16,10 @@ limitations under the License. */
#include

#include "ModelConfig.pb.h"
-#include "paddle/parameter/Argument.h"
-#include "paddle/pserver/ParameterClient2.h"
-#include "paddle/utils/ClassRegistrar.h"
-#include "paddle/utils/Error.h"
+#include "paddle/legacy/parameter/Argument.h"
+#include "paddle/legacy/pserver/ParameterClient2.h"
+#include "paddle/legacy/utils/ClassRegistrar.h"
+#include "paddle/legacy/utils/Error.h"

namespace paddle {

@@ -40,7 +40,7 @@ class NeuralNetwork;
 * has been by a trained model.
 */
class Evaluator {
-public:
+ public:
  static Evaluator* create(const EvaluatorConfig& config);

  Evaluator() : numSamples_(0), totalScore_(0) {}
@@ -172,7 +172,7 @@ public:
    return this->getTypeImpl();
  }

-protected:
+ protected:
  /**
   * @brief getValueImpl The simplest way to define getValue result. If this
   * evaluator doesn't contain multiple fields, and do not throw any error, just
@@ -191,7 +191,7 @@ protected:
   */
  virtual std::string getTypeImpl() const { return "base"; }

-protected:
+ protected:
  EvaluatorConfig config_;
  double numSamples_;
  double totalScore_;
@@ -204,7 +204,7 @@ protected:
 */
class NotGetableEvaluator : public Evaluator {
  // Evaluator interface
-public:
+ public:
  void getNames(std::vector* names) {}

  real getValue(const std::string& name, Error* err) const {
@@ -219,7 +219,7 @@ public:
};

class DummyEvaluator : public Evaluator {
-public:
+ public:
  DummyEvaluator() {}
  virtual void init(const EvaluatorConfig&) {}
  virtual void start() {}
@@ -232,7 +232,7 @@ public:
  virtual void printStats(std::ostream&) const {}

  // Evaluator interface
-protected:
+ protected:
  std::string getTypeImpl() const;
};
/**
@@ -251,7 +251,7 @@ protected:
 *
 */
class AucEvaluator : public Evaluator {
-public:
+ public:
  AucEvaluator(int32_t colIdx)
      : colIdx_(colIdx),
        realColumnIdx_(0),
@@ -269,7 +269,7 @@ public:

  virtual void distributeEval(ParameterClient2* client);

-private:
+ private:
  static const uint32_t kBinNum_ = (1 << 24) - 1;
  static const int kNegativeLabel_ = 0;
  double statPos_[kBinNum_ + 1];
@@ -292,7 +292,7 @@ private:
  double calcAuc() const;

  // Evaluator interface
-protected:
+ protected:
  real getValueImpl() const;
  std::string getTypeImpl() const;
};
@@ -305,7 +305,7 @@ protected:
 * dense value.
 */
class RankAucEvaluator : public Evaluator {
-public:
+ public:
  // evaluate ranking AUC
  virtual void start();

@@ -317,7 +317,7 @@ public:
    mergeResultsOfAllClients(client);
  }

-private:
+ private:
  MatrixPtr output_;
  MatrixPtr click_;
  MatrixPtr pv_;
@@ -329,7 +329,7 @@ private:
                        size_t size);

  // Evaluator interface
-protected:
+ protected:
  std::string getTypeImpl() const;
};

@@ -344,7 +344,7 @@ protected:
 * The config file api is precision_recall_evaluator.
 */
class PrecisionRecallEvaluator : public Evaluator {
-public:
+ public:
  // Evaluate precision, recall and F1 score
  PrecisionRecallEvaluator()
      : isMultiBinaryLabel_(false),
@@ -379,7 +379,7 @@ public:
    StatsInfo() : TP(0.0), TN(0.0), FP(0.0), FN(0.0) {}
  };

-private:
+ private:
  bool isMultiBinaryLabel_;
  std::vector statsInfo_;

@@ -444,7 +444,7 @@ private:
 * The config file api is pnpair_evaluator.
 */
class PnpairEvaluator : public Evaluator {
-public:
+ public:
  PnpairEvaluator()
      : cpuOutput_(nullptr),
        cpuLabel_(nullptr),
@@ -491,7 +491,7 @@ public:
        << " calc total neg pair: " << pairArray_[1];
  }

-private:
+ private:
  static const uint32_t kPairArrayNum_ = 2;
  double pairArray_[kPairArrayNum_];
  MatrixPtr cpuOutput_;
@@ -500,7 +500,7 @@ private:
  MatrixPtr cpuWeight_;

  // Evaluator interface
-protected:
+ protected:
  real getValueImpl() const {
    return pairArray_[0] / ((pairArray_[1] <= 0) ? 1.0 : pairArray_[1]);
  }
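The getNames()/getValue() pair shown in these Evaluator.h hunks is the generic way callers pull metrics out without knowing the concrete evaluator. A sketch of that usage, assuming Error exposes an isOK() accessor (the Error API itself is not shown here):

    #include <string>
    #include <vector>

    void dumpMetrics(paddle::Evaluator& eval) {
      std::vector<std::string> names;
      eval.getNames(&names);        // collect all named fields
      for (const std::string& n : names) {
        paddle::Error err;
        paddle::real v = eval.getValue(n, &err);
        if (err.isOK()) {           // assumed accessor on Error
          LOG(INFO) << n << " = " << v;
        }
      }
    }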
*/ #include "GradientMachine.h" #include -#include "paddle/utils/Logging.h" +#include "paddle/legacy/utils/Logging.h" #include "NeuralNetwork.h" #include "hl_gpu.h" diff --git a/paddle/gserver/gradientmachines/GradientMachine.h b/paddle/legacy/gserver/gradientmachines/GradientMachine.h similarity index 94% rename from paddle/gserver/gradientmachines/GradientMachine.h rename to paddle/legacy/gserver/gradientmachines/GradientMachine.h index 60936c311d..d4f754a9f4 100644 --- a/paddle/gserver/gradientmachines/GradientMachine.h +++ b/paddle/legacy/gserver/gradientmachines/GradientMachine.h @@ -19,15 +19,15 @@ limitations under the License. */ #include "ModelConfig.pb.h" #include "TrainerConfig.pb.h" -#include "paddle/gserver/dataproviders/DataProvider.h" -#include "paddle/gserver/layers/Layer.h" -#include "paddle/math/Matrix.h" -#include "paddle/parameter/Parameter.h" -#include "paddle/parameter/ParameterUpdaterBase.h" -#include "paddle/utils/Thread.h" +#include "paddle/legacy/gserver/dataproviders/DataProvider.h" +#include "paddle/legacy/gserver/layers/Layer.h" +#include "paddle/legacy/math/Matrix.h" +#include "paddle/legacy/parameter/Parameter.h" +#include "paddle/legacy/parameter/ParameterUpdaterBase.h" +#include "paddle/legacy/utils/Thread.h" #ifndef PADDLE_MOBILE_INFERENCE -#include "paddle/gserver/evaluators/Evaluator.h" +#include "paddle/legacy/gserver/evaluators/Evaluator.h" #endif namespace paddle { @@ -73,7 +73,7 @@ class GradientMachine; typedef std::shared_ptr GradientMachinePtr; class GradientMachine { -public: + public: enum CreateMode { kNormal = 0, kSgdSparseCpuTraining = 3, @@ -240,7 +240,7 @@ public: */ virtual void releaseOutput() {} -protected: + protected: virtual void onLoadParameter() {} std::vector parameters_; diff --git a/paddle/gserver/gradientmachines/GradientMachineMode.cpp b/paddle/legacy/gserver/gradientmachines/GradientMachineMode.cpp similarity index 100% rename from paddle/gserver/gradientmachines/GradientMachineMode.cpp rename to paddle/legacy/gserver/gradientmachines/GradientMachineMode.cpp diff --git a/paddle/gserver/gradientmachines/GradientMachineMode.h b/paddle/legacy/gserver/gradientmachines/GradientMachineMode.h similarity index 89% rename from paddle/gserver/gradientmachines/GradientMachineMode.h rename to paddle/legacy/gserver/gradientmachines/GradientMachineMode.h index 898b68fbbc..dd944a35f8 100644 --- a/paddle/gserver/gradientmachines/GradientMachineMode.h +++ b/paddle/legacy/gserver/gradientmachines/GradientMachineMode.h @@ -19,14 +19,14 @@ limitations under the License. */ namespace paddle { class IGradientMachineMode { -public: + public: virtual ~IGradientMachineMode() {} -public: // interfaces - /** - * @brief create current mode's gradient machine by model config. - * @param config model config - */ + public: // interfaces + /** + * @brief create current mode's gradient machine by model config. + * @param config model config + */ virtual GradientMachine* create(const ModelConfig& config) = 0; /** @@ -55,14 +55,14 @@ public: // interfaces */ virtual bool needTrainWholeDataInOneBatch() const = 0; -public: // static methods. - /** - * @brief register a custom gradient machine mode. - * @note For user to register a custom gradient machine mode, id should >= - * kCustom. - * @param mode mode id. - * @param ptr mode description object. - */ + public: // static methods. + /** + * @brief register a custom gradient machine mode. + * @note For user to register a custom gradient machine mode, id should >= + * kCustom. + * @param mode mode id. 
+ * @param ptr mode description object. + */ static void regGradientMachineMode( int32_t mode, std::unique_ptr&& ptr) { modes_.insert(std::make_pair(mode, std::move(ptr))); @@ -141,7 +141,7 @@ public: // static methods. } } -private: + private: static std::unordered_map> modes_; }; diff --git a/paddle/gserver/gradientmachines/MultiGradientMachine.cpp b/paddle/legacy/gserver/gradientmachines/MultiGradientMachine.cpp similarity index 99% rename from paddle/gserver/gradientmachines/MultiGradientMachine.cpp rename to paddle/legacy/gserver/gradientmachines/MultiGradientMachine.cpp index b8d4d28f0f..3ef0dfbfe2 100644 --- a/paddle/gserver/gradientmachines/MultiGradientMachine.cpp +++ b/paddle/legacy/gserver/gradientmachines/MultiGradientMachine.cpp @@ -14,9 +14,9 @@ limitations under the License. */ #include "MultiGradientMachine.h" -#include "paddle/utils/Logging.h" +#include "paddle/legacy/utils/Logging.h" -#include "paddle/utils/Stat.h" +#include "paddle/legacy/utils/Stat.h" #include "NeuralNetwork.h" #include "ParallelNeuralNetwork.h" @@ -532,6 +532,7 @@ void TrainerThread::computeThread() { break; } } + hl_fini(); } void TrainerThread::prefetch() { @@ -651,6 +652,7 @@ void TrainerThread::copyGradToBufferThread() { } partnerThread->notifyGradientCollect(pid); } + hl_fini(); } void TrainerThread::gradCollectThread() { @@ -693,6 +695,7 @@ void TrainerThread::gradCollectThread() { notifyCopyGradToBuffer(pid); } } + hl_fini(); } void TrainerThread::doCallback(int pid) { @@ -741,6 +744,7 @@ void TrainerThread::valueDispatchThread() { thread->notifyValueReady(pid); } + hl_fini(); } void TrainerThread::notifyValueReady(int paramId) { diff --git a/paddle/gserver/gradientmachines/MultiGradientMachine.h b/paddle/legacy/gserver/gradientmachines/MultiGradientMachine.h similarity index 99% rename from paddle/gserver/gradientmachines/MultiGradientMachine.h rename to paddle/legacy/gserver/gradientmachines/MultiGradientMachine.h index 83d2651f34..674acd4124 100644 --- a/paddle/gserver/gradientmachines/MultiGradientMachine.h +++ b/paddle/legacy/gserver/gradientmachines/MultiGradientMachine.h @@ -19,8 +19,8 @@ limitations under the License. */ #include "GradientMachine.h" #include "hl_gpu.h" -#include "paddle/utils/Locks.h" -#include "paddle/utils/Queue.h" +#include "paddle/legacy/utils/Locks.h" +#include "paddle/legacy/utils/Queue.h" namespace paddle { @@ -166,7 +166,7 @@ struct GradBuffer { * the merged gradient to parameter server. */ class MultiGradientMachine : public GradientMachine { -public: + public: enum TaskType { TASK_FORWARD_BACKWARD = 0, TASK_FORWARD = 1, @@ -213,7 +213,7 @@ public: /// The gradietns will be copied to each thread in the computing threads. virtual void setOutputGrad(const std::vector& args); -protected: + protected: friend class TrainerThread; std::vector& getAllThreads() { return threads_; } @@ -281,7 +281,7 @@ protected: int paraMainThread(int pid) const { return paraMainThread_[pid]; } -protected: + protected: virtual void forwardImp(const std::vector& inArgs, std::vector* outArgs, PassType passType, @@ -298,7 +298,7 @@ protected: void allocGradBufs(); -protected: + protected: bool useGpu_; bool hasNonstaticCpuParamters_; @@ -342,7 +342,7 @@ protected: }; class TrainerThread { -public: + public: TrainerThread(const ModelConfig& config, int threadId, MultiGradientMachine* multiMachine); @@ -392,7 +392,7 @@ public: /// Whether the thread has input data. 
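regGradientMachineMode() above is the extension hook for plugging in a custom machine type. A sketch of registering one; only part of the interface is visible in this hunk, so the remaining pure virtuals and the value of kCustom are assumptions:

    class MyMachineMode : public paddle::IGradientMachineMode {
     public:
      paddle::GradientMachine* create(
          const paddle::ModelConfig& config) override;
      bool needTrainWholeDataInOneBatch() const override { return false; }
      // ...the interface's other pure virtuals, elided here...
    };

    void registerMyMode() {
      int32_t modeId = 100;  // assumed to satisfy "id should >= kCustom"
      paddle::IGradientMachineMode::regGradientMachineMode(
          modeId,
          std::unique_ptr<paddle::IGradientMachineMode>(new MyMachineMode()));
    }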
diff --git a/paddle/gserver/gradientmachines/MultiGradientMachine.cpp b/paddle/legacy/gserver/gradientmachines/MultiGradientMachine.cpp
similarity index 99%
rename from paddle/gserver/gradientmachines/MultiGradientMachine.cpp
rename to paddle/legacy/gserver/gradientmachines/MultiGradientMachine.cpp
index b8d4d28f0f..3ef0dfbfe2 100644
--- a/paddle/gserver/gradientmachines/MultiGradientMachine.cpp
+++ b/paddle/legacy/gserver/gradientmachines/MultiGradientMachine.cpp
@@ -14,9 +14,9 @@ limitations under the License. */

#include "MultiGradientMachine.h"

-#include "paddle/utils/Logging.h"
+#include "paddle/legacy/utils/Logging.h"

-#include "paddle/utils/Stat.h"
+#include "paddle/legacy/utils/Stat.h"

#include "NeuralNetwork.h"
#include "ParallelNeuralNetwork.h"

@@ -532,6 +532,7 @@ void TrainerThread::computeThread() {
      break;
    }
  }
+  hl_fini();
}

void TrainerThread::prefetch() {
@@ -651,6 +652,7 @@ void TrainerThread::copyGradToBufferThread() {
    }
    partnerThread->notifyGradientCollect(pid);
  }
+  hl_fini();
}

void TrainerThread::gradCollectThread() {
@@ -693,6 +695,7 @@ void TrainerThread::gradCollectThread() {
      notifyCopyGradToBuffer(pid);
    }
  }
+  hl_fini();
}

void TrainerThread::doCallback(int pid) {
@@ -741,6 +744,7 @@ void TrainerThread::valueDispatchThread() {

    thread->notifyValueReady(pid);
  }
+  hl_fini();
}

void TrainerThread::notifyValueReady(int paramId) {

diff --git a/paddle/gserver/gradientmachines/MultiGradientMachine.h b/paddle/legacy/gserver/gradientmachines/MultiGradientMachine.h
similarity index 99%
rename from paddle/gserver/gradientmachines/MultiGradientMachine.h
rename to paddle/legacy/gserver/gradientmachines/MultiGradientMachine.h
index 83d2651f34..674acd4124 100644
--- a/paddle/gserver/gradientmachines/MultiGradientMachine.h
+++ b/paddle/legacy/gserver/gradientmachines/MultiGradientMachine.h
@@ -19,8 +19,8 @@ limitations under the License. */

#include "GradientMachine.h"

#include "hl_gpu.h"
-#include "paddle/utils/Locks.h"
-#include "paddle/utils/Queue.h"
+#include "paddle/legacy/utils/Locks.h"
+#include "paddle/legacy/utils/Queue.h"

namespace paddle {

@@ -166,7 +166,7 @@ struct GradBuffer {
 * the merged gradient to parameter server.
 */
class MultiGradientMachine : public GradientMachine {
-public:
+ public:
  enum TaskType {
    TASK_FORWARD_BACKWARD = 0,
    TASK_FORWARD = 1,
@@ -213,7 +213,7 @@ public:
  /// The gradients will be copied to each thread in the computing threads.
  virtual void setOutputGrad(const std::vector& args);

-protected:
+ protected:
  friend class TrainerThread;

  std::vector& getAllThreads() { return threads_; }
@@ -281,7 +281,7 @@ protected:

  int paraMainThread(int pid) const { return paraMainThread_[pid]; }

-protected:
+ protected:
  virtual void forwardImp(const std::vector& inArgs,
                          std::vector* outArgs,
                          PassType passType,
@@ -298,7 +298,7 @@ protected:

  void allocGradBufs();

-protected:
+ protected:
  bool useGpu_;

  bool hasNonstaticCpuParamters_;
@@ -342,7 +342,7 @@ protected:
};

class TrainerThread {
-public:
+ public:
  TrainerThread(const ModelConfig& config,
                int threadId,
                MultiGradientMachine* multiMachine);
@@ -392,7 +392,7 @@ public:
  /// Whether the thread has input data.
  bool hasInputData() { return batchSize_ != 0; }

-protected:
+ protected:
  void mergeCpuGradients();

  void mergeGradSparse(
@@ -421,7 +421,7 @@ protected:
  /// GradientMachine::backward
  void doCallback(int pid);

-protected:
+ protected:
  MultiGradientMachine* multiMachine_;
  ModelConfig config_;
  /// whether the thread should stop

diff --git a/paddle/gserver/gradientmachines/MultiNetwork.cpp b/paddle/legacy/gserver/gradientmachines/MultiNetwork.cpp
similarity index 98%
rename from paddle/gserver/gradientmachines/MultiNetwork.cpp
rename to paddle/legacy/gserver/gradientmachines/MultiNetwork.cpp
index a1140402b8..1245c44103 100644
--- a/paddle/gserver/gradientmachines/MultiNetwork.cpp
+++ b/paddle/legacy/gserver/gradientmachines/MultiNetwork.cpp
@@ -13,8 +13,8 @@ See the License for the specific language governing permissions and
limitations under the License. */

#include
-#include "paddle/utils/Stat.h"
-#include "paddle/utils/Util.h"
+#include "paddle/legacy/utils/Stat.h"
+#include "paddle/legacy/utils/Util.h"

#include "MultiNetwork.h"

@@ -122,7 +122,7 @@ void MultiNetwork::finish() {
}

class MultiCombinedEvaluator : public Evaluator {
-public:
+ public:
  MultiCombinedEvaluator() {}
  void addEvaluator(std::unique_ptr&& evaluator) {
    evaluators_.emplace_back(std::move(evaluator));
  }
@@ -167,7 +167,7 @@ public:
    }
  }

-protected:
+ protected:
  std::vector> evaluators_;
};

diff --git a/paddle/gserver/gradientmachines/MultiNetwork.h b/paddle/legacy/gserver/gradientmachines/MultiNetwork.h
similarity index 96%
rename from paddle/gserver/gradientmachines/MultiNetwork.h
rename to paddle/legacy/gserver/gradientmachines/MultiNetwork.h
index 186a9ad0a3..afe15cb020 100644
--- a/paddle/gserver/gradientmachines/MultiNetwork.h
+++ b/paddle/legacy/gserver/gradientmachines/MultiNetwork.h
@@ -17,12 +17,12 @@ limitations under the License. */

#include "GradientMachine.h"
#include "NeuralNetwork.h"

-#include "paddle/utils/Locks.h"
+#include "paddle/legacy/utils/Locks.h"

namespace paddle {

class MultiNetwork : public NeuralNetwork {
-public:
+ public:
  explicit MultiNetwork(std::string subModelName = "")
      : NeuralNetwork(subModelName) {}

@@ -58,7 +58,7 @@ public:

  virtual void finish();

-protected:
+ protected:
  std::vector> subNetworks_;
};
}  // namespace paddle

diff --git a/paddle/gserver/gradientmachines/NeuralNetwork.cpp b/paddle/legacy/gserver/gradientmachines/NeuralNetwork.cpp
similarity index 98%
rename from paddle/gserver/gradientmachines/NeuralNetwork.cpp
rename to paddle/legacy/gserver/gradientmachines/NeuralNetwork.cpp
index a3c13df3db..0f8048152f 100644
--- a/paddle/gserver/gradientmachines/NeuralNetwork.cpp
+++ b/paddle/legacy/gserver/gradientmachines/NeuralNetwork.cpp
@@ -12,22 +12,22 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
 */

-#include "paddle/utils/Util.h"
+#include "paddle/legacy/utils/Util.h"

#include "NeuralNetwork.h"
#include "hl_gpu.h"
-#include "paddle/utils/CustomStackTrace.h"
-#include "paddle/utils/Logging.h"
-#include "paddle/utils/Stat.h"
+#include "paddle/legacy/utils/CustomStackTrace.h"
+#include "paddle/legacy/utils/Logging.h"
+#include "paddle/legacy/utils/Stat.h"

#ifdef PADDLE_WITH_MKLDNN
-#include "paddle/gserver/layers/MKLDNNLayer.h"
+#include "paddle/legacy/gserver/layers/MKLDNNLayer.h"
#endif

#ifndef PADDLE_MOBILE_INFERENCE
#include "MultiNetwork.h"
#include "RecurrentGradientMachine.h"
-#include "paddle/gserver/layers/AgentLayer.h"
+#include "paddle/legacy/gserver/layers/AgentLayer.h"
#endif

namespace paddle {
@@ -362,7 +362,7 @@ void NeuralNetwork::releaseOutput() {
#ifndef PADDLE_MOBILE_INFERENCE

class CombinedEvaluator : public Evaluator {
-public:
+ public:
  void addEvaluator(std::unique_ptr&& evaluator) {
    evaluators_.emplace_back(std::move(evaluator));
  }
@@ -400,11 +400,11 @@ public:
    }
  }

-protected:
+ protected:
  std::vector> evaluators_;

  // Evaluator interface
-public:
+ public:
  /**
   * @brief getNames will return all inside evaluators' names.
   * @param names [out]: return names.
@@ -435,7 +435,7 @@ public:
    });
  }

-private:
+ private:
  template <typename T>
  T getMethodHelper(const std::string& name,
                    Error* err,
@@ -454,7 +454,7 @@ private:
};

class SubnetEvaluator : public CombinedEvaluator {
-public:
+ public:
  SubnetEvaluator(const std::string& layerName,
                  std::unique_ptr&& evaluator)
      : layerName_(layerName) {
@@ -473,7 +473,7 @@ public:
        << " in submodel " << nn.getName();
  }

-protected:
+ protected:
  std::string layerName_;
};

diff --git a/paddle/gserver/gradientmachines/NeuralNetwork.h b/paddle/legacy/gserver/gradientmachines/NeuralNetwork.h
similarity index 93%
rename from paddle/gserver/gradientmachines/NeuralNetwork.h
rename to paddle/legacy/gserver/gradientmachines/NeuralNetwork.h
index 5b32f844f7..566157c899 100644
--- a/paddle/gserver/gradientmachines/NeuralNetwork.h
+++ b/paddle/legacy/gserver/gradientmachines/NeuralNetwork.h
@@ -19,13 +19,13 @@ limitations under the License. */
#include

#include "ModelConfig.pb.h"
-#include "paddle/gserver/dataproviders/DataProvider.h"
-#include "paddle/gserver/gradientmachines/GradientMachine.h"
-#include "paddle/gserver/layers/CostLayer.h"
-#include "paddle/gserver/layers/DataLayer.h"
-#include "paddle/gserver/layers/Layer.h"
-#include "paddle/parameter/Parameter.h"
-#include "paddle/utils/ClassRegistrar.h"
+#include "paddle/legacy/gserver/dataproviders/DataProvider.h"
+#include "paddle/legacy/gserver/gradientmachines/GradientMachine.h"
+#include "paddle/legacy/gserver/layers/CostLayer.h"
+#include "paddle/legacy/gserver/layers/DataLayer.h"
+#include "paddle/legacy/gserver/layers/Layer.h"
+#include "paddle/legacy/parameter/Parameter.h"
+#include "paddle/legacy/utils/ClassRegistrar.h"

namespace paddle {
/*
@@ -56,7 +56,7 @@ void parameterInitNN(int paramId,
                     std::vector* sharedParams);

class NeuralNetwork : public GradientMachine {
-public:
+ public:
  virtual void init(const ModelConfig& config,
                    ParamInitCallback callback = nullptr,
                    const std::vector& parameterTypes =
@@ -144,7 +144,7 @@ public:
   */
  void releaseOutput();

-protected:
+ protected:
  /**
   * The constructor of NeuralNetwork.
   * The sub networks can get parameters_ and parameterMap_

diff --git a/paddle/gserver/gradientmachines/ParallelNeuralNetwork.cpp b/paddle/legacy/gserver/gradientmachines/ParallelNeuralNetwork.cpp
similarity index 98%
rename from paddle/gserver/gradientmachines/ParallelNeuralNetwork.cpp
rename to paddle/legacy/gserver/gradientmachines/ParallelNeuralNetwork.cpp
index 85cfc59fbe..33d24b5b83 100644
--- a/paddle/gserver/gradientmachines/ParallelNeuralNetwork.cpp
+++ b/paddle/legacy/gserver/gradientmachines/ParallelNeuralNetwork.cpp
@@ -12,8 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

-#include "paddle/utils/Stat.h"
-#include "paddle/utils/Util.h"
+#include "paddle/legacy/utils/Stat.h"
+#include "paddle/legacy/utils/Util.h"

#include "ParallelNeuralNetwork.h"

@@ -197,6 +197,7 @@ void ParallelThread::computeThread() {
      job_work.layer_->markAllInputGrad();
    }
  }
+  hl_fini();
}

void ParallelThread::start() {

diff --git a/paddle/gserver/gradientmachines/ParallelNeuralNetwork.h b/paddle/legacy/gserver/gradientmachines/ParallelNeuralNetwork.h
similarity index 98%
rename from paddle/gserver/gradientmachines/ParallelNeuralNetwork.h
rename to paddle/legacy/gserver/gradientmachines/ParallelNeuralNetwork.h
index e3b6812123..c091459506 100644
--- a/paddle/gserver/gradientmachines/ParallelNeuralNetwork.h
+++ b/paddle/legacy/gserver/gradientmachines/ParallelNeuralNetwork.h
@@ -32,7 +32,7 @@ enum TaskType {
 * multiple threads in parallel.
 */
class ParallelNeuralNetwork : public NeuralNetwork {
-public:
+ public:
  ParallelNeuralNetwork(std::string subModelName = "",
                        NeuralNetwork *rootNetwork = nullptr)
      : NeuralNetwork(subModelName, rootNetwork) {}
@@ -66,7 +66,7 @@ public:

  // virtual void eval(Evaluator* evaluator);

-protected:
+ protected:
  bool useGpu_;
  /// number of gpu devices
  int numDevices_;
@@ -74,7 +74,7 @@ protected:
};

class ParallelThread {
-public:
+ public:
  ParallelThread(int threadId, int deviceId, bool useGpu);
  ~ParallelThread();
  void jobEnqueue(LayerPtr layer, TaskType task);
@@ -87,10 +87,10 @@ public:
  }
  void setForwardPassType(PassType passType) { passType_ = passType; }

-protected:
+ protected:
  void computeThread();

-public:
+ public:
  struct Job {
    LayerPtr layer_;
    TaskType task_;
@@ -98,7 +98,7 @@ public:
  typedef Queue JobQueue;
  JobQueue queue_;

-protected:
+ protected:
  /// from 0 to threads-1
  int threadId_;
  /// the GPU device Id which the computeThread_ used
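The hl_fini() additions in MultiGradientMachine.cpp and ParallelNeuralNetwork.cpp above all follow the same per-thread pattern: every worker that touched the GPU runtime releases its thread-local resources before its function returns. A minimal sketch of that shape; the exact hl_init call site is an assumption, since only the hl_fini side appears in this diff:

    #include <thread>

    void workerBody(int deviceId) {
      hl_init(deviceId);  // assumed thread-local GPU setup
      // ... per-thread compute loop, as in TrainerThread::computeThread ...
      hl_fini();          // the cleanup these hunks add before thread exit
    }

    std::thread spawnWorker(int deviceId) {
      return std::thread(workerBody, deviceId);
    }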
diff --git a/paddle/gserver/gradientmachines/RecurrentGradientMachine.cpp b/paddle/legacy/gserver/gradientmachines/RecurrentGradientMachine.cpp
similarity index 99%
rename from paddle/gserver/gradientmachines/RecurrentGradientMachine.cpp
rename to paddle/legacy/gserver/gradientmachines/RecurrentGradientMachine.cpp
index 2429b5d1a0..e49f042404 100644
--- a/paddle/gserver/gradientmachines/RecurrentGradientMachine.cpp
+++ b/paddle/legacy/gserver/gradientmachines/RecurrentGradientMachine.cpp
@@ -19,10 +19,10 @@ limitations under the License.
 */
#include
#include
#include "NeuralNetwork.h"
-#include "paddle/gserver/layers/AgentLayer.h"
-#include "paddle/utils/Flags.h"
-#include "paddle/utils/Stat.h"
-#include "paddle/utils/Util.h"
+#include "paddle/legacy/gserver/layers/AgentLayer.h"
+#include "paddle/legacy/utils/Flags.h"
+#include "paddle/legacy/utils/Stat.h"
+#include "paddle/legacy/utils/Util.h"

DEFINE_string(diy_beam_search_prob_so, "", "the diy beam search cost so");

@@ -96,7 +96,7 @@ static InitFunction __init__diy_prob_method(
    std::numeric_limits::max());

class BeamSearchControlCallbacks {
-public:
+ public:
  RecurrentGradientMachine::BeamSearchCandidatesAdjustCallback
      beamSearchCandidateAdjust;
  RecurrentGradientMachine::NormOrDropNodeCallback normOrDropNode;
@@ -115,7 +115,7 @@ public:
};

class BeamSearchStatisticsCallbacks {
-public:
+ public:
  RecurrentGradientMachine::EachStepCallback onEachStepStarted;
  RecurrentGradientMachine::EachStepCallback onEachStepStoped;

@@ -148,11 +148,11 @@ RecurrentGradientMachine::RecurrentGradientMachine(
 * so it should not be placed in the root network.
 */
class BootBiasLayer : public Layer {
-protected:
+ protected:
  std::unique_ptr biases_;
  IVectorPtr cpuIds_;

-public:
+ public:
  explicit BootBiasLayer(const LayerConfig& config) : Layer(config) {}

  bool init(const LayerMap& layerMap,

diff --git a/paddle/gserver/gradientmachines/RecurrentGradientMachine.h b/paddle/legacy/gserver/gradientmachines/RecurrentGradientMachine.h
similarity index 99%
rename from paddle/gserver/gradientmachines/RecurrentGradientMachine.h
rename to paddle/legacy/gserver/gradientmachines/RecurrentGradientMachine.h
index 0032b72cda..0a13d4f6f8 100644
--- a/paddle/gserver/gradientmachines/RecurrentGradientMachine.h
+++ b/paddle/legacy/gserver/gradientmachines/RecurrentGradientMachine.h
@@ -18,7 +18,7 @@ limitations under the License. */

#include "GradientMachine.h"
#include "NeuralNetwork.h"

-#include "paddle/utils/Locks.h"
+#include "paddle/legacy/utils/Locks.h"

namespace paddle {

@@ -30,7 +30,7 @@ class BeamSearchControlCallbacks;
class BeamSearchStatisticsCallbacks;

class RecurrentGradientMachine : public NeuralNetwork {
-public:
+ public:
  RecurrentGradientMachine(const std::string& subModelName,
                           NeuralNetwork* rootNetwork);

@@ -290,7 +290,7 @@ public:
    return this->finalPaths_;
  }

-protected:
+ protected:
  std::vector commonSeqInfo_;
  ICpuGpuVectorPtr sequenceStartPositions_;
  void calcSequenceStartPositions();
@@ -447,7 +447,7 @@ protected:
  MatrixPtr cpuProb_;
  IVectorPtr cpuEos_;

-private:
+ private:
  /*
   * @return beam size in beam search
   */

diff --git a/paddle/gserver/layers/AddtoLayer.cpp b/paddle/legacy/gserver/layers/AddtoLayer.cpp
similarity index 96%
rename from paddle/gserver/layers/AddtoLayer.cpp
rename to paddle/legacy/gserver/layers/AddtoLayer.cpp
index 75e17f52df..39c5603d93 100644
--- a/paddle/gserver/layers/AddtoLayer.cpp
+++ b/paddle/legacy/gserver/layers/AddtoLayer.cpp
@@ -14,9 +14,9 @@ limitations under the License. */

#include "AddtoLayer.h"

-#include "paddle/utils/Logging.h"
+#include "paddle/legacy/utils/Logging.h"

-#include "paddle/utils/Stat.h"
+#include "paddle/legacy/utils/Stat.h"

namespace paddle {

diff --git a/paddle/gserver/layers/AddtoLayer.h b/paddle/legacy/gserver/layers/AddtoLayer.h
similarity index 94%
rename from paddle/gserver/layers/AddtoLayer.h
rename to paddle/legacy/gserver/layers/AddtoLayer.h
index 1d00063056..ad3cefe1a4 100644
--- a/paddle/gserver/layers/AddtoLayer.h
+++ b/paddle/legacy/gserver/layers/AddtoLayer.h
@@ -15,8 +15,8 @@ limitations under the License.
 */
#pragma once

#include "Layer.h"
-#include "paddle/math/Matrix.h"
-#include "paddle/utils/ThreadLocal.h"
+#include "paddle/legacy/math/Matrix.h"
+#include "paddle/legacy/utils/ThreadLocal.h"

namespace paddle {

@@ -33,10 +33,10 @@ namespace paddle {
 * The config file api is addto_layer.
 */
class AddtoLayer : public Layer {
-protected:
+ protected:
  std::unique_ptr biases_;

-public:
+ public:
  explicit AddtoLayer(const LayerConfig& config) : Layer(config) {}

  ~AddtoLayer() {}

diff --git a/paddle/gserver/layers/AgentLayer.cpp b/paddle/legacy/gserver/layers/AgentLayer.cpp
similarity index 99%
rename from paddle/gserver/layers/AgentLayer.cpp
rename to paddle/legacy/gserver/layers/AgentLayer.cpp
index e2f73f88f5..bae89b2fa3 100644
--- a/paddle/gserver/layers/AgentLayer.cpp
+++ b/paddle/legacy/gserver/layers/AgentLayer.cpp
@@ -14,9 +14,9 @@ limitations under the License. */

#include "AgentLayer.h"

-#include "paddle/utils/Logging.h"
+#include "paddle/legacy/utils/Logging.h"

-#include "paddle/utils/Stat.h"
+#include "paddle/legacy/utils/Stat.h"

namespace paddle {

diff --git a/paddle/gserver/layers/AgentLayer.h b/paddle/legacy/gserver/layers/AgentLayer.h
similarity index 97%
rename from paddle/gserver/layers/AgentLayer.h
rename to paddle/legacy/gserver/layers/AgentLayer.h
index da0ac45308..a05eac5e70 100644
--- a/paddle/gserver/layers/AgentLayer.h
+++ b/paddle/legacy/gserver/layers/AgentLayer.h
@@ -15,8 +15,8 @@ limitations under the License. */
#pragma once

#include "Layer.h"
-#include "paddle/math/Matrix.h"
-#include "paddle/utils/ThreadLocal.h"
+#include "paddle/legacy/math/Matrix.h"
+#include "paddle/legacy/utils/ThreadLocal.h"

namespace paddle {

@@ -26,11 +26,11 @@ namespace paddle {
 * called to set one and only one real layer
 */
class AgentLayer : public Layer {
-protected:
+ protected:
  LayerPtr realLayer_;
  int numSamples_;

-public:
+ public:
  explicit AgentLayer(const LayerConfig& config) : Layer(config) {}

  ~AgentLayer() {}

@@ -55,14 +55,14 @@ public:
 * GatherAgentLayer collect a complete sequence.
 */
class GatherAgentLayer : public Layer {
-protected:
+ protected:
  std::vector realLayers_;
  std::vector idsVec_;
  // we don't clear idsVec_ vector to avoid IVector alloc/free
  IVectorPtr allIds_;
  std::vector idIndex_;

-public:
+ public:
  explicit GatherAgentLayer(const LayerConfig& config) : Layer(config) {}

  virtual ~GatherAgentLayer() {}

@@ -95,7 +95,7 @@ public:
 * if it is, the agent will select a few ids in real layer.
 */
class ScatterAgentLayer : public Layer {
-protected:
+ protected:
  LayerPtr realLayer_;
  IVectorPtr ids_;
  IVectorPtr cpuIds_;
@@ -113,7 +113,7 @@ protected:
  // true for setRealLayer, false for setRealLayerAndOutput
  bool selectionMode_;

-public:
+ public:
  explicit ScatterAgentLayer(const LayerConfig& config) : Layer(config) {}

  virtual ~ScatterAgentLayer() {}

diff --git a/paddle/gserver/layers/AverageLayer.cpp b/paddle/legacy/gserver/layers/AverageLayer.cpp
similarity index 96%
rename from paddle/gserver/layers/AverageLayer.cpp
rename to paddle/legacy/gserver/layers/AverageLayer.cpp
index b3787b1448..0539da7937 100644
--- a/paddle/gserver/layers/AverageLayer.cpp
+++ b/paddle/legacy/gserver/layers/AverageLayer.cpp
@@ -14,9 +14,9 @@ limitations under the License.
*/ #include "AverageLayer.h" -#include "paddle/utils/Logging.h" +#include "paddle/legacy/utils/Logging.h" -#include "paddle/utils/Stat.h" +#include "paddle/legacy/utils/Stat.h" namespace paddle { diff --git a/paddle/gserver/layers/AverageLayer.h b/paddle/legacy/gserver/layers/AverageLayer.h similarity index 97% rename from paddle/gserver/layers/AverageLayer.h rename to paddle/legacy/gserver/layers/AverageLayer.h index 24602d2a9c..a0d457d35f 100644 --- a/paddle/gserver/layers/AverageLayer.h +++ b/paddle/legacy/gserver/layers/AverageLayer.h @@ -15,7 +15,7 @@ limitations under the License. */ #pragma once #include "SequencePoolLayer.h" -#include "paddle/math/Matrix.h" +#include "paddle/legacy/math/Matrix.h" namespace paddle { @@ -37,7 +37,7 @@ namespace paddle { * The config file api is pooling_layer. */ class AverageLayer : public SequencePoolLayer { -public: + public: enum AverageStrategy { kAverage = 0, kSum = 1, kAverageSquareRootN = 2 }; explicit AverageLayer(const LayerConfig& config) : SequencePoolLayer(config) {} @@ -48,7 +48,7 @@ public: void forward(PassType passType) override; void backward(const UpdateCallback& callback = nullptr) override; -protected: + protected: int mode_; }; } // namespace paddle diff --git a/paddle/gserver/layers/BatchNormBaseLayer.cpp b/paddle/legacy/gserver/layers/BatchNormBaseLayer.cpp similarity index 98% rename from paddle/gserver/layers/BatchNormBaseLayer.cpp rename to paddle/legacy/gserver/layers/BatchNormBaseLayer.cpp index a3516f9423..4dcbd8dc27 100644 --- a/paddle/gserver/layers/BatchNormBaseLayer.cpp +++ b/paddle/legacy/gserver/layers/BatchNormBaseLayer.cpp @@ -15,7 +15,7 @@ limitations under the License. */ #include "BatchNormBaseLayer.h" #include "BatchNormalizationLayer.h" #include "Layer.h" -#include "paddle/utils/Stat.h" +#include "paddle/legacy/utils/Stat.h" #ifdef PADDLE_WITH_CUDA #include "CudnnBatchNormLayer.h" #endif diff --git a/paddle/gserver/layers/BatchNormBaseLayer.h b/paddle/legacy/gserver/layers/BatchNormBaseLayer.h similarity index 98% rename from paddle/gserver/layers/BatchNormBaseLayer.h rename to paddle/legacy/gserver/layers/BatchNormBaseLayer.h index 69d642af4f..8dc1d78837 100644 --- a/paddle/gserver/layers/BatchNormBaseLayer.h +++ b/paddle/legacy/gserver/layers/BatchNormBaseLayer.h @@ -15,7 +15,7 @@ limitations under the License. */ #pragma once #include "Layer.h" -#include "paddle/utils/Stat.h" +#include "paddle/legacy/utils/Stat.h" namespace paddle { @@ -40,7 +40,7 @@ namespace paddle { */ class BatchNormBaseLayer : public Layer { -public: + public: explicit BatchNormBaseLayer(const LayerConfig& config) : Layer(config) {} ~BatchNormBaseLayer() {} @@ -61,7 +61,7 @@ public: */ void calFeatureMapSize(); -protected: + protected: /// Batch normalization scale parameter, which is referred to as gamma in /// in original paper. std::unique_ptr weight_; diff --git a/paddle/gserver/layers/BatchNormalizationLayer.cpp b/paddle/legacy/gserver/layers/BatchNormalizationLayer.cpp similarity index 99% rename from paddle/gserver/layers/BatchNormalizationLayer.cpp rename to paddle/legacy/gserver/layers/BatchNormalizationLayer.cpp index 59831dd904..0297bd44c7 100644 --- a/paddle/gserver/layers/BatchNormalizationLayer.cpp +++ b/paddle/legacy/gserver/layers/BatchNormalizationLayer.cpp @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
diff --git a/paddle/gserver/layers/BatchNormBaseLayer.cpp b/paddle/legacy/gserver/layers/BatchNormBaseLayer.cpp
similarity index 98%
rename from paddle/gserver/layers/BatchNormBaseLayer.cpp
rename to paddle/legacy/gserver/layers/BatchNormBaseLayer.cpp
index a3516f9423..4dcbd8dc27 100644
--- a/paddle/gserver/layers/BatchNormBaseLayer.cpp
+++ b/paddle/legacy/gserver/layers/BatchNormBaseLayer.cpp
@@ -15,7 +15,7 @@ limitations under the License. */
#include "BatchNormBaseLayer.h"
#include "BatchNormalizationLayer.h"
#include "Layer.h"
-#include "paddle/utils/Stat.h"
+#include "paddle/legacy/utils/Stat.h"
#ifdef PADDLE_WITH_CUDA
#include "CudnnBatchNormLayer.h"
#endif

diff --git a/paddle/gserver/layers/BatchNormBaseLayer.h b/paddle/legacy/gserver/layers/BatchNormBaseLayer.h
similarity index 98%
rename from paddle/gserver/layers/BatchNormBaseLayer.h
rename to paddle/legacy/gserver/layers/BatchNormBaseLayer.h
index 69d642af4f..8dc1d78837 100644
--- a/paddle/gserver/layers/BatchNormBaseLayer.h
+++ b/paddle/legacy/gserver/layers/BatchNormBaseLayer.h
@@ -15,7 +15,7 @@ limitations under the License. */
#pragma once

#include "Layer.h"
-#include "paddle/utils/Stat.h"
+#include "paddle/legacy/utils/Stat.h"

namespace paddle {

@@ -40,7 +40,7 @@ namespace paddle {
 */

class BatchNormBaseLayer : public Layer {
-public:
+ public:
  explicit BatchNormBaseLayer(const LayerConfig& config) : Layer(config) {}

  ~BatchNormBaseLayer() {}
@@ -61,7 +61,7 @@ public:
   */
  void calFeatureMapSize();

-protected:
+ protected:
  /// Batch normalization scale parameter, which is referred to as gamma in
  /// the original paper.
  std::unique_ptr weight_;

diff --git a/paddle/gserver/layers/BatchNormalizationLayer.cpp b/paddle/legacy/gserver/layers/BatchNormalizationLayer.cpp
similarity index 99%
rename from paddle/gserver/layers/BatchNormalizationLayer.cpp
rename to paddle/legacy/gserver/layers/BatchNormalizationLayer.cpp
index 59831dd904..0297bd44c7 100644
--- a/paddle/gserver/layers/BatchNormalizationLayer.cpp
+++ b/paddle/legacy/gserver/layers/BatchNormalizationLayer.cpp
@@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
 */

-#include "paddle/utils/Stat.h"
+#include "paddle/legacy/utils/Stat.h"
#ifdef PADDLE_WITH_CUDA
#include "hl_batch_transpose.h"
#endif

diff --git a/paddle/gserver/layers/BatchNormalizationLayer.h b/paddle/legacy/gserver/layers/BatchNormalizationLayer.h
similarity index 99%
rename from paddle/gserver/layers/BatchNormalizationLayer.h
rename to paddle/legacy/gserver/layers/BatchNormalizationLayer.h
index 95add69215..e5e4e690b6 100644
--- a/paddle/gserver/layers/BatchNormalizationLayer.h
+++ b/paddle/legacy/gserver/layers/BatchNormalizationLayer.h
@@ -27,7 +27,7 @@ namespace paddle {
 */

class BatchNormalizationLayer : public BatchNormBaseLayer {
-public:
+ public:
  explicit BatchNormalizationLayer(const LayerConfig& config)
      : BatchNormBaseLayer(config), firstTest_(true) {}

@@ -38,7 +38,7 @@ public:
  void forward(PassType passType) override;
  void backward(const UpdateCallback& callback = nullptr) override;

-protected:
+ protected:
  /// Load pre-calculated mean and std.
  void setMeanAndStd();

diff --git a/paddle/gserver/layers/BilinearInterpLayer.cpp b/paddle/legacy/gserver/layers/BilinearInterpLayer.cpp
similarity index 97%
rename from paddle/gserver/layers/BilinearInterpLayer.cpp
rename to paddle/legacy/gserver/layers/BilinearInterpLayer.cpp
index 9775914596..a091f51bc2 100644
--- a/paddle/gserver/layers/BilinearInterpLayer.cpp
+++ b/paddle/legacy/gserver/layers/BilinearInterpLayer.cpp
@@ -13,8 +13,8 @@ See the License for the specific language governing permissions and
limitations under the License. */

#include "BilinearInterpLayer.h"
-#include "paddle/utils/Logging.h"
-#include "paddle/utils/Stat.h"
+#include "paddle/legacy/utils/Logging.h"
+#include "paddle/legacy/utils/Stat.h"

namespace paddle {

diff --git a/paddle/gserver/layers/BilinearInterpLayer.h b/paddle/legacy/gserver/layers/BilinearInterpLayer.h
similarity index 95%
rename from paddle/gserver/layers/BilinearInterpLayer.h
rename to paddle/legacy/gserver/layers/BilinearInterpLayer.h
index acd320420f..c585a5ed10 100644
--- a/paddle/gserver/layers/BilinearInterpLayer.h
+++ b/paddle/legacy/gserver/layers/BilinearInterpLayer.h
@@ -15,7 +15,7 @@ limitations under the License. */
#pragma once

#include "Layer.h"
-#include "paddle/math/Matrix.h"
+#include "paddle/legacy/math/Matrix.h"

namespace paddle {

@@ -26,13 +26,13 @@ namespace paddle {
 * @note The config file api is bilinear_interp_layer.
 */
class BilinearInterpLayer : public Layer {
-protected:
+ protected:
  size_t outImgH_, outImgW_;
  size_t inImgH_, inImgW_;
  real ratioH_, ratioW_;
  size_t numChannels_;

-public:
+ public:
  explicit BilinearInterpLayer(const LayerConfig& config) : Layer(config) {}

  virtual ~BilinearInterpLayer() {}
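The ratioH_/ratioW_ members above drive the resampling. A sketch of how such ratios are commonly derived for bilinear resizing (the exact formula is not visible in this hunk, so treat it as an assumption):

    // Align the first and last pixels of input and output; each output
    // step then advances `ratio` input pixels.
    double interpRatio(size_t inSize, size_t outSize) {
      return (outSize > 1)
                 ? static_cast<double>(inSize - 1) / (outSize - 1)
                 : 0.0;
    }
    // For a 4x4 -> 7x7 upsample: ratio = 3.0 / 6.0 = 0.5, i.e. every
    // output pixel advances half an input pixel.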
*/ #include "BlockExpandLayer.h" -#include "paddle/utils/Logging.h" +#include "paddle/legacy/utils/Logging.h" namespace paddle { diff --git a/paddle/gserver/layers/BlockExpandLayer.h b/paddle/legacy/gserver/layers/BlockExpandLayer.h similarity index 97% rename from paddle/gserver/layers/BlockExpandLayer.h rename to paddle/legacy/gserver/layers/BlockExpandLayer.h index 1797b64036..8b90249bfb 100644 --- a/paddle/gserver/layers/BlockExpandLayer.h +++ b/paddle/legacy/gserver/layers/BlockExpandLayer.h @@ -15,7 +15,7 @@ limitations under the License. */ #pragma once #include "Layer.h" -#include "paddle/math/Matrix.h" +#include "paddle/legacy/math/Matrix.h" namespace paddle { @@ -40,7 +40,7 @@ namespace paddle { * The config file api is block_expand_layer. */ class BlockExpandLayer : public Layer { -protected: + protected: /** * @brief Calculate outputH_ and outputW_ and return block number which * actually is time steps. @@ -53,7 +53,7 @@ protected: TensorShape inputShape_; TensorShape outputShape_; -public: + public: explicit BlockExpandLayer(const LayerConfig& config) : Layer(config) {} ~BlockExpandLayer() {} diff --git a/paddle/gserver/layers/CRFDecodingLayer.cpp b/paddle/legacy/gserver/layers/CRFDecodingLayer.cpp similarity index 100% rename from paddle/gserver/layers/CRFDecodingLayer.cpp rename to paddle/legacy/gserver/layers/CRFDecodingLayer.cpp diff --git a/paddle/gserver/layers/CRFDecodingLayer.h b/paddle/legacy/gserver/layers/CRFDecodingLayer.h similarity index 98% rename from paddle/gserver/layers/CRFDecodingLayer.h rename to paddle/legacy/gserver/layers/CRFDecodingLayer.h index fba3cebac1..018162e146 100644 --- a/paddle/gserver/layers/CRFDecodingLayer.h +++ b/paddle/legacy/gserver/layers/CRFDecodingLayer.h @@ -30,14 +30,14 @@ namespace paddle { * See LinearChainCRF.h for the detail of the CRF formulation. */ class CRFDecodingLayer : public CRFLayer { -public: + public: explicit CRFDecodingLayer(const LayerConfig& config) : CRFLayer(config) {} bool init(const LayerMap& layerMap, const ParameterMap& parameterMap) override; void forward(PassType passType) override; void backward(const UpdateCallback& callback) override; -protected: + protected: std::unique_ptr crf_; }; diff --git a/paddle/gserver/layers/CRFLayer.cpp b/paddle/legacy/gserver/layers/CRFLayer.cpp similarity index 100% rename from paddle/gserver/layers/CRFLayer.cpp rename to paddle/legacy/gserver/layers/CRFLayer.cpp diff --git a/paddle/gserver/layers/CRFLayer.h b/paddle/legacy/gserver/layers/CRFLayer.h similarity index 98% rename from paddle/gserver/layers/CRFLayer.h rename to paddle/legacy/gserver/layers/CRFLayer.h index cb5bd05568..88c2ed343a 100644 --- a/paddle/gserver/layers/CRFLayer.h +++ b/paddle/legacy/gserver/layers/CRFLayer.h @@ -27,14 +27,14 @@ namespace paddle { * See class LinearChainCRF for the detail of the CRF formulation. 
*/ class CRFLayer : public Layer { -public: + public: explicit CRFLayer(const LayerConfig& config) : Layer(config) {} bool init(const LayerMap& layerMap, const ParameterMap& parameterMap) override; void forward(PassType passType) override; void backward(const UpdateCallback& callback) override; -protected: + protected: size_t numClasses_; ParameterPtr parameter_; std::vector crfs_; diff --git a/paddle/gserver/layers/CTCLayer.cpp b/paddle/legacy/gserver/layers/CTCLayer.cpp similarity index 100% rename from paddle/gserver/layers/CTCLayer.cpp rename to paddle/legacy/gserver/layers/CTCLayer.cpp diff --git a/paddle/gserver/layers/CTCLayer.h b/paddle/legacy/gserver/layers/CTCLayer.h similarity index 98% rename from paddle/gserver/layers/CTCLayer.h rename to paddle/legacy/gserver/layers/CTCLayer.h index fcbc42565e..5d70b1f4ce 100644 --- a/paddle/gserver/layers/CTCLayer.h +++ b/paddle/legacy/gserver/layers/CTCLayer.h @@ -20,7 +20,7 @@ limitations under the License. */ namespace paddle { class CTCLayer : public Layer { -public: + public: explicit CTCLayer(const LayerConfig& config) : Layer(config) {} bool init(const LayerMap& layerMap, const ParameterMap& parameterMap) override; @@ -31,7 +31,7 @@ public: const Argument& softmaxSeqs, const Argument& labelSeqs); -protected: + protected: size_t numClasses_; bool normByTimes_; std::vector ctcs_; diff --git a/paddle/gserver/layers/ClipLayer.cpp b/paddle/legacy/gserver/layers/ClipLayer.cpp similarity index 99% rename from paddle/gserver/layers/ClipLayer.cpp rename to paddle/legacy/gserver/layers/ClipLayer.cpp index dbc3337499..6aa3c8fe64 100644 --- a/paddle/gserver/layers/ClipLayer.cpp +++ b/paddle/legacy/gserver/layers/ClipLayer.cpp @@ -24,11 +24,11 @@ namespace paddle { */ class ClipLayer : public Layer { -protected: + protected: double min_; double max_; -public: + public: explicit ClipLayer(const LayerConfig& config) : Layer(config) {} bool init(const LayerMap& layerMap, diff --git a/paddle/gserver/layers/ConcatenateLayer.cpp b/paddle/legacy/gserver/layers/ConcatenateLayer.cpp similarity index 98% rename from paddle/gserver/layers/ConcatenateLayer.cpp rename to paddle/legacy/gserver/layers/ConcatenateLayer.cpp index f5ab29a509..ce3f2ca950 100644 --- a/paddle/gserver/layers/ConcatenateLayer.cpp +++ b/paddle/legacy/gserver/layers/ConcatenateLayer.cpp @@ -14,7 +14,7 @@ limitations under the License. */ #include "Layer.h" #include "Projection.h" -#include "paddle/utils/Stat.h" +#include "paddle/legacy/utils/Stat.h" namespace paddle { @@ -23,7 +23,7 @@ namespace paddle { * each input as one row for the output of this layer and apply activation. */ class ConcatenateLayer : public Layer { -public: + public: explicit ConcatenateLayer(const LayerConfig& config) : Layer(config) {} ~ConcatenateLayer() {} @@ -97,7 +97,7 @@ void ConcatenateLayer::backward(const UpdateCallback& callback) { * processed by a Projection. 
 */
 class ConcatenateLayer2 : public Layer {
-public:
+ public:
  explicit ConcatenateLayer2(const LayerConfig& config) : Layer(config) {}

  ~ConcatenateLayer2() {}

@@ -108,7 +108,7 @@ public:
  void forward(PassType passType) override;
  void backward(const UpdateCallback& callback = nullptr) override;

-protected:
+ protected:
  std::vector<std::unique_ptr<Projection>> projections_;
  std::vector<Argument> projOutput_;
  std::vector<std::pair<size_t, size_t>> projCol_;

diff --git a/paddle/gserver/layers/ContextProjection.cpp b/paddle/legacy/gserver/layers/ContextProjection.cpp
similarity index 99%
rename from paddle/gserver/layers/ContextProjection.cpp
rename to paddle/legacy/gserver/layers/ContextProjection.cpp
index 10c3cef0da..8bcf32663e 100644
--- a/paddle/gserver/layers/ContextProjection.cpp
+++ b/paddle/legacy/gserver/layers/ContextProjection.cpp
@@ -13,7 +13,7 @@ See the License for the specific language governing permissions and
 limitations under the License. */

 #include "ContextProjection.h"
-#include "paddle/utils/Stat.h"
+#include "paddle/legacy/utils/Stat.h"

 namespace paddle {

diff --git a/paddle/gserver/layers/ContextProjection.h b/paddle/legacy/gserver/layers/ContextProjection.h
similarity index 99%
rename from paddle/gserver/layers/ContextProjection.h
rename to paddle/legacy/gserver/layers/ContextProjection.h
index e30f98f58d..9c21714541 100644
--- a/paddle/gserver/layers/ContextProjection.h
+++ b/paddle/legacy/gserver/layers/ContextProjection.h
@@ -42,7 +42,7 @@ namespace paddle {
 * The config file api is context_projection.
 */
 class ContextProjection : public Projection {
-public:
+ public:
  /**
   * Constructor. If context_start is zero and context_lenth is one, it will
   * set trainable_padding false. trainable_padding is an optional arguments
@@ -63,7 +63,7 @@ public:

  virtual bool init();

-protected:
+ protected:
  std::unique_ptr<Weight> weight_;
  /// number of extra timesteps added at the beginning
  size_t beginPad_;

diff --git a/paddle/gserver/layers/Conv3DLayer.cpp b/paddle/legacy/gserver/layers/Conv3DLayer.cpp
similarity index 99%
rename from paddle/gserver/layers/Conv3DLayer.cpp
rename to paddle/legacy/gserver/layers/Conv3DLayer.cpp
index b38de86b15..d072a74234 100644
--- a/paddle/gserver/layers/Conv3DLayer.cpp
+++ b/paddle/legacy/gserver/layers/Conv3DLayer.cpp
@@ -13,8 +13,8 @@ See the License for the specific language governing permissions and
 limitations under the License. */

 #include "Conv3DLayer.h"
-#include "paddle/utils/Logging.h"
-#include "paddle/utils/Stat.h"
+#include "paddle/legacy/utils/Logging.h"
+#include "paddle/legacy/utils/Stat.h"

 namespace paddle {

diff --git a/paddle/gserver/layers/Conv3DLayer.h b/paddle/legacy/gserver/layers/Conv3DLayer.h
similarity index 93%
rename from paddle/gserver/layers/Conv3DLayer.h
rename to paddle/legacy/gserver/layers/Conv3DLayer.h
index 5ab5ff3d4a..cb42a2f36d 100644
--- a/paddle/gserver/layers/Conv3DLayer.h
+++ b/paddle/legacy/gserver/layers/Conv3DLayer.h
@@ -15,8 +15,8 @@ limitations under the License. */
 #pragma once
 #include <vector>
 #include "ConvBaseLayer.h"
-#include "paddle/math/MathUtils.h"
-#include "paddle/math/Matrix.h"
+#include "paddle/legacy/math/MathUtils.h"
+#include "paddle/legacy/math/Matrix.h"

 namespace paddle {

@@ -26,7 +26,7 @@ namespace paddle {
 * calculate convolution operation.
 */
 class Conv3DLayer : public ConvBaseLayer {
-public:
+ public:
  explicit Conv3DLayer(const LayerConfig& config) : ConvBaseLayer(config) {}
  ~Conv3DLayer() {}

@@ -40,7 +40,7 @@ public:
  void bpropWeights(int i);
  size_t getSize();

-protected:
+ protected:
  // Figure out the dimensions for individual gemms.
  IntV M_;  /// numFilters_ / filter_group_;
  IntV N_;  /// channels_ * filterSizeZ_ * filterSize_ * filterSizeY_

diff --git a/paddle/gserver/layers/ConvBaseLayer.cpp b/paddle/legacy/gserver/layers/ConvBaseLayer.cpp
similarity index 98%
rename from paddle/gserver/layers/ConvBaseLayer.cpp
rename to paddle/legacy/gserver/layers/ConvBaseLayer.cpp
index 56bf4f9fcb..76120915e4 100644
--- a/paddle/gserver/layers/ConvBaseLayer.cpp
+++ b/paddle/legacy/gserver/layers/ConvBaseLayer.cpp
@@ -13,8 +13,8 @@ See the License for the specific language governing permissions and
 limitations under the License. */

 #include "ConvBaseLayer.h"
-#include "paddle/math/MathUtils.h"
-#include "paddle/utils/Logging.h"
+#include "paddle/legacy/math/MathUtils.h"
+#include "paddle/legacy/utils/Logging.h"
 namespace paddle {

 bool ConvBaseLayer::init(const LayerMap& layerMap,

diff --git a/paddle/gserver/layers/ConvBaseLayer.h b/paddle/legacy/gserver/layers/ConvBaseLayer.h
similarity index 98%
rename from paddle/gserver/layers/ConvBaseLayer.h
rename to paddle/legacy/gserver/layers/ConvBaseLayer.h
index 93869fe68d..01e90e9996 100644
--- a/paddle/gserver/layers/ConvBaseLayer.h
+++ b/paddle/legacy/gserver/layers/ConvBaseLayer.h
@@ -15,7 +15,7 @@ limitations under the License. */
 #pragma once

 #include "Layer.h"
-#include "paddle/math/MathUtils.h"
+#include "paddle/legacy/math/MathUtils.h"
 namespace paddle {

 /**
@@ -24,7 +24,7 @@ namespace paddle {
 */

 class ConvBaseLayer : public Layer {
-protected:
+ protected:
  typedef std::vector<int> IntV;

  /// True if it's deconv layer, false if it's convolution layer
@@ -88,7 +88,7 @@ protected:
  /// of output size.
  bool caffeMode_;

-public:
+ public:
  explicit ConvBaseLayer(const LayerConfig& config) : Layer(config) {}

  bool init(const LayerMap& layerMap,

diff --git a/paddle/gserver/layers/ConvBaseOperator.cpp b/paddle/legacy/gserver/layers/ConvBaseOperator.cpp
similarity index 98%
rename from paddle/gserver/layers/ConvBaseOperator.cpp
rename to paddle/legacy/gserver/layers/ConvBaseOperator.cpp
index 317e7d5c60..e8e59b3bfe 100644
--- a/paddle/gserver/layers/ConvBaseOperator.cpp
+++ b/paddle/legacy/gserver/layers/ConvBaseOperator.cpp
@@ -13,8 +13,8 @@ See the License for the specific language governing permissions and
 limitations under the License. */

 #include "ConvBaseOperator.h"
-#include "paddle/math/MathUtils.h"
-#include "paddle/math/Matrix.h"
+#include "paddle/legacy/math/MathUtils.h"
+#include "paddle/legacy/math/Matrix.h"

 namespace paddle {

diff --git a/paddle/gserver/layers/ConvBaseOperator.h b/paddle/legacy/gserver/layers/ConvBaseOperator.h
similarity index 96%
rename from paddle/gserver/layers/ConvBaseOperator.h
rename to paddle/legacy/gserver/layers/ConvBaseOperator.h
index 27fb0362d3..4ac77f2d74 100644
--- a/paddle/gserver/layers/ConvBaseOperator.h
+++ b/paddle/legacy/gserver/layers/ConvBaseOperator.h
@@ -14,8 +14,8 @@ limitations under the License. */
 #pragma once
 #include "Operator.h"
-#include "paddle/math/MathUtils.h"
-#include "paddle/math/Matrix.h"
+#include "paddle/legacy/math/MathUtils.h"
+#include "paddle/legacy/math/Matrix.h"

 namespace paddle {

@@ -29,7 +29,7 @@ namespace paddle {
 */

 class ConvBaseOperator : public Operator {
-public:
+ public:
  ConvBaseOperator(const OperatorConfig &config, bool useGpu);
  /**
   * Free workspace in device and destroy cudnn tensor descriptor.
@@ -46,7 +46,7 @@ public:
    hl_destroy_convolution_descriptor(convDesc_);
  }

-protected:
+ protected:
  /**
   * Get convolution parameters from layer config and
   * initialize member variables.
diff --git a/paddle/gserver/layers/ConvBaseProjection.cpp b/paddle/legacy/gserver/layers/ConvBaseProjection.cpp
similarity index 99%
rename from paddle/gserver/layers/ConvBaseProjection.cpp
rename to paddle/legacy/gserver/layers/ConvBaseProjection.cpp
index 39f433b78f..ff5d3412de 100644
--- a/paddle/gserver/layers/ConvBaseProjection.cpp
+++ b/paddle/legacy/gserver/layers/ConvBaseProjection.cpp
@@ -13,7 +13,7 @@ See the License for the specific language governing permissions and
 limitations under the License. */

 #include "ConvBaseProjection.h"
-#include "paddle/utils/Stat.h"
+#include "paddle/legacy/utils/Stat.h"

 namespace paddle {

diff --git a/paddle/gserver/layers/ConvBaseProjection.h b/paddle/legacy/gserver/layers/ConvBaseProjection.h
similarity index 98%
rename from paddle/gserver/layers/ConvBaseProjection.h
rename to paddle/legacy/gserver/layers/ConvBaseProjection.h
index ba76d236d9..dcf5ce0f48 100644
--- a/paddle/gserver/layers/ConvBaseProjection.h
+++ b/paddle/legacy/gserver/layers/ConvBaseProjection.h
@@ -15,7 +15,7 @@ limitations under the License. */
 #pragma once

 #include "Projection.h"
-#include "paddle/math/MathUtils.h"
+#include "paddle/legacy/math/MathUtils.h"

 namespace paddle {

@@ -23,7 +23,7 @@ namespace paddle {
 * @brief Base class for ConvProjection and ConvTransProjection.
 */
 class ConvBaseProjection : public Projection {
-public:
+ public:
  /**
   * Constructor.
   */
@@ -33,7 +33,7 @@ public:

  ~ConvBaseProjection();

-protected:
+ protected:
  void getConvParams();
  void initCudnn();

diff --git a/paddle/gserver/layers/ConvOperator.cpp b/paddle/legacy/gserver/layers/ConvOperator.cpp
similarity index 98%
rename from paddle/gserver/layers/ConvOperator.cpp
rename to paddle/legacy/gserver/layers/ConvOperator.cpp
index 45498b92d3..5276b2c392 100644
--- a/paddle/gserver/layers/ConvOperator.cpp
+++ b/paddle/legacy/gserver/layers/ConvOperator.cpp
@@ -13,8 +13,8 @@ See the License for the specific language governing permissions and
 limitations under the License. */

 #include "ConvOperator.h"
-#include "paddle/math/MathUtils.h"
-#include "paddle/math/Matrix.h"
+#include "paddle/legacy/math/MathUtils.h"
+#include "paddle/legacy/math/Matrix.h"

 namespace paddle {

diff --git a/paddle/gserver/layers/ConvOperator.h b/paddle/legacy/gserver/layers/ConvOperator.h
similarity index 93%
rename from paddle/gserver/layers/ConvOperator.h
rename to paddle/legacy/gserver/layers/ConvOperator.h
index fbdb7bb1cd..8f31620111 100644
--- a/paddle/gserver/layers/ConvOperator.h
+++ b/paddle/legacy/gserver/layers/ConvOperator.h
@@ -14,8 +14,8 @@ limitations under the License. */
 #pragma once
 #include "ConvBaseOperator.h"
-#include "paddle/math/MathUtils.h"
-#include "paddle/math/Matrix.h"
+#include "paddle/legacy/math/MathUtils.h"
+#include "paddle/legacy/math/Matrix.h"

 namespace paddle {

@@ -29,7 +29,7 @@ namespace paddle {
 */

 class ConvOperator : public ConvBaseOperator {
-public:
+ public:
  ConvOperator(const OperatorConfig &config, bool useGpu)
      : ConvBaseOperator(config, useGpu) {}
  /**

diff --git a/paddle/gserver/layers/ConvProjection.cpp b/paddle/legacy/gserver/layers/ConvProjection.cpp
similarity index 99%
rename from paddle/gserver/layers/ConvProjection.cpp
rename to paddle/legacy/gserver/layers/ConvProjection.cpp
index f382e6cab1..b40cdac258 100644
--- a/paddle/gserver/layers/ConvProjection.cpp
+++ b/paddle/legacy/gserver/layers/ConvProjection.cpp
@@ -13,7 +13,7 @@ See the License for the specific language governing permissions and
 limitations under the License. */
*/ #include "ConvProjection.h" -#include "paddle/utils/Stat.h" +#include "paddle/legacy/utils/Stat.h" namespace paddle { diff --git a/paddle/gserver/layers/ConvProjection.h b/paddle/legacy/gserver/layers/ConvProjection.h similarity index 95% rename from paddle/gserver/layers/ConvProjection.h rename to paddle/legacy/gserver/layers/ConvProjection.h index e8ecb99431..890a17e2f8 100644 --- a/paddle/gserver/layers/ConvProjection.h +++ b/paddle/legacy/gserver/layers/ConvProjection.h @@ -15,7 +15,7 @@ limitations under the License. */ #pragma once #include "ConvBaseProjection.h" -#include "paddle/math/MathUtils.h" +#include "paddle/legacy/math/MathUtils.h" namespace paddle { @@ -23,7 +23,7 @@ namespace paddle { * @brief Convolution projection do the same calculation with CudnnConvLayer. */ class ConvProjection : public ConvBaseProjection { -public: + public: /** * Constructor. */ diff --git a/paddle/gserver/layers/ConvShiftLayer.cpp b/paddle/legacy/gserver/layers/ConvShiftLayer.cpp similarity index 95% rename from paddle/gserver/layers/ConvShiftLayer.cpp rename to paddle/legacy/gserver/layers/ConvShiftLayer.cpp index fb87771019..b7ecbe556c 100644 --- a/paddle/gserver/layers/ConvShiftLayer.cpp +++ b/paddle/legacy/gserver/layers/ConvShiftLayer.cpp @@ -13,9 +13,9 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "Layer.h" -#include "paddle/math/Matrix.h" -#include "paddle/utils/Logging.h" -#include "paddle/utils/Stat.h" +#include "paddle/legacy/math/Matrix.h" +#include "paddle/legacy/utils/Logging.h" +#include "paddle/legacy/utils/Stat.h" namespace paddle { @@ -42,7 +42,7 @@ namespace paddle { */ class ConvShiftLayer : public Layer { -public: + public: explicit ConvShiftLayer(const LayerConfig& config) : Layer(config) {} ~ConvShiftLayer() {} diff --git a/paddle/gserver/layers/ConvTransOperator.cpp b/paddle/legacy/gserver/layers/ConvTransOperator.cpp similarity index 98% rename from paddle/gserver/layers/ConvTransOperator.cpp rename to paddle/legacy/gserver/layers/ConvTransOperator.cpp index ac41d6f9a4..f4ce2affb1 100644 --- a/paddle/gserver/layers/ConvTransOperator.cpp +++ b/paddle/legacy/gserver/layers/ConvTransOperator.cpp @@ -13,8 +13,8 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "ConvTransOperator.h" -#include "paddle/math/MathUtils.h" -#include "paddle/math/Matrix.h" +#include "paddle/legacy/math/MathUtils.h" +#include "paddle/legacy/math/Matrix.h" namespace paddle { diff --git a/paddle/gserver/layers/ConvTransOperator.h b/paddle/legacy/gserver/layers/ConvTransOperator.h similarity index 93% rename from paddle/gserver/layers/ConvTransOperator.h rename to paddle/legacy/gserver/layers/ConvTransOperator.h index 1bf58f2bfb..206335a01f 100644 --- a/paddle/gserver/layers/ConvTransOperator.h +++ b/paddle/legacy/gserver/layers/ConvTransOperator.h @@ -14,8 +14,8 @@ limitations under the License. 
*/ #pragma once #include "ConvBaseOperator.h" -#include "paddle/math/MathUtils.h" -#include "paddle/math/Matrix.h" +#include "paddle/legacy/math/MathUtils.h" +#include "paddle/legacy/math/Matrix.h" namespace paddle { @@ -29,7 +29,7 @@ namespace paddle { */ class ConvTransOperator : public ConvBaseOperator { -public: + public: ConvTransOperator(const OperatorConfig &config, bool useGpu) : ConvBaseOperator(config, useGpu) {} /** diff --git a/paddle/gserver/layers/ConvTransProjection.cpp b/paddle/legacy/gserver/layers/ConvTransProjection.cpp similarity index 99% rename from paddle/gserver/layers/ConvTransProjection.cpp rename to paddle/legacy/gserver/layers/ConvTransProjection.cpp index 242ce34a60..00e34c8f2d 100644 --- a/paddle/gserver/layers/ConvTransProjection.cpp +++ b/paddle/legacy/gserver/layers/ConvTransProjection.cpp @@ -13,7 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "ConvTransProjection.h" -#include "paddle/utils/Stat.h" +#include "paddle/legacy/utils/Stat.h" namespace paddle { diff --git a/paddle/gserver/layers/ConvTransProjection.h b/paddle/legacy/gserver/layers/ConvTransProjection.h similarity index 96% rename from paddle/gserver/layers/ConvTransProjection.h rename to paddle/legacy/gserver/layers/ConvTransProjection.h index 269b2694c8..9b63dd4735 100644 --- a/paddle/gserver/layers/ConvTransProjection.h +++ b/paddle/legacy/gserver/layers/ConvTransProjection.h @@ -15,7 +15,7 @@ limitations under the License. */ #pragma once #include "ConvBaseProjection.h" -#include "paddle/math/MathUtils.h" +#include "paddle/legacy/math/MathUtils.h" namespace paddle { @@ -23,7 +23,7 @@ namespace paddle { * @brief Convolution projection do the same calculation with CudnnConvLayer. */ class ConvTransProjection : public ConvBaseProjection { -public: + public: /** * Constructor. */ diff --git a/paddle/gserver/layers/ConvexCombinationLayer.cpp b/paddle/legacy/gserver/layers/ConvexCombinationLayer.cpp similarity index 97% rename from paddle/gserver/layers/ConvexCombinationLayer.cpp rename to paddle/legacy/gserver/layers/ConvexCombinationLayer.cpp index dce751940c..c38ab251f1 100644 --- a/paddle/gserver/layers/ConvexCombinationLayer.cpp +++ b/paddle/legacy/gserver/layers/ConvexCombinationLayer.cpp @@ -13,9 +13,9 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "Layer.h" -#include "paddle/math/Matrix.h" -#include "paddle/utils/Logging.h" -#include "paddle/utils/Stat.h" +#include "paddle/legacy/math/Matrix.h" +#include "paddle/legacy/utils/Logging.h" +#include "paddle/legacy/utils/Stat.h" namespace paddle { @@ -36,7 +36,7 @@ namespace paddle { * The config file api is linear_comb_layer. */ class ConvexCombinationLayer : public Layer { -protected: + protected: /// A matrix pointer pointing to second input. MatrixPtr tmpMtx0; /// A matrix pointer pointing to first input. @@ -44,7 +44,7 @@ protected: /// A matrix pointer pointing to output. 
  MatrixPtr tmpRow1;

-public:
+ public:
  explicit ConvexCombinationLayer(const LayerConfig& config) : Layer(config) {}

  ~ConvexCombinationLayer() {}

diff --git a/paddle/gserver/layers/CosSimLayer.cpp b/paddle/legacy/gserver/layers/CosSimLayer.cpp
similarity index 97%
rename from paddle/gserver/layers/CosSimLayer.cpp
rename to paddle/legacy/gserver/layers/CosSimLayer.cpp
index 4e44a5e8df..ab8d7cc1f6 100644
--- a/paddle/gserver/layers/CosSimLayer.cpp
+++ b/paddle/legacy/gserver/layers/CosSimLayer.cpp
@@ -13,8 +13,8 @@ See the License for the specific language governing permissions and
 limitations under the License. */

 #include "CosSimLayer.h"
-#include "paddle/utils/Logging.h"
-#include "paddle/utils/Stat.h"
+#include "paddle/legacy/utils/Logging.h"
+#include "paddle/legacy/utils/Stat.h"

 namespace paddle {

diff --git a/paddle/gserver/layers/CosSimLayer.h b/paddle/legacy/gserver/layers/CosSimLayer.h
similarity index 93%
rename from paddle/gserver/layers/CosSimLayer.h
rename to paddle/legacy/gserver/layers/CosSimLayer.h
index 675cdb16b5..b08e2c6a35 100644
--- a/paddle/gserver/layers/CosSimLayer.h
+++ b/paddle/legacy/gserver/layers/CosSimLayer.h
@@ -15,8 +15,8 @@ limitations under the License. */
 #pragma once

 #include "Layer.h"
-#include "paddle/math/Matrix.h"
-#include "paddle/utils/ThreadLocal.h"
+#include "paddle/legacy/math/Matrix.h"
+#include "paddle/legacy/utils/ThreadLocal.h"

 namespace paddle {
 /**
@@ -33,7 +33,7 @@ namespace paddle {
 * The config file api is cos_sim.
 */
 class CosSimLayer : public Layer {
-public:
+ public:
  explicit CosSimLayer(const LayerConfig& config) : Layer(config) {}

  ~CosSimLayer() {}

diff --git a/paddle/gserver/layers/CosSimVecMatLayer.cpp b/paddle/legacy/gserver/layers/CosSimVecMatLayer.cpp
similarity index 97%
rename from paddle/gserver/layers/CosSimVecMatLayer.cpp
rename to paddle/legacy/gserver/layers/CosSimVecMatLayer.cpp
index 685b4e8ef3..03de0be815 100644
--- a/paddle/gserver/layers/CosSimVecMatLayer.cpp
+++ b/paddle/legacy/gserver/layers/CosSimVecMatLayer.cpp
@@ -13,9 +13,9 @@ See the License for the specific language governing permissions and
 limitations under the License. */

 #include "Layer.h"
-#include "paddle/math/Matrix.h"
-#include "paddle/utils/Logging.h"
-#include "paddle/utils/Stat.h"
+#include "paddle/legacy/math/Matrix.h"
+#include "paddle/legacy/utils/Logging.h"
+#include "paddle/legacy/utils/Stat.h"

 namespace paddle {
 /**
@@ -32,7 +32,7 @@ namespace paddle {
 */

 class CosSimVecMatLayer : public Layer {
-protected:
+ protected:
  MatrixPtr tmpMtx0;
  MatrixPtr tmpMtx1;
  MatrixPtr tmpRow0;
@@ -40,7 +40,7 @@ protected:
  MatrixPtr tmpRow2;
  MatrixPtr tmpRow3;

-public:
+ public:
  explicit CosSimVecMatLayer(const LayerConfig& config) : Layer(config) {}

  ~CosSimVecMatLayer() {}

diff --git a/paddle/gserver/layers/CostLayer.cpp b/paddle/legacy/gserver/layers/CostLayer.cpp
similarity index 99%
rename from paddle/gserver/layers/CostLayer.cpp
rename to paddle/legacy/gserver/layers/CostLayer.cpp
index 484f803a83..18b5b77bde 100644
--- a/paddle/gserver/layers/CostLayer.cpp
+++ b/paddle/legacy/gserver/layers/CostLayer.cpp
@@ -16,9 +16,9 @@ limitations under the License. */
 #include <algorithm>
 #include <cmath>
 #include <memory>
-#include "paddle/utils/Logging.h"
+#include "paddle/legacy/utils/Logging.h"

-#include "paddle/math/SparseMatrix.h"
+#include "paddle/legacy/math/SparseMatrix.h"

 namespace paddle {

@@ -716,7 +716,7 @@ void HuberTwoClassification::backwardImp(Matrix& output,
 * \f]
 */
 class SumCostLayer : public Layer {
-public:
+ public:
  explicit SumCostLayer(const LayerConfig& config) : Layer(config) {}

  bool init(const LayerMap& layerMap,

diff --git a/paddle/gserver/layers/CostLayer.h b/paddle/legacy/gserver/layers/CostLayer.h
similarity index 98%
rename from paddle/gserver/layers/CostLayer.h
rename to paddle/legacy/gserver/layers/CostLayer.h
index 306c067ed1..9bfec0e2b1 100644
--- a/paddle/gserver/layers/CostLayer.h
+++ b/paddle/legacy/gserver/layers/CostLayer.h
@@ -29,7 +29,7 @@ namespace paddle {
 * handled by the base class.
 */
 class CostLayer : public Layer {
-public:
+ public:
  explicit CostLayer(const LayerConfig& config) : Layer(config) {}

  bool init(const LayerMap& layerMap,
@@ -51,7 +51,7 @@ public:
                           Argument& label,
                           Matrix& outputGrad) = 0;

-protected:
+ protected:
  LayerPtr weightLayer_;
  real coeff_;
 };

@@ -65,7 +65,7 @@ protected:
 * \f]
 */
 class MultiClassCrossEntropy : public CostLayer {
-public:
+ public:
  explicit MultiClassCrossEntropy(const LayerConfig& config)
      : CostLayer(config) {}

@@ -95,7 +95,7 @@ public:
 * In Proceedings of the ACL 2014 Conference.
 */
 class MultiClassCrossEntropyWithSelfNorm : public CostLayer {
-public:
+ public:
  explicit MultiClassCrossEntropyWithSelfNorm(const LayerConfig& config)
      : CostLayer(config) {}

@@ -108,7 +108,7 @@ public:
                   Argument& label,
                   Matrix& outputGrad) override;

-protected:
+ protected:
  MatrixPtr sftMaxSum_;
  MatrixPtr sumInv_;
 };

@@ -120,7 +120,7 @@ protected:
 * \f]
 */
 class SoftBinaryClassCrossEntropy : public CostLayer {
-public:
+ public:
  explicit SoftBinaryClassCrossEntropy(const LayerConfig& config)
      : CostLayer(config) {}

@@ -133,7 +133,7 @@ public:
                   Argument& label,
                   Matrix& outputGrad) override;

-protected:
+ protected:
  MatrixPtr targetPerDim_;
 };

@@ -145,7 +145,7 @@ protected:
 * \f]
 */
 class SumOfSquaresCostLayer : public CostLayer {
-public:
+ public:
  explicit SumOfSquaresCostLayer(const LayerConfig& config)
      : CostLayer(config) {}

@@ -171,7 +171,7 @@ public:
 * x = output - label
 */
 class SmoothL1CostLayer : public CostLayer {
-public:
+ public:
  explicit SmoothL1CostLayer(const LayerConfig& config) : CostLayer(config) {}

  bool init(const LayerMap& layerMap,
@@ -197,7 +197,7 @@ public:
 * Rank useing Gradient Descent.
 */
 class RankingCost : public Layer {
-public:
+ public:
  explicit RankingCost(const LayerConfig& config) : Layer(config) {}

  bool init(const LayerMap& layerMap,
@@ -225,7 +225,7 @@ public:
    (void)outputGrad;
  }

-private:
+ private:
  double posPairCount_;
  double negPairCount_;
  MatrixPtr margin_;
@@ -250,7 +250,7 @@ private:
 * with Nonsmooth Cost Functions.
 */
 class LambdaCost : public Layer {
-public:
+ public:
  explicit LambdaCost(const LayerConfig& config) : Layer(config) {}

  bool init(const LayerMap& layerMap,
@@ -270,7 +270,7 @@ public:
                    real* gradData,
                    int size);

-private:
+ private:
  MatrixPtr marginGrad_;
  int truncationSize_;
  int maxSortSize_;
@@ -287,10 +287,10 @@ private:
 * \f]
 */
 class MultiBinaryLabelCrossEntropy : public CostLayer {
-protected:
+ protected:
  MatrixPtr targetPerDim_;

-public:
+ public:
  explicit MultiBinaryLabelCrossEntropy(const LayerConfig& config)
      : CostLayer(config) {}

@@ -308,7 +308,7 @@ public:
 * A base layer for HuberRegressionLoss and HuberTwoClassification.
 */
 class HuberCost : public CostLayer {
-public:
+ public:
  std::vector<Argument> tmpCpuInput_;

  explicit HuberCost(const LayerConfig& config) : CostLayer(config) {}

@@ -331,7 +331,7 @@ public:
 *          Loss = delta * abs(y - f) - 0.5 * delta^2, otherwise
 */
 class HuberRegressionLoss : public HuberCost {
-public:
+ public:
  explicit HuberRegressionLoss(const LayerConfig& config) : HuberCost(config) {}

  bool init(const LayerMap& layerMap,
@@ -343,7 +343,7 @@ public:
                   Argument& label,
                   Matrix& outputGrad) override;

-protected:
+ protected:
  real delta_;
 };

@@ -356,7 +356,7 @@ protected:
 *          Loss = 0, otherwise
 */
 class HuberTwoClassification : public HuberCost {
-public:
+ public:
  explicit HuberTwoClassification(const LayerConfig& config)
      : HuberCost(config) {}

diff --git a/paddle/gserver/layers/CropLayer.cpp b/paddle/legacy/gserver/layers/CropLayer.cpp
similarity index 99%
rename from paddle/gserver/layers/CropLayer.cpp
rename to paddle/legacy/gserver/layers/CropLayer.cpp
index bc97ca2f9e..d891375ecc 100644
--- a/paddle/gserver/layers/CropLayer.cpp
+++ b/paddle/legacy/gserver/layers/CropLayer.cpp
@@ -13,7 +13,7 @@ See the License for the specific language governing permissions and
 limitations under the License. */

 #include "CropLayer.h"
-#include "paddle/utils/Stat.h"
+#include "paddle/legacy/utils/Stat.h"
 namespace paddle {
 REGISTER_LAYER(crop, CropLayer);

diff --git a/paddle/gserver/layers/CropLayer.h b/paddle/legacy/gserver/layers/CropLayer.h
similarity index 98%
rename from paddle/gserver/layers/CropLayer.h
rename to paddle/legacy/gserver/layers/CropLayer.h
index 1a85911ef7..ef88bc483d 100644
--- a/paddle/gserver/layers/CropLayer.h
+++ b/paddle/legacy/gserver/layers/CropLayer.h
@@ -28,7 +28,7 @@ namespace paddle {
 * crop input as this shape conf
 */
 class CropLayer : public Layer {
-public:
+ public:
  explicit CropLayer(const LayerConfig& config) : Layer(config) {}

  ~CropLayer() {}
@@ -38,7 +38,7 @@ public:
  void forward(PassType passType) override;
  void backward(const UpdateCallback& callback = nullptr) override;

-protected:
+ protected:
  void setOutDims();
  void setInDims();

diff --git a/paddle/gserver/layers/CrossChannelNormLayer.cpp b/paddle/legacy/gserver/layers/CrossChannelNormLayer.cpp
similarity index 98%
rename from paddle/gserver/layers/CrossChannelNormLayer.cpp
rename to paddle/legacy/gserver/layers/CrossChannelNormLayer.cpp
index 644450291e..0fe100a96c 100644
--- a/paddle/gserver/layers/CrossChannelNormLayer.cpp
+++ b/paddle/legacy/gserver/layers/CrossChannelNormLayer.cpp
@@ -14,8 +14,8 @@ limitations under the License. */
*/ #include "Layer.h" #include "NormLayer.h" -#include "paddle/math/BaseMatrix.h" -#include "paddle/math/Matrix.h" +#include "paddle/legacy/math/BaseMatrix.h" +#include "paddle/legacy/math/Matrix.h" namespace paddle { diff --git a/paddle/gserver/layers/CrossEntropyOverBeam.cpp b/paddle/legacy/gserver/layers/CrossEntropyOverBeam.cpp similarity index 100% rename from paddle/gserver/layers/CrossEntropyOverBeam.cpp rename to paddle/legacy/gserver/layers/CrossEntropyOverBeam.cpp diff --git a/paddle/gserver/layers/CrossEntropyOverBeam.h b/paddle/legacy/gserver/layers/CrossEntropyOverBeam.h similarity index 99% rename from paddle/gserver/layers/CrossEntropyOverBeam.h rename to paddle/legacy/gserver/layers/CrossEntropyOverBeam.h index b47a2933c2..c8702b1616 100644 --- a/paddle/gserver/layers/CrossEntropyOverBeam.h +++ b/paddle/legacy/gserver/layers/CrossEntropyOverBeam.h @@ -44,7 +44,7 @@ struct BeamExpansion { typedef std::shared_ptr BeamExpansionPtr; class CostForOneSequence { -public: + public: CostForOneSequence() : beamSize_(0), validExpansionCount_(0), goldAsExtraPath_(false) {} void setData(const BeamExpansionPtr bPtr, size_t beamSize) { @@ -64,7 +64,7 @@ public: real forward(); void backward(); -private: + private: void calValidExpandStep(); void constructTotalExpansion(); size_t initLastExpansion(); @@ -93,14 +93,14 @@ private: }; class CrossEntropyOverBeam : public Layer { -public: + public: explicit CrossEntropyOverBeam(const LayerConfig& config) : Layer(config) {} bool init(const LayerMap& layerMap, const ParameterMap& parameterMap) override; void forward(PassType passType) override; void backward(const UpdateCallback& callback) override; -private: + private: void checkInputs(); void copyInputsToCpu(); void resizeOutput(); diff --git a/paddle/gserver/layers/CudnnBatchNormLayer.cpp b/paddle/legacy/gserver/layers/CudnnBatchNormLayer.cpp similarity index 98% rename from paddle/gserver/layers/CudnnBatchNormLayer.cpp rename to paddle/legacy/gserver/layers/CudnnBatchNormLayer.cpp index 9a29e6a55e..051155e0d2 100644 --- a/paddle/gserver/layers/CudnnBatchNormLayer.cpp +++ b/paddle/legacy/gserver/layers/CudnnBatchNormLayer.cpp @@ -14,8 +14,8 @@ limitations under the License. */ #include "CudnnBatchNormLayer.h" #include "Layer.h" -#include "paddle/cuda/include/hl_batch_norm.h" -#include "paddle/utils/Stat.h" +#include "paddle/legacy/cuda/include/hl_batch_norm.h" +#include "paddle/legacy/utils/Stat.h" namespace paddle { diff --git a/paddle/gserver/layers/CudnnBatchNormLayer.h b/paddle/legacy/gserver/layers/CudnnBatchNormLayer.h similarity index 97% rename from paddle/gserver/layers/CudnnBatchNormLayer.h rename to paddle/legacy/gserver/layers/CudnnBatchNormLayer.h index aa279f73d6..3b33b983b3 100644 --- a/paddle/gserver/layers/CudnnBatchNormLayer.h +++ b/paddle/legacy/gserver/layers/CudnnBatchNormLayer.h @@ -17,7 +17,7 @@ limitations under the License. */ #include #include "BatchNormBaseLayer.h" #include "Layer.h" -#include "paddle/utils/Stat.h" +#include "paddle/legacy/utils/Stat.h" namespace paddle { @@ -30,7 +30,7 @@ namespace paddle { */ class CudnnBatchNormLayer : public BatchNormBaseLayer { -public: + public: explicit CudnnBatchNormLayer(const LayerConfig& config) : BatchNormBaseLayer(config) {} @@ -46,7 +46,7 @@ public: void forward(PassType passType) override; void backward(const UpdateCallback& callback = nullptr) override; -protected: + protected: /// Epsilon value used in the batch normalization formula. /// Same epsilon value should be used in forward and backward functions. 
  double eps_;

diff --git a/paddle/gserver/layers/CudnnConvBaseLayer.cpp b/paddle/legacy/gserver/layers/CudnnConvBaseLayer.cpp
similarity index 98%
rename from paddle/gserver/layers/CudnnConvBaseLayer.cpp
rename to paddle/legacy/gserver/layers/CudnnConvBaseLayer.cpp
index 6d0a40a607..9353cca9c8 100644
--- a/paddle/gserver/layers/CudnnConvBaseLayer.cpp
+++ b/paddle/legacy/gserver/layers/CudnnConvBaseLayer.cpp
@@ -13,8 +13,8 @@ See the License for the specific language governing permissions and
 limitations under the License. */

 #include "CudnnConvBaseLayer.h"
-#include "paddle/utils/Logging.h"
-#include "paddle/utils/Stat.h"
+#include "paddle/legacy/utils/Logging.h"
+#include "paddle/legacy/utils/Stat.h"
 namespace paddle {
 REGISTER_LAYER(cudnn_conv, CudnnConvBaseLayer);

diff --git a/paddle/gserver/layers/CudnnConvBaseLayer.h b/paddle/legacy/gserver/layers/CudnnConvBaseLayer.h
similarity index 96%
rename from paddle/gserver/layers/CudnnConvBaseLayer.h
rename to paddle/legacy/gserver/layers/CudnnConvBaseLayer.h
index 698104e4fb..d050183eb7 100644
--- a/paddle/gserver/layers/CudnnConvBaseLayer.h
+++ b/paddle/legacy/gserver/layers/CudnnConvBaseLayer.h
@@ -17,7 +17,7 @@ limitations under the License. */
 #include <vector>
 #include "ConvBaseLayer.h"
 #include "Projection.h"
-#include "paddle/math/Matrix.h"
+#include "paddle/legacy/math/Matrix.h"

 namespace paddle {

@@ -31,14 +31,14 @@ namespace paddle {
 * The config file api is img_conv_layer.
 */
 class CudnnConvBaseLayer : public ConvBaseLayer {
-protected:
+ protected:
  std::vector<std::unique_ptr<ProjectionConfig>> projConf_;
  std::vector<std::unique_ptr<Projection>> projections_;

  hl_tensor_descriptor biasDesc_;
  hl_tensor_descriptor outputDesc_;

-public:
+ public:
  explicit CudnnConvBaseLayer(const LayerConfig& config)
      : ConvBaseLayer(config) {}

diff --git a/paddle/gserver/layers/CudnnPoolLayer.cpp b/paddle/legacy/gserver/layers/CudnnPoolLayer.cpp
similarity index 97%
rename from paddle/gserver/layers/CudnnPoolLayer.cpp
rename to paddle/legacy/gserver/layers/CudnnPoolLayer.cpp
index ac6d2168f4..c790dfd71e 100644
--- a/paddle/gserver/layers/CudnnPoolLayer.cpp
+++ b/paddle/legacy/gserver/layers/CudnnPoolLayer.cpp
@@ -13,9 +13,9 @@ See the License for the specific language governing permissions and
 limitations under the License. */

 #include "CudnnPoolLayer.h"
-#include "paddle/math/Matrix.h"
-#include "paddle/utils/Logging.h"
-#include "paddle/utils/Stat.h"
+#include "paddle/legacy/math/Matrix.h"
+#include "paddle/legacy/utils/Logging.h"
+#include "paddle/legacy/utils/Stat.h"

 namespace paddle {

diff --git a/paddle/gserver/layers/CudnnPoolLayer.h b/paddle/legacy/gserver/layers/CudnnPoolLayer.h
similarity index 98%
rename from paddle/gserver/layers/CudnnPoolLayer.h
rename to paddle/legacy/gserver/layers/CudnnPoolLayer.h
index 9eb4fc6138..fc249354d1 100644
--- a/paddle/gserver/layers/CudnnPoolLayer.h
+++ b/paddle/legacy/gserver/layers/CudnnPoolLayer.h
@@ -26,7 +26,7 @@ namespace paddle {
 */

 class CudnnPoolLayer : public PoolLayer {
-protected:
+ protected:
  int windowHeight, windowWidth;
  int heightPadding, widthPadding, strideHeight, strideWidth;
  int imageH_, imageW_, outputH_, outputW_;
@@ -40,7 +40,7 @@ protected:
  /// A description of a pooling operation.
  hl_pooling_descriptor poolingDesc_;

-public:
+ public:
  static bool typeCheck(const std::string& poolType,
                        hl_pooling_mode_t* mode = nullptr);
  explicit CudnnPoolLayer(const LayerConfig& config);

diff --git a/paddle/gserver/layers/DataLayer.cpp b/paddle/legacy/gserver/layers/DataLayer.cpp
similarity index 100%
rename from paddle/gserver/layers/DataLayer.cpp
rename to paddle/legacy/gserver/layers/DataLayer.cpp
diff --git a/paddle/gserver/layers/DataLayer.h b/paddle/legacy/gserver/layers/DataLayer.h
similarity index 98%
rename from paddle/gserver/layers/DataLayer.h
rename to paddle/legacy/gserver/layers/DataLayer.h
index 4b12afe0ef..d02f5a4697 100644
--- a/paddle/gserver/layers/DataLayer.h
+++ b/paddle/legacy/gserver/layers/DataLayer.h
@@ -25,7 +25,7 @@ namespace paddle {
 * The config file api is data_layer.
 */
 class DataLayer : public Layer {
-public:
+ public:
  explicit DataLayer(const LayerConfig& config) : Layer(config) {}

  virtual void setData(const Argument& data) { data_ = data; }
@@ -58,10 +58,10 @@ public:
    }
  }

-private:
+ private:
  void copyDataToOutput(Argument& output);

-protected:
+ protected:
  Argument data_;
 };

diff --git a/paddle/gserver/layers/DataNormLayer.cpp b/paddle/legacy/gserver/layers/DataNormLayer.cpp
similarity index 98%
rename from paddle/gserver/layers/DataNormLayer.cpp
rename to paddle/legacy/gserver/layers/DataNormLayer.cpp
index 86da4d6f95..6820dfa4d4 100644
--- a/paddle/gserver/layers/DataNormLayer.cpp
+++ b/paddle/legacy/gserver/layers/DataNormLayer.cpp
@@ -13,8 +13,8 @@ See the License for the specific language governing permissions and
 limitations under the License. */

 #include "DataNormLayer.h"
-#include "paddle/utils/Logging.h"
-#include "paddle/utils/Stat.h"
+#include "paddle/legacy/utils/Logging.h"
+#include "paddle/legacy/utils/Stat.h"

 namespace paddle {

diff --git a/paddle/gserver/layers/DataNormLayer.h b/paddle/legacy/gserver/layers/DataNormLayer.h
similarity index 94%
rename from paddle/gserver/layers/DataNormLayer.h
rename to paddle/legacy/gserver/layers/DataNormLayer.h
index 2a2a2a4aa7..7bb8e92824 100644
--- a/paddle/gserver/layers/DataNormLayer.h
+++ b/paddle/legacy/gserver/layers/DataNormLayer.h
@@ -15,8 +15,8 @@ limitations under the License. */
 #pragma once

 #include "Layer.h"
-#include "paddle/math/Matrix.h"
-#include "paddle/utils/ThreadLocal.h"
+#include "paddle/legacy/math/Matrix.h"
+#include "paddle/legacy/utils/ThreadLocal.h"

 namespace paddle {

@@ -37,7 +37,7 @@ namespace paddle {
 */

 class DataNormLayer : public Layer {
-public:
+ public:
  enum NormalizationStrategy { kZScore = 0, kMinMax = 1, kDecimalScaling = 2 };

  explicit DataNormLayer(const LayerConfig& config) : Layer(config) {}
@@ -50,7 +50,7 @@ public:
  void forward(PassType passType) override;
  void backward(const UpdateCallback& callback = nullptr) override;

-protected:
+ protected:
  int mode_;
  std::unique_ptr<Weight> weight_;
  MatrixPtr min_;

diff --git a/paddle/gserver/layers/DeConv3DLayer.cpp b/paddle/legacy/gserver/layers/DeConv3DLayer.cpp
similarity index 99%
rename from paddle/gserver/layers/DeConv3DLayer.cpp
rename to paddle/legacy/gserver/layers/DeConv3DLayer.cpp
index db6d6e073c..2cd635564c 100644
--- a/paddle/gserver/layers/DeConv3DLayer.cpp
+++ b/paddle/legacy/gserver/layers/DeConv3DLayer.cpp
@@ -13,8 +13,8 @@ See the License for the specific language governing permissions and
 limitations under the License. */
*/ #include "DeConv3DLayer.h" -#include "paddle/utils/Logging.h" -#include "paddle/utils/Stat.h" +#include "paddle/legacy/utils/Logging.h" +#include "paddle/legacy/utils/Stat.h" namespace paddle { diff --git a/paddle/gserver/layers/DeConv3DLayer.h b/paddle/legacy/gserver/layers/DeConv3DLayer.h similarity index 93% rename from paddle/gserver/layers/DeConv3DLayer.h rename to paddle/legacy/gserver/layers/DeConv3DLayer.h index 57d51cdec6..9931bccb12 100644 --- a/paddle/gserver/layers/DeConv3DLayer.h +++ b/paddle/legacy/gserver/layers/DeConv3DLayer.h @@ -16,8 +16,8 @@ limitations under the License. */ #include #include "ConvBaseLayer.h" -#include "paddle/math/MathUtils.h" -#include "paddle/math/Matrix.h" +#include "paddle/legacy/math/MathUtils.h" +#include "paddle/legacy/math/Matrix.h" namespace paddle { @@ -27,7 +27,7 @@ namespace paddle { * calculate deconvolution3D operation. */ class DeConv3DLayer : public ConvBaseLayer { -public: + public: explicit DeConv3DLayer(const LayerConfig& config) : ConvBaseLayer(config) {} ~DeConv3DLayer() {} bool init(const LayerMap& layerMap, const ParameterMap& parameterMap); @@ -40,7 +40,7 @@ public: void bpropWeights(int i); size_t getSize(); -protected: + protected: // Figure out the dimensions for individual gemms. IntV M_; /// numFilters_ / filter_group_; IntV N_; /// channels_ * filterSizeZ_ * filterSize_ * filterSizeY_ diff --git a/paddle/gserver/layers/DetectionOutputLayer.cpp b/paddle/legacy/gserver/layers/DetectionOutputLayer.cpp similarity index 100% rename from paddle/gserver/layers/DetectionOutputLayer.cpp rename to paddle/legacy/gserver/layers/DetectionOutputLayer.cpp diff --git a/paddle/gserver/layers/DetectionOutputLayer.h b/paddle/legacy/gserver/layers/DetectionOutputLayer.h similarity index 98% rename from paddle/gserver/layers/DetectionOutputLayer.h rename to paddle/legacy/gserver/layers/DetectionOutputLayer.h index 174a6e5d9a..b0270ed331 100644 --- a/paddle/gserver/layers/DetectionOutputLayer.h +++ b/paddle/legacy/gserver/layers/DetectionOutputLayer.h @@ -33,7 +33,7 @@ namespace paddle { */ class DetectionOutputLayer : public Layer { -public: + public: explicit DetectionOutputLayer(const LayerConfig& config) : Layer(config) {} bool init(const LayerMap& layerMap, const ParameterMap& parameterMap); @@ -42,7 +42,7 @@ public: void backward(const UpdateCallback& callback = nullptr) {} -protected: + protected: inline LayerPtr getPriorBoxLayer() { return inputLayers_[0]; } inline LayerPtr getLocInputLayer(size_t index) { @@ -53,7 +53,7 @@ protected: return inputLayers_[1 + inputNum_ + index]; } -private: + private: size_t numClasses_; // number of classes size_t inputNum_; // number of input layers real nmsThreshold_; diff --git a/paddle/gserver/layers/DetectionUtil.cpp b/paddle/legacy/gserver/layers/DetectionUtil.cpp similarity index 100% rename from paddle/gserver/layers/DetectionUtil.cpp rename to paddle/legacy/gserver/layers/DetectionUtil.cpp diff --git a/paddle/gserver/layers/DetectionUtil.h b/paddle/legacy/gserver/layers/DetectionUtil.h similarity index 99% rename from paddle/gserver/layers/DetectionUtil.h rename to paddle/legacy/gserver/layers/DetectionUtil.h index d6502fcf8f..c1e0bb809a 100644 --- a/paddle/gserver/layers/DetectionUtil.h +++ b/paddle/legacy/gserver/layers/DetectionUtil.h @@ -17,7 +17,7 @@ limitations under the License. 
 #include <map>
 #include <utility>
 #include <vector>
-#include "paddle/math/Matrix.h"
+#include "paddle/legacy/math/Matrix.h"

 using std::vector;
 using std::pair;

diff --git a/paddle/gserver/layers/DotMulOperator.cpp b/paddle/legacy/gserver/layers/DotMulOperator.cpp
similarity index 99%
rename from paddle/gserver/layers/DotMulOperator.cpp
rename to paddle/legacy/gserver/layers/DotMulOperator.cpp
index 68db2929ad..03d18d9b23 100644
--- a/paddle/gserver/layers/DotMulOperator.cpp
+++ b/paddle/legacy/gserver/layers/DotMulOperator.cpp
@@ -27,7 +27,7 @@ namespace paddle {
 * The config file api is dotmul_operator.
 */
 class DotMulOperator : public Operator {
-public:
+ public:
  DotMulOperator(const OperatorConfig& config, bool useGpu);
  virtual void forward();
  virtual void backward();

diff --git a/paddle/gserver/layers/DotMulProjection.cpp b/paddle/legacy/gserver/layers/DotMulProjection.cpp
similarity index 98%
rename from paddle/gserver/layers/DotMulProjection.cpp
rename to paddle/legacy/gserver/layers/DotMulProjection.cpp
index 86453aae84..d778038767 100644
--- a/paddle/gserver/layers/DotMulProjection.cpp
+++ b/paddle/legacy/gserver/layers/DotMulProjection.cpp
@@ -26,14 +26,14 @@ namespace paddle {
 * The config file api is dotmul_projection.
 */
 class DotMulProjection : public Projection {
-public:
+ public:
  DotMulProjection(const ProjectionConfig& config,
                   const ParameterPtr& parameter,
                   bool useGpu);
  virtual void forward();
  virtual void backward(const UpdateCallback& callback);

-protected:
+ protected:
  /// shared memory with parameter
  std::unique_ptr<Weight> weight_;
 };

diff --git a/paddle/gserver/layers/DotProdLayer.cpp b/paddle/legacy/gserver/layers/DotProdLayer.cpp
similarity index 95%
rename from paddle/gserver/layers/DotProdLayer.cpp
rename to paddle/legacy/gserver/layers/DotProdLayer.cpp
index 5148d93e27..06060d93f7 100644
--- a/paddle/gserver/layers/DotProdLayer.cpp
+++ b/paddle/legacy/gserver/layers/DotProdLayer.cpp
@@ -13,9 +13,9 @@ See the License for the specific language governing permissions and
 limitations under the License. */

 #include "Layer.h"
-#include "paddle/math/Matrix.h"
-#include "paddle/utils/Logging.h"
-#include "paddle/utils/Stat.h"
+#include "paddle/legacy/math/Matrix.h"
+#include "paddle/legacy/utils/Logging.h"
+#include "paddle/legacy/utils/Stat.h"

 namespace paddle {

@@ -27,7 +27,7 @@ namespace paddle {
 */

 class DotProdLayer : public Layer {
-public:
+ public:
  explicit DotProdLayer(const LayerConfig& config) : Layer(config) {}

  ~DotProdLayer() {}

diff --git a/paddle/gserver/layers/EosIdCheckLayer.cpp b/paddle/legacy/gserver/layers/EosIdCheckLayer.cpp
similarity index 96%
rename from paddle/gserver/layers/EosIdCheckLayer.cpp
rename to paddle/legacy/gserver/layers/EosIdCheckLayer.cpp
index 470a5b8ea2..38671126c6 100644
--- a/paddle/gserver/layers/EosIdCheckLayer.cpp
+++ b/paddle/legacy/gserver/layers/EosIdCheckLayer.cpp
@@ -13,7 +13,7 @@ See the License for the specific language governing permissions and
 limitations under the License. */

 #include "Layer.h"
-#include "paddle/utils/Logging.h"
+#include "paddle/legacy/utils/Logging.h"

 namespace paddle {
 /**
@@ -24,7 +24,7 @@ namespace paddle {
 * It is used by recurrent layer group.
 */
 class EosIdCheckLayer : public Layer {
-public:
+ public:
  explicit EosIdCheckLayer(const LayerConfig& config) : Layer(config) {}

  bool init(const LayerMap& layerMap,

diff --git a/paddle/gserver/layers/ExpandConvLayer.cpp b/paddle/legacy/gserver/layers/ExpandConvLayer.cpp
similarity index 99%
rename from paddle/gserver/layers/ExpandConvLayer.cpp
rename to paddle/legacy/gserver/layers/ExpandConvLayer.cpp
index 3a84786582..8a53db3806 100644
--- a/paddle/gserver/layers/ExpandConvLayer.cpp
+++ b/paddle/legacy/gserver/layers/ExpandConvLayer.cpp
@@ -13,8 +13,8 @@ See the License for the specific language governing permissions and
 limitations under the License. */

 #include "ExpandConvLayer.h"
-#include "paddle/utils/Logging.h"
-#include "paddle/utils/Stat.h"
+#include "paddle/legacy/utils/Logging.h"
+#include "paddle/legacy/utils/Stat.h"

 DEFINE_bool(use_nnpack,
             false,

diff --git a/paddle/gserver/layers/ExpandConvLayer.h b/paddle/legacy/gserver/layers/ExpandConvLayer.h
similarity index 95%
rename from paddle/gserver/layers/ExpandConvLayer.h
rename to paddle/legacy/gserver/layers/ExpandConvLayer.h
index be968155ef..c0eff3ab06 100644
--- a/paddle/gserver/layers/ExpandConvLayer.h
+++ b/paddle/legacy/gserver/layers/ExpandConvLayer.h
@@ -16,7 +16,7 @@ limitations under the License. */
 #include <vector>
 #include "ConvBaseLayer.h"
-#include "paddle/math/Matrix.h"
+#include "paddle/legacy/math/Matrix.h"

 namespace paddle {

@@ -29,7 +29,7 @@ namespace paddle {
 */

 class ExpandConvLayer : public ConvBaseLayer {
-public:
+ public:
  explicit ExpandConvLayer(const LayerConfig& config) : ConvBaseLayer(config) {}

  ~ExpandConvLayer() {}

@@ -42,7 +42,7 @@ public:

  size_t getOutputSize();

-protected:
+ protected:
  std::vector<TensorShape> inputShape_;
  std::vector<TensorShape> filterShape_;
  std::vector<TensorShape> outputShape_;

diff --git a/paddle/gserver/layers/ExpandLayer.cpp b/paddle/legacy/gserver/layers/ExpandLayer.cpp
similarity index 98%
rename from paddle/gserver/layers/ExpandLayer.cpp
rename to paddle/legacy/gserver/layers/ExpandLayer.cpp
index 6b57767540..074fbab8ef 100644
--- a/paddle/gserver/layers/ExpandLayer.cpp
+++ b/paddle/legacy/gserver/layers/ExpandLayer.cpp
@@ -13,8 +13,8 @@ See the License for the specific language governing permissions and
 limitations under the License. */

 #include "ExpandLayer.h"
-#include "paddle/utils/Logging.h"
-#include "paddle/utils/Stat.h"
+#include "paddle/legacy/utils/Logging.h"
+#include "paddle/legacy/utils/Stat.h"

 namespace paddle {

diff --git a/paddle/gserver/layers/ExpandLayer.h b/paddle/legacy/gserver/layers/ExpandLayer.h
similarity index 96%
rename from paddle/gserver/layers/ExpandLayer.h
rename to paddle/legacy/gserver/layers/ExpandLayer.h
index 04bbfcbd04..75a1ec7568 100644
--- a/paddle/gserver/layers/ExpandLayer.h
+++ b/paddle/legacy/gserver/layers/ExpandLayer.h
@@ -15,7 +15,7 @@ limitations under the License. */
*/ #pragma once #include "Layer.h" -#include "paddle/math/Matrix.h" +#include "paddle/legacy/math/Matrix.h" namespace paddle { @@ -37,7 +37,7 @@ namespace paddle { */ class ExpandLayer : public Layer { -protected: + protected: std::unique_ptr biases_; /// if input[0] is dense data, ExpandLevel=kNonSeq; /// if input[0] is sequence data, ExpandLevel=kSeq @@ -48,7 +48,7 @@ protected: /// of input[1] ICpuGpuVectorPtr expandStartsPos_; -public: + public: explicit ExpandLayer(const LayerConfig& config) : Layer(config) {} ~ExpandLayer() {} diff --git a/paddle/gserver/layers/FactorizationMachineLayer.cpp b/paddle/legacy/gserver/layers/FactorizationMachineLayer.cpp similarity index 97% rename from paddle/gserver/layers/FactorizationMachineLayer.cpp rename to paddle/legacy/gserver/layers/FactorizationMachineLayer.cpp index 1744faada2..6cf269fa3f 100644 --- a/paddle/gserver/layers/FactorizationMachineLayer.cpp +++ b/paddle/legacy/gserver/layers/FactorizationMachineLayer.cpp @@ -15,9 +15,9 @@ limitations under the License. */ #include "FactorizationMachineLayer.h" #include #include -#include "paddle/math/SparseMatrix.h" -#include "paddle/utils/Logging.h" -#include "paddle/utils/Stat.h" +#include "paddle/legacy/math/SparseMatrix.h" +#include "paddle/legacy/utils/Logging.h" +#include "paddle/legacy/utils/Stat.h" namespace paddle { diff --git a/paddle/gserver/layers/FactorizationMachineLayer.h b/paddle/legacy/gserver/layers/FactorizationMachineLayer.h similarity index 95% rename from paddle/gserver/layers/FactorizationMachineLayer.h rename to paddle/legacy/gserver/layers/FactorizationMachineLayer.h index 684da4e65a..fc015ed727 100644 --- a/paddle/gserver/layers/FactorizationMachineLayer.h +++ b/paddle/legacy/gserver/layers/FactorizationMachineLayer.h @@ -15,8 +15,8 @@ limitations under the License. */ #pragma once #include "Layer.h" -#include "paddle/math/Matrix.h" -#include "paddle/utils/ThreadLocal.h" +#include "paddle/legacy/math/Matrix.h" +#include "paddle/legacy/utils/ThreadLocal.h" namespace paddle { /** @@ -42,7 +42,7 @@ namespace paddle { */ class FactorizationMachineLayer : public Layer { -protected: + protected: // The latent vectors, shape: (size, factorSize_) // Each row of the latentVectors_ matrix is the latent vector // corresponding to one input feature dimension @@ -50,7 +50,7 @@ protected: // The hyperparameter that defines the dimensionality of the factorization size_t factorSize_; -private: + private: // Store the square values of the letent vectors matrix MatrixPtr latentVectorsSquare_; // Store the square values of input matrix @@ -65,7 +65,7 @@ private: // Negative identity matrix MatrixPtr negOnes_; -public: + public: explicit FactorizationMachineLayer(const LayerConfig& config) : Layer(config) {} ~FactorizationMachineLayer() {} diff --git a/paddle/gserver/layers/FeatureMapExpandLayer.cpp b/paddle/legacy/gserver/layers/FeatureMapExpandLayer.cpp similarity index 98% rename from paddle/gserver/layers/FeatureMapExpandLayer.cpp rename to paddle/legacy/gserver/layers/FeatureMapExpandLayer.cpp index 81b98da45b..a3fe1433e4 100644 --- a/paddle/gserver/layers/FeatureMapExpandLayer.cpp +++ b/paddle/legacy/gserver/layers/FeatureMapExpandLayer.cpp @@ -13,8 +13,8 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ #include "Layer.h" -#include "paddle/math/Matrix.h" -#include "paddle/utils/Stat.h" +#include "paddle/legacy/math/Matrix.h" +#include "paddle/legacy/utils/Stat.h" namespace paddle { @@ -38,11 +38,11 @@ namespace paddle { */ class FeatureMapExpandLayer : public Layer { -private: + private: int numFilters_; bool asRowVector_; -public: + public: explicit FeatureMapExpandLayer(const LayerConfig& config) : Layer(config) {} ~FeatureMapExpandLayer() {} diff --git a/paddle/gserver/layers/FullMatrixProjection.cpp b/paddle/legacy/gserver/layers/FullMatrixProjection.cpp similarity index 100% rename from paddle/gserver/layers/FullMatrixProjection.cpp rename to paddle/legacy/gserver/layers/FullMatrixProjection.cpp diff --git a/paddle/gserver/layers/FullMatrixProjection.h b/paddle/legacy/gserver/layers/FullMatrixProjection.h similarity index 95% rename from paddle/gserver/layers/FullMatrixProjection.h rename to paddle/legacy/gserver/layers/FullMatrixProjection.h index 7c4cd1a706..c33d02a3ae 100644 --- a/paddle/gserver/layers/FullMatrixProjection.h +++ b/paddle/legacy/gserver/layers/FullMatrixProjection.h @@ -13,7 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #pragma once -#include "paddle/utils/Stat.h" +#include "paddle/legacy/utils/Stat.h" #include "Projection.h" @@ -28,14 +28,14 @@ namespace paddle { * The config file api is full_matrix_projection. */ class FullMatrixProjection : public Projection { -public: + public: FullMatrixProjection(const ProjectionConfig& config, const ParameterPtr& parameter, bool useGpu); virtual void forward(); virtual void backward(const UpdateCallback& callback); -protected: + protected: std::unique_ptr weight_; }; diff --git a/paddle/gserver/layers/FullyConnectedLayer.cpp b/paddle/legacy/gserver/layers/FullyConnectedLayer.cpp similarity index 97% rename from paddle/gserver/layers/FullyConnectedLayer.cpp rename to paddle/legacy/gserver/layers/FullyConnectedLayer.cpp index 21ffa01d95..07f4dfbe39 100644 --- a/paddle/gserver/layers/FullyConnectedLayer.cpp +++ b/paddle/legacy/gserver/layers/FullyConnectedLayer.cpp @@ -15,9 +15,9 @@ limitations under the License. */ #include "FullyConnectedLayer.h" #include #include -#include "paddle/math/SparseMatrix.h" -#include "paddle/utils/Logging.h" -#include "paddle/utils/Stat.h" +#include "paddle/legacy/math/SparseMatrix.h" +#include "paddle/legacy/utils/Logging.h" +#include "paddle/legacy/utils/Stat.h" namespace paddle { diff --git a/paddle/gserver/layers/FullyConnectedLayer.h b/paddle/legacy/gserver/layers/FullyConnectedLayer.h similarity index 92% rename from paddle/gserver/layers/FullyConnectedLayer.h rename to paddle/legacy/gserver/layers/FullyConnectedLayer.h index e66aeeb733..7e29cac043 100644 --- a/paddle/gserver/layers/FullyConnectedLayer.h +++ b/paddle/legacy/gserver/layers/FullyConnectedLayer.h @@ -15,8 +15,8 @@ limitations under the License. 
*/ #pragma once #include "Layer.h" -#include "paddle/math/Matrix.h" -#include "paddle/utils/ThreadLocal.h" +#include "paddle/legacy/math/Matrix.h" +#include "paddle/legacy/utils/ThreadLocal.h" namespace paddle { /** @@ -28,11 +28,11 @@ namespace paddle { */ class FullyConnectedLayer : public Layer { -protected: + protected: WeightList weights_; std::unique_ptr biases_; -public: + public: explicit FullyConnectedLayer(const LayerConfig& config) : Layer(config) {} ~FullyConnectedLayer() {} diff --git a/paddle/gserver/layers/GatedRecurrentLayer.cpp b/paddle/legacy/gserver/layers/GatedRecurrentLayer.cpp similarity index 99% rename from paddle/gserver/layers/GatedRecurrentLayer.cpp rename to paddle/legacy/gserver/layers/GatedRecurrentLayer.cpp index 9d38849fdf..bdcd445cb4 100644 --- a/paddle/gserver/layers/GatedRecurrentLayer.cpp +++ b/paddle/legacy/gserver/layers/GatedRecurrentLayer.cpp @@ -14,7 +14,7 @@ limitations under the License. */ #include "GatedRecurrentLayer.h" #include "Layer.h" -#include "paddle/utils/Stat.h" +#include "paddle/legacy/utils/Stat.h" namespace paddle { diff --git a/paddle/gserver/layers/GatedRecurrentLayer.h b/paddle/legacy/gserver/layers/GatedRecurrentLayer.h similarity index 97% rename from paddle/gserver/layers/GatedRecurrentLayer.h rename to paddle/legacy/gserver/layers/GatedRecurrentLayer.h index f0a3a82301..8bbf01ce20 100644 --- a/paddle/gserver/layers/GatedRecurrentLayer.h +++ b/paddle/legacy/gserver/layers/GatedRecurrentLayer.h @@ -17,7 +17,7 @@ limitations under the License. */ #include "GruCompute.h" #include "Layer.h" #include "SequenceToBatch.h" -#include "paddle/math/Matrix.h" +#include "paddle/legacy/math/Matrix.h" namespace paddle { @@ -47,7 +47,7 @@ namespace paddle { */ class GatedRecurrentLayer : public Layer, public GruCompute { -public: + public: explicit GatedRecurrentLayer(const LayerConfig& config) : Layer(config) {} bool init(const LayerMap& layerMap, @@ -63,7 +63,7 @@ public: LayerStatePtr getState() override; -protected: + protected: void forwardSequence(int batchSize, size_t numSequences, const int* starts, @@ -79,7 +79,7 @@ protected: MatrixPtr inputValue); void backwardBatch(int batchSize, MatrixPtr inputGrad); -protected: + protected: std::unique_ptr weight_; std::unique_ptr gateWeight_; std::unique_ptr stateWeight_; diff --git a/paddle/gserver/layers/GetOutputLayer.cpp b/paddle/legacy/gserver/layers/GetOutputLayer.cpp similarity index 99% rename from paddle/gserver/layers/GetOutputLayer.cpp rename to paddle/legacy/gserver/layers/GetOutputLayer.cpp index f255681f3e..7c1e3c407c 100644 --- a/paddle/gserver/layers/GetOutputLayer.cpp +++ b/paddle/legacy/gserver/layers/GetOutputLayer.cpp @@ -17,7 +17,7 @@ limitations under the License. */ namespace paddle { class GetOutputLayer : public Layer { -public: + public: explicit GetOutputLayer(const LayerConfig& config) : Layer(config) {} ~GetOutputLayer() {} diff --git a/paddle/gserver/layers/GruCompute.cpp b/paddle/legacy/gserver/layers/GruCompute.cpp similarity index 95% rename from paddle/gserver/layers/GruCompute.cpp rename to paddle/legacy/gserver/layers/GruCompute.cpp index 48ddbc413e..adad6285b7 100644 --- a/paddle/gserver/layers/GruCompute.cpp +++ b/paddle/legacy/gserver/layers/GruCompute.cpp @@ -14,8 +14,8 @@ limitations under the License. 
*/ #include "GruCompute.h" #include "hl_recurrent_apply.cuh" -#include "paddle/function/GruFunctor.h" -#include "paddle/utils/Util.h" +#include "paddle/legacy/function/GruFunctor.h" +#include "paddle/legacy/utils/Util.h" namespace paddle { diff --git a/paddle/gserver/layers/GruCompute.cu b/paddle/legacy/gserver/layers/GruCompute.cu similarity index 100% rename from paddle/gserver/layers/GruCompute.cu rename to paddle/legacy/gserver/layers/GruCompute.cu diff --git a/paddle/gserver/layers/GruCompute.h b/paddle/legacy/gserver/layers/GruCompute.h similarity index 94% rename from paddle/gserver/layers/GruCompute.h rename to paddle/legacy/gserver/layers/GruCompute.h index fb6bc56422..6feea7aca8 100644 --- a/paddle/gserver/layers/GruCompute.h +++ b/paddle/legacy/gserver/layers/GruCompute.h @@ -16,12 +16,12 @@ limitations under the License. */ #include "ModelConfig.pb.h" #include "hl_gpu.h" -#include "paddle/utils/Common.h" +#include "paddle/legacy/utils/Common.h" namespace paddle { class GruCompute { -public: + public: void init(LayerConfig &config); template @@ -33,7 +33,7 @@ public: int frameSize, int batchSize = 1); -public: + public: hl_activation_mode_t activeNode_; hl_activation_mode_t activeGate_; }; diff --git a/paddle/gserver/layers/GruStepLayer.cpp b/paddle/legacy/gserver/layers/GruStepLayer.cpp similarity index 98% rename from paddle/gserver/layers/GruStepLayer.cpp rename to paddle/legacy/gserver/layers/GruStepLayer.cpp index 917c50250c..2480e42d68 100644 --- a/paddle/gserver/layers/GruStepLayer.cpp +++ b/paddle/legacy/gserver/layers/GruStepLayer.cpp @@ -14,7 +14,7 @@ limitations under the License. */ #include "GruCompute.h" #include "Layer.h" -#include "paddle/utils/Stat.h" +#include "paddle/legacy/utils/Stat.h" namespace paddle { @@ -44,13 +44,13 @@ namespace paddle { * The config file api if gru_step_layer. */ class GruStepLayer : public Layer, public GruCompute { -protected: + protected: Argument gate_; Argument resetOutput_; std::unique_ptr weight_; std::unique_ptr bias_; -public: + public: explicit GruStepLayer(const LayerConfig& config) : Layer(config) {} ~GruStepLayer() {} diff --git a/paddle/gserver/layers/HierarchicalSigmoidLayer.cpp b/paddle/legacy/gserver/layers/HierarchicalSigmoidLayer.cpp similarity index 99% rename from paddle/gserver/layers/HierarchicalSigmoidLayer.cpp rename to paddle/legacy/gserver/layers/HierarchicalSigmoidLayer.cpp index 3e720f179e..3449599409 100644 --- a/paddle/gserver/layers/HierarchicalSigmoidLayer.cpp +++ b/paddle/legacy/gserver/layers/HierarchicalSigmoidLayer.cpp @@ -13,7 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "HierarchicalSigmoidLayer.h" -#include "paddle/utils/Util.h" +#include "paddle/legacy/utils/Util.h" namespace paddle { diff --git a/paddle/gserver/layers/HierarchicalSigmoidLayer.h b/paddle/legacy/gserver/layers/HierarchicalSigmoidLayer.h similarity index 99% rename from paddle/gserver/layers/HierarchicalSigmoidLayer.h rename to paddle/legacy/gserver/layers/HierarchicalSigmoidLayer.h index 10e501f180..73ef252fd5 100644 --- a/paddle/gserver/layers/HierarchicalSigmoidLayer.h +++ b/paddle/legacy/gserver/layers/HierarchicalSigmoidLayer.h @@ -58,7 +58,7 @@ namespace paddle { * The config file api is hsigmod_layer. 
*/ class HierarchicalSigmoidLayer : public Layer { -public: + public: explicit HierarchicalSigmoidLayer(const LayerConfig& config) : Layer(config) {} bool init(const LayerMap& layerMap, @@ -66,7 +66,7 @@ public: void forward(PassType passType) override; void backward(const UpdateCallback& callback) override; -protected: + protected: /** * The last of inputs is label layer. */ diff --git a/paddle/gserver/layers/IdentityProjection.cpp b/paddle/legacy/gserver/layers/IdentityProjection.cpp similarity index 98% rename from paddle/gserver/layers/IdentityProjection.cpp rename to paddle/legacy/gserver/layers/IdentityProjection.cpp index 6c70f77acc..f707642e09 100644 --- a/paddle/gserver/layers/IdentityProjection.cpp +++ b/paddle/legacy/gserver/layers/IdentityProjection.cpp @@ -13,7 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "Projection.h" -#include "paddle/utils/Stat.h" +#include "paddle/legacy/utils/Stat.h" namespace paddle { @@ -26,7 +26,7 @@ namespace paddle { * The config file api is identity_projection. */ class IdentityProjection : public Projection { -public: + public: IdentityProjection(const ProjectionConfig& config, const ParameterPtr& parameter, bool useGpu); @@ -68,7 +68,7 @@ void IdentityProjection::backward(const UpdateCallback& callback) { * The config file api is identity_projection. */ class IdentityOffsetProjection : public Projection { -public: + public: IdentityOffsetProjection(const ProjectionConfig& config, const ParameterPtr& parameter, bool useGpu); diff --git a/paddle/gserver/layers/InterpolationLayer.cpp b/paddle/legacy/gserver/layers/InterpolationLayer.cpp similarity index 96% rename from paddle/gserver/layers/InterpolationLayer.cpp rename to paddle/legacy/gserver/layers/InterpolationLayer.cpp index 0ac92024bc..ed2294e8a3 100644 --- a/paddle/gserver/layers/InterpolationLayer.cpp +++ b/paddle/legacy/gserver/layers/InterpolationLayer.cpp @@ -13,9 +13,9 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "Layer.h" -#include "paddle/math/Matrix.h" -#include "paddle/utils/Logging.h" -#include "paddle/utils/Stat.h" +#include "paddle/legacy/math/Matrix.h" +#include "paddle/legacy/utils/Logging.h" +#include "paddle/legacy/utils/Stat.h" namespace paddle { @@ -33,12 +33,12 @@ namespace paddle { */ class InterpolationLayer : public Layer { -protected: + protected: /// weightLast = 1 - weight MatrixPtr weightLast_; MatrixPtr tmpMatrix; -public: + public: explicit InterpolationLayer(const LayerConfig& config) : Layer(config) {} ~InterpolationLayer() {} diff --git a/paddle/gserver/layers/KmaxSeqScoreLayer.cpp b/paddle/legacy/gserver/layers/KmaxSeqScoreLayer.cpp similarity index 99% rename from paddle/gserver/layers/KmaxSeqScoreLayer.cpp rename to paddle/legacy/gserver/layers/KmaxSeqScoreLayer.cpp index 0ea960902e..7fd25954ef 100644 --- a/paddle/gserver/layers/KmaxSeqScoreLayer.cpp +++ b/paddle/legacy/gserver/layers/KmaxSeqScoreLayer.cpp @@ -17,14 +17,14 @@ limitations under the License. 
*/ namespace paddle { class KmaxSeqScoreLayer : public Layer { -private: + private: MatrixPtr scores_; size_t beamSize_; void kmaxScorePerSeq(const real* score, real* sortedRes, const ICpuGpuVectorPtr seqStartPos); -public: + public: explicit KmaxSeqScoreLayer(const LayerConfig& config) : Layer(config) {} bool init(const LayerMap& layerMap, diff --git a/paddle/gserver/layers/L2DistanceLayer.cpp b/paddle/legacy/gserver/layers/L2DistanceLayer.cpp similarity index 97% rename from paddle/gserver/layers/L2DistanceLayer.cpp rename to paddle/legacy/gserver/layers/L2DistanceLayer.cpp index c8cca3762c..a3e627e570 100644 --- a/paddle/gserver/layers/L2DistanceLayer.cpp +++ b/paddle/legacy/gserver/layers/L2DistanceLayer.cpp @@ -13,8 +13,8 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "L2DistanceLayer.h" -#include "paddle/utils/Logging.h" -#include "paddle/utils/Stat.h" +#include "paddle/legacy/utils/Logging.h" +#include "paddle/legacy/utils/Stat.h" namespace paddle { diff --git a/paddle/gserver/layers/L2DistanceLayer.h b/paddle/legacy/gserver/layers/L2DistanceLayer.h similarity index 96% rename from paddle/gserver/layers/L2DistanceLayer.h rename to paddle/legacy/gserver/layers/L2DistanceLayer.h index 97f35daf78..aa8aabd9ca 100644 --- a/paddle/gserver/layers/L2DistanceLayer.h +++ b/paddle/legacy/gserver/layers/L2DistanceLayer.h @@ -15,7 +15,7 @@ limitations under the License. */ #pragma once #include "Layer.h" -#include "paddle/math/Matrix.h" +#include "paddle/legacy/math/Matrix.h" namespace paddle { @@ -33,7 +33,7 @@ namespace paddle { */ class L2DistanceLayer : public Layer { -public: + public: explicit L2DistanceLayer(const LayerConfig& config) : Layer(config) {} ~L2DistanceLayer() {} @@ -43,7 +43,7 @@ public: void forward(PassType passType) override; void backward(const UpdateCallback& callback = nullptr) override; -private: + private: // Store the result of subtracting Input2 from Input1 in forward computation, // which will be reused in backward computation. MatrixPtr inputSub_; diff --git a/paddle/gserver/layers/Layer.cpp b/paddle/legacy/gserver/layers/Layer.cpp similarity index 98% rename from paddle/gserver/layers/Layer.cpp rename to paddle/legacy/gserver/layers/Layer.cpp index 32e2f4c9dd..890d33552d 100644 --- a/paddle/gserver/layers/Layer.cpp +++ b/paddle/legacy/gserver/layers/Layer.cpp @@ -12,12 +12,12 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/utils/Util.h" +#include "paddle/legacy/utils/Util.h" #include "CostLayer.h" -#include "paddle/math/SparseMatrix.h" -#include "paddle/utils/Error.h" -#include "paddle/utils/Logging.h" +#include "paddle/legacy/math/SparseMatrix.h" +#include "paddle/legacy/utils/Error.h" +#include "paddle/legacy/utils/Logging.h" #ifndef PADDLE_MOBILE_INFERENCE #include "ValidationLayer.h" diff --git a/paddle/gserver/layers/Layer.h b/paddle/legacy/gserver/layers/Layer.h similarity index 96% rename from paddle/gserver/layers/Layer.h rename to paddle/legacy/gserver/layers/Layer.h index 8da342a00f..a7ff76dece 100644 --- a/paddle/gserver/layers/Layer.h +++ b/paddle/legacy/gserver/layers/Layer.h @@ -17,14 +17,14 @@ limitations under the License. 
*/ #include #include #include "ModelConfig.pb.h" -#include "paddle/function/Function.h" -#include "paddle/gserver/activations/ActivationFunction.h" -#include "paddle/math/CpuSparseMatrix.h" -#include "paddle/parameter/Argument.h" -#include "paddle/parameter/Parameter.h" -#include "paddle/parameter/Weight.h" -#include "paddle/utils/ClassRegistrar.h" -#include "paddle/utils/Util.h" +#include "paddle/legacy/function/Function.h" +#include "paddle/legacy/gserver/activations/ActivationFunction.h" +#include "paddle/legacy/math/CpuSparseMatrix.h" +#include "paddle/legacy/parameter/Argument.h" +#include "paddle/legacy/parameter/Parameter.h" +#include "paddle/legacy/parameter/Weight.h" +#include "paddle/legacy/utils/ClassRegistrar.h" +#include "paddle/legacy/utils/Util.h" /// Macro for registering a layer type. /// Example: REGISTER_LAYER(crf_error, CRFDecodingErrorLayer); @@ -60,7 +60,7 @@ enum PADDLE_DEVICE_ID { * Define necessary variables and functions for every layer. */ class Layer { -protected: + protected: /// Layer config LayerConfig config_; /// whether to use GPU @@ -112,7 +112,7 @@ protected: /// Layer backward function std::vector> backward_; -public: + public: /** * Wait until all input value ready. * Called before Layer::forward() function. @@ -137,7 +137,7 @@ public: */ virtual void markAllInputGrad(); -protected: + protected: /** * Create layer function. Function is called in forward or backward. * \param function, Layer::forward_ or Layer::backward_ @@ -252,7 +252,7 @@ protected: */ void addOutputArgument(int deviceId); -public: + public: explicit Layer(const LayerConfig& config, bool useGpu = FLAGS_use_gpu); virtual ~Layer() {} @@ -490,7 +490,7 @@ public: */ virtual void onPassEnd() {} -protected: + protected: /** * Forward of activation function. */ diff --git a/paddle/gserver/layers/LinearChainCRF.cpp b/paddle/legacy/gserver/layers/LinearChainCRF.cpp similarity index 100% rename from paddle/gserver/layers/LinearChainCRF.cpp rename to paddle/legacy/gserver/layers/LinearChainCRF.cpp diff --git a/paddle/gserver/layers/LinearChainCRF.h b/paddle/legacy/gserver/layers/LinearChainCRF.h similarity index 98% rename from paddle/gserver/layers/LinearChainCRF.h rename to paddle/legacy/gserver/layers/LinearChainCRF.h index 1ea4c7e105..65e2390543 100644 --- a/paddle/gserver/layers/LinearChainCRF.h +++ b/paddle/legacy/gserver/layers/LinearChainCRF.h @@ -14,12 +14,12 @@ limitations under the License. */ #pragma once -#include "paddle/math/Matrix.h" +#include "paddle/legacy/math/Matrix.h" namespace paddle { class LinearChainCRF { -public: + public: /** * The size of para must be \f$(numClasses + 2) * numClasses\f$. * The first numClasses values of para are for starting weights (\f$a\f$). @@ -71,7 +71,7 @@ public: */ MatrixPtr getXGrad() { return matGrad_; } -protected: + protected: int numClasses_; MatrixPtr a_; MatrixPtr b_; diff --git a/paddle/gserver/layers/LinearChainCTC.cpp b/paddle/legacy/gserver/layers/LinearChainCTC.cpp similarity index 100% rename from paddle/gserver/layers/LinearChainCTC.cpp rename to paddle/legacy/gserver/layers/LinearChainCTC.cpp diff --git a/paddle/gserver/layers/LinearChainCTC.h b/paddle/legacy/gserver/layers/LinearChainCTC.h similarity index 95% rename from paddle/gserver/layers/LinearChainCTC.h rename to paddle/legacy/gserver/layers/LinearChainCTC.h index 0b774277dc..e6c4c7bfe0 100644 --- a/paddle/gserver/layers/LinearChainCTC.h +++ b/paddle/legacy/gserver/layers/LinearChainCTC.h @@ -15,12 +15,12 @@ limitations under the License. 
*/ #pragma once #include -#include "paddle/math/Matrix.h" +#include "paddle/legacy/math/Matrix.h" namespace paddle { class LinearChainCTC { -public: + public: LinearChainCTC(int numClasses, bool normByTimes); // Calculate the negative log probability as loss @@ -35,7 +35,7 @@ public: int* labelSeq, int labelSeqLen); -protected: + protected: int numClasses_, blank_, totalSegments_, totalTime_; bool normByTimes_; bool isInvalid_; diff --git a/paddle/gserver/layers/LstmCompute.cpp b/paddle/legacy/gserver/layers/LstmCompute.cpp similarity index 98% rename from paddle/gserver/layers/LstmCompute.cpp rename to paddle/legacy/gserver/layers/LstmCompute.cpp index ea30f6d6b1..70f08e1d4e 100644 --- a/paddle/gserver/layers/LstmCompute.cpp +++ b/paddle/legacy/gserver/layers/LstmCompute.cpp @@ -14,7 +14,7 @@ limitations under the License. */ #include "LstmCompute.h" #include "hl_recurrent_apply.cuh" -#include "paddle/utils/Util.h" +#include "paddle/legacy/utils/Util.h" namespace paddle { diff --git a/paddle/gserver/layers/LstmCompute.cu b/paddle/legacy/gserver/layers/LstmCompute.cu similarity index 100% rename from paddle/gserver/layers/LstmCompute.cu rename to paddle/legacy/gserver/layers/LstmCompute.cu diff --git a/paddle/gserver/layers/LstmCompute.h b/paddle/legacy/gserver/layers/LstmCompute.h similarity index 97% rename from paddle/gserver/layers/LstmCompute.h rename to paddle/legacy/gserver/layers/LstmCompute.h index b7d55eb1f9..ac40c35ef1 100644 --- a/paddle/gserver/layers/LstmCompute.h +++ b/paddle/legacy/gserver/layers/LstmCompute.h @@ -16,12 +16,12 @@ limitations under the License. */ #include "ModelConfig.pb.h" #include "hl_gpu.h" -#include "paddle/utils/Common.h" +#include "paddle/legacy/utils/Common.h" namespace paddle { class LstmCompute { -public: + public: void init(LayerConfig &config); /** @@ -57,7 +57,7 @@ public: hl_lstm_grad grad, int frameSize); -public: + public: hl_activation_mode_t activeNode_; hl_activation_mode_t activeGate_; hl_activation_mode_t activeState_; diff --git a/paddle/gserver/layers/LstmLayer.cpp b/paddle/legacy/gserver/layers/LstmLayer.cpp similarity index 99% rename from paddle/gserver/layers/LstmLayer.cpp rename to paddle/legacy/gserver/layers/LstmLayer.cpp index f65ae6a3e6..43a55d8d49 100644 --- a/paddle/gserver/layers/LstmLayer.cpp +++ b/paddle/legacy/gserver/layers/LstmLayer.cpp @@ -13,9 +13,9 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "LstmLayer.h" -#include "paddle/math/BaseMatrix.h" -#include "paddle/math/Matrix.h" -#include "paddle/utils/Stat.h" +#include "paddle/legacy/math/BaseMatrix.h" +#include "paddle/legacy/math/Matrix.h" +#include "paddle/legacy/utils/Stat.h" DECLARE_bool(prev_batch_state); diff --git a/paddle/gserver/layers/LstmLayer.h b/paddle/legacy/gserver/layers/LstmLayer.h similarity index 98% rename from paddle/gserver/layers/LstmLayer.h rename to paddle/legacy/gserver/layers/LstmLayer.h index 4568b13ade..8c8b382f50 100644 --- a/paddle/gserver/layers/LstmLayer.h +++ b/paddle/legacy/gserver/layers/LstmLayer.h @@ -17,8 +17,8 @@ limitations under the License. 
*/ #include "Layer.h" #include "LstmCompute.h" #include "SequenceToBatch.h" -#include "paddle/math/BaseMatrix.h" -#include "paddle/math/Matrix.h" +#include "paddle/legacy/math/BaseMatrix.h" +#include "paddle/legacy/math/Matrix.h" namespace paddle { /** @@ -71,7 +71,7 @@ namespace paddle { */ class LstmLayer : public Layer, public LstmCompute { -public: + public: explicit LstmLayer(const LayerConfig &config) : Layer(config) {} bool init(const LayerMap &layerMap, @@ -87,7 +87,7 @@ public: LayerStatePtr getState() override; -protected: + protected: /** * @brief Compute lstm forward one sequence by one sequence. * @param batchSize The batchSize is not equal to the batch_size in @@ -165,7 +165,7 @@ protected: */ void getPrevBatchState(size_t numSequences); -protected: + protected: /// Learned parameters, shape: (size, 4*size). /// The weight ([size, 4*size]) contains \f$W_{hi}, W_{hf}, W_{hc}, W_{ho}\f$. std::unique_ptr weight_; diff --git a/paddle/gserver/layers/LstmStepLayer.cpp b/paddle/legacy/gserver/layers/LstmStepLayer.cpp similarity index 99% rename from paddle/gserver/layers/LstmStepLayer.cpp rename to paddle/legacy/gserver/layers/LstmStepLayer.cpp index 8faaa1c4e1..f02f8ad62f 100644 --- a/paddle/gserver/layers/LstmStepLayer.cpp +++ b/paddle/legacy/gserver/layers/LstmStepLayer.cpp @@ -14,7 +14,7 @@ limitations under the License. */ #include "Layer.h" #include "LstmCompute.h" -#include "paddle/utils/Stat.h" +#include "paddle/legacy/utils/Stat.h" namespace paddle { @@ -22,7 +22,7 @@ namespace paddle { * LstmStepLayer used in recurrent layer group. */ class LstmStepLayer : public Layer, public LstmCompute { -protected: + protected: Argument state_; Argument gate_; Argument stateActive_; @@ -30,7 +30,7 @@ protected: MatrixPtr checkIgGrad_, checkFgGrad_, checkOgGrad_; std::unique_ptr weight_; -public: + public: explicit LstmStepLayer(const LayerConfig& config) : Layer(config) {} ~LstmStepLayer() {} diff --git a/paddle/gserver/layers/MDLstmLayer.cpp b/paddle/legacy/gserver/layers/MDLstmLayer.cpp similarity index 99% rename from paddle/gserver/layers/MDLstmLayer.cpp rename to paddle/legacy/gserver/layers/MDLstmLayer.cpp index 7cfdb3ff25..4838183e8c 100644 --- a/paddle/gserver/layers/MDLstmLayer.cpp +++ b/paddle/legacy/gserver/layers/MDLstmLayer.cpp @@ -13,13 +13,13 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ #include "LstmLayer.h" -#include "paddle/math/BaseMatrix.h" -#include "paddle/math/Matrix.h" +#include "paddle/legacy/math/BaseMatrix.h" +#include "paddle/legacy/math/Matrix.h" namespace paddle { class CoordIterator { -public: + public: std::vector dims_; std::vector directions_; std::vector curPos_; @@ -51,7 +51,7 @@ public: } } -public: + public: CoordIterator(std::vector dim, std::vector directions) : dims_(dim), directions_(directions), end_(false) { CHECK_EQ(dims_.size(), directions_.size()); @@ -178,7 +178,7 @@ public: * */ class MDLstmLayer : public LstmLayer { -public: + public: explicit MDLstmLayer(const LayerConfig& config) : LstmLayer(config) {} bool init(const LayerMap& layerMap, @@ -188,13 +188,13 @@ public: void backward(const UpdateCallback& callback) override; -protected: + protected: void forwardOneSequence(int start, CoordIterator& coordIter); void backwardOneSequence(int start, CoordIterator& coordIter); void forwardGate2OutputSequence(int start, CoordIterator& coordIter); void backwardGate2OutputSequence(int start, CoordIterator& coordIter); -protected: + protected: std::vector frameInputGate_; std::vector frameForgetGate_; std::vector frameOutputGate_; diff --git a/paddle/gserver/layers/MKLDNNAddtoLayer.cpp b/paddle/legacy/gserver/layers/MKLDNNAddtoLayer.cpp similarity index 100% rename from paddle/gserver/layers/MKLDNNAddtoLayer.cpp rename to paddle/legacy/gserver/layers/MKLDNNAddtoLayer.cpp diff --git a/paddle/gserver/layers/MKLDNNAddtoLayer.h b/paddle/legacy/gserver/layers/MKLDNNAddtoLayer.h similarity index 98% rename from paddle/gserver/layers/MKLDNNAddtoLayer.h rename to paddle/legacy/gserver/layers/MKLDNNAddtoLayer.h index e40e2f2251..0b385e804f 100644 --- a/paddle/gserver/layers/MKLDNNAddtoLayer.h +++ b/paddle/legacy/gserver/layers/MKLDNNAddtoLayer.h @@ -25,7 +25,7 @@ namespace paddle { * The config file api is mkldnn_addto */ class MKLDNNAddtoLayer : public MKLDNNLayer { -protected: + protected: // layer size == ic * ih * iw == oc * oh *ow, and can not be changed size_t layerSize_; @@ -38,7 +38,7 @@ protected: std::vector> fwdBias_; std::shared_ptr bwdBias_; -public: + public: explicit MKLDNNAddtoLayer(const LayerConfig& config) : MKLDNNLayer(config) {} ~MKLDNNAddtoLayer() {} @@ -59,7 +59,7 @@ public: void updateWeights(const UpdateCallback& callback) override; -protected: + protected: void resetFwdBuffers(std::vector& inputs, MKLDNNMatrixPtr& bias, MKLDNNMatrixPtr& out); diff --git a/paddle/gserver/layers/MKLDNNBase.h b/paddle/legacy/gserver/layers/MKLDNNBase.h similarity index 98% rename from paddle/gserver/layers/MKLDNNBase.h rename to paddle/legacy/gserver/layers/MKLDNNBase.h index d84e285940..786ceaf860 100644 --- a/paddle/gserver/layers/MKLDNNBase.h +++ b/paddle/legacy/gserver/layers/MKLDNNBase.h @@ -31,7 +31,7 @@ typedef enum { * */ class CPUEngine { -public: + public: static CPUEngine& Instance() { // Thread-safe in C++11. 
static CPUEngine myInstance; @@ -46,12 +46,12 @@ public: mkldnn::engine& getEngine() { return cpuEngine_; } -protected: + protected: CPUEngine() : cpuEngine_(mkldnn::engine::cpu, 0) {} // CPUEngine() : cpuEngine_(mkldnn::engine::cpu_lazy, 0) {} ~CPUEngine() {} -private: + private: mkldnn::engine cpuEngine_; }; @@ -60,7 +60,7 @@ private: * */ class MKLDNNStream { -public: + public: MKLDNNStream() : ready_(false) { resetState(); } virtual ~MKLDNNStream() {} @@ -89,7 +89,7 @@ public: ready_ = true; } -private: + private: bool ready_; std::shared_ptr stream_; }; diff --git a/paddle/gserver/layers/MKLDNNBatchNormLayer.cpp b/paddle/legacy/gserver/layers/MKLDNNBatchNormLayer.cpp similarity index 100% rename from paddle/gserver/layers/MKLDNNBatchNormLayer.cpp rename to paddle/legacy/gserver/layers/MKLDNNBatchNormLayer.cpp diff --git a/paddle/gserver/layers/MKLDNNBatchNormLayer.h b/paddle/legacy/gserver/layers/MKLDNNBatchNormLayer.h similarity index 99% rename from paddle/gserver/layers/MKLDNNBatchNormLayer.h rename to paddle/legacy/gserver/layers/MKLDNNBatchNormLayer.h index 93e182206a..9aa20df98f 100644 --- a/paddle/gserver/layers/MKLDNNBatchNormLayer.h +++ b/paddle/legacy/gserver/layers/MKLDNNBatchNormLayer.h @@ -27,7 +27,7 @@ typedef mkldnn::batch_normalization_backward bn_bwd; * The config file api is mkldnn_batch_norm */ class MKLDNNBatchNormLayer : public MKLDNNLayer { -protected: + protected: // save forward primitive_desc, which can be used backward std::shared_ptr fwdPD_; @@ -62,7 +62,7 @@ protected: MKLDNNMatrixPtr mean_; MKLDNNMatrixPtr var_; -public: + public: explicit MKLDNNBatchNormLayer(const LayerConfig& config) : MKLDNNLayer(config), useGlobalStats_(true), hasInitedWgt_(false) {} @@ -88,7 +88,7 @@ public: void convertWeightsFromPaddle() override; -protected: + protected: void initWeight(); /** * cal moving mean and variance. 
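The truncated doc comment above ("cal moving mean and variance") refers to MKLDNNBatchNormLayer keeping running statistics (mean_, var_) alongside useGlobalStats_: statistics are accumulated during training and reused at inference. A minimal sketch of the usual moving-average update, assuming an exponential decay factor; the function and parameter names below (updateMovingStats, rho, batchMean, batchVar) are illustrative and not taken from this diff:

    #include <cstddef>

    // Sketch only: exponential moving average of per-channel batch-norm
    // statistics. rho plays the role of the moving-average fraction;
    // batchMean/batchVar are computed from the current mini-batch.
    void updateMovingStats(float* movingMean, float* movingVar,
                           const float* batchMean, const float* batchVar,
                           std::size_t channels, float rho) {
      for (std::size_t c = 0; c < channels; ++c) {
        movingMean[c] = rho * movingMean[c] + (1.f - rho) * batchMean[c];
        movingVar[c] = rho * movingVar[c] + (1.f - rho) * batchVar[c];
      }
    }

When useGlobalStats_ is true (the inference path), the layer normalizes with these accumulated values instead of per-batch statistics.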
diff --git a/paddle/gserver/layers/MKLDNNConcatLayer.cpp b/paddle/legacy/gserver/layers/MKLDNNConcatLayer.cpp similarity index 100% rename from paddle/gserver/layers/MKLDNNConcatLayer.cpp rename to paddle/legacy/gserver/layers/MKLDNNConcatLayer.cpp diff --git a/paddle/gserver/layers/MKLDNNConcatLayer.h b/paddle/legacy/gserver/layers/MKLDNNConcatLayer.h similarity index 99% rename from paddle/gserver/layers/MKLDNNConcatLayer.h rename to paddle/legacy/gserver/layers/MKLDNNConcatLayer.h index f7abdabfb5..d7738df6c1 100644 --- a/paddle/gserver/layers/MKLDNNConcatLayer.h +++ b/paddle/legacy/gserver/layers/MKLDNNConcatLayer.h @@ -25,7 +25,7 @@ namespace paddle { * The config file api is mkldnn_concat */ class MKLDNNConcatLayer : public MKLDNNLayer { -protected: + protected: std::vector> bwds_; // input channel numbers std::vector channels_; @@ -35,7 +35,7 @@ protected: // if axis_ == 1, concat channel (default) int axis_; -public: + public: explicit MKLDNNConcatLayer(const LayerConfig& config) : MKLDNNLayer(config), axis_(1) {} @@ -75,7 +75,7 @@ public: return totalSize; } -protected: + protected: void resetFwdBuffers(std::vector& inputs, MKLDNNMatrixPtr& out); void resetFwdPD(std::shared_ptr& pd, diff --git a/paddle/gserver/layers/MKLDNNConvLayer.cpp b/paddle/legacy/gserver/layers/MKLDNNConvLayer.cpp similarity index 99% rename from paddle/gserver/layers/MKLDNNConvLayer.cpp rename to paddle/legacy/gserver/layers/MKLDNNConvLayer.cpp index a442a0a013..b47bf14821 100644 --- a/paddle/gserver/layers/MKLDNNConvLayer.cpp +++ b/paddle/legacy/gserver/layers/MKLDNNConvLayer.cpp @@ -13,8 +13,8 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "MKLDNNConvLayer.h" -#include "paddle/math/MathUtils.h" -#include "paddle/utils/Logging.h" +#include "paddle/legacy/math/MathUtils.h" +#include "paddle/legacy/utils/Logging.h" using namespace mkldnn; // NOLINT typedef memory::format format; diff --git a/paddle/gserver/layers/MKLDNNConvLayer.h b/paddle/legacy/gserver/layers/MKLDNNConvLayer.h similarity index 99% rename from paddle/gserver/layers/MKLDNNConvLayer.h rename to paddle/legacy/gserver/layers/MKLDNNConvLayer.h index 29c8735fbb..d399035ed3 100644 --- a/paddle/gserver/layers/MKLDNNConvLayer.h +++ b/paddle/legacy/gserver/layers/MKLDNNConvLayer.h @@ -28,7 +28,7 @@ typedef mkldnn::convolution_backward_data conv_bwdData; * The config file api is mkldnn_conv */ class MKLDNNConvLayer : public MKLDNNLayer { -protected: + protected: // padding height and width int ph_, pw_; // stride height and width @@ -59,7 +59,7 @@ protected: std::unique_ptr weight_; std::unique_ptr biases_; -public: + public: explicit MKLDNNConvLayer(const LayerConfig& config) : MKLDNNLayer(config), hasInitedWgt_(false), caffeMode_(true) {} @@ -92,7 +92,7 @@ public: << ", sw: " << sw_ << ", dh: " << dh_ << ", dw: " << dw_; } -protected: + protected: /** * load the dims settings of this conv */ diff --git a/paddle/gserver/layers/MKLDNNFcLayer.cpp b/paddle/legacy/gserver/layers/MKLDNNFcLayer.cpp similarity index 99% rename from paddle/gserver/layers/MKLDNNFcLayer.cpp rename to paddle/legacy/gserver/layers/MKLDNNFcLayer.cpp index 0c7e6f16e2..f3747c7db8 100644 --- a/paddle/gserver/layers/MKLDNNFcLayer.cpp +++ b/paddle/legacy/gserver/layers/MKLDNNFcLayer.cpp @@ -13,7 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ #include "MKLDNNFcLayer.h" -#include "paddle/utils/Logging.h" +#include "paddle/legacy/utils/Logging.h" using namespace mkldnn; // NOLINT typedef memory::format format; diff --git a/paddle/gserver/layers/MKLDNNFcLayer.h b/paddle/legacy/gserver/layers/MKLDNNFcLayer.h similarity index 99% rename from paddle/gserver/layers/MKLDNNFcLayer.h rename to paddle/legacy/gserver/layers/MKLDNNFcLayer.h index 0d41a4379d..a704066cc8 100644 --- a/paddle/gserver/layers/MKLDNNFcLayer.h +++ b/paddle/legacy/gserver/layers/MKLDNNFcLayer.h @@ -28,7 +28,7 @@ typedef mkldnn::inner_product_backward_data fc_bwdData; * The config file api is mkldnn_fc */ class MKLDNNFcLayer : public MKLDNNLayer { -protected: + protected: // input layer size, can not be change after init size_t iLayerSize_; // == ic * ih * iw @@ -42,7 +42,7 @@ protected: std::unique_ptr weight_; std::unique_ptr biases_; -public: + public: explicit MKLDNNFcLayer(const LayerConfig& config) : MKLDNNLayer(config), hasInitedWgt_(false) {} @@ -68,7 +68,7 @@ public: void convertWeightsToPaddle() override; -protected: + protected: void resetFwdBuffers(MKLDNNMatrixPtr& in, MKLDNNMatrixPtr& wgt, MKLDNNMatrixPtr& bias, diff --git a/paddle/gserver/layers/MKLDNNLRNLayer.cpp b/paddle/legacy/gserver/layers/MKLDNNLRNLayer.cpp similarity index 99% rename from paddle/gserver/layers/MKLDNNLRNLayer.cpp rename to paddle/legacy/gserver/layers/MKLDNNLRNLayer.cpp index 88513ab8bc..739482348f 100644 --- a/paddle/gserver/layers/MKLDNNLRNLayer.cpp +++ b/paddle/legacy/gserver/layers/MKLDNNLRNLayer.cpp @@ -13,7 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "MKLDNNLRNLayer.h" -#include "paddle/utils/Logging.h" +#include "paddle/legacy/utils/Logging.h" using namespace mkldnn; // NOLINT typedef memory::format format; diff --git a/paddle/gserver/layers/MKLDNNLRNLayer.h b/paddle/legacy/gserver/layers/MKLDNNLRNLayer.h similarity index 98% rename from paddle/gserver/layers/MKLDNNLRNLayer.h rename to paddle/legacy/gserver/layers/MKLDNNLRNLayer.h index b503ee5594..028438f2c9 100644 --- a/paddle/gserver/layers/MKLDNNLRNLayer.h +++ b/paddle/legacy/gserver/layers/MKLDNNLRNLayer.h @@ -27,7 +27,7 @@ typedef mkldnn::lrn_backward lrn_bwd; * The config file api is mkldnn_lrn */ class MKLDNNLRNLayer : public MKLDNNLayer { -protected: + protected: // save forward primitive_desc, which can be used in backward std::shared_ptr fwdPD_; // according to https://github.com/01org/mkl-dnn/blob/master/tests/gtests/ @@ -37,7 +37,7 @@ protected: int localSize_; float alpha_, beta_; // scale and pow in paddle -public: + public: explicit MKLDNNLRNLayer(const LayerConfig& config) : MKLDNNLayer(config) {} ~MKLDNNLRNLayer() {} @@ -56,7 +56,7 @@ public: std::vector& inputs, MKLDNNMatrixPtr& out) override; -protected: + protected: void resetFwdBuffers(MKLDNNMatrixPtr& in, MKLDNNMatrixPtr& out); void resetFwdPD(std::shared_ptr& pd, MKLDNNMatrixPtr in, diff --git a/paddle/gserver/layers/MKLDNNLayer.cpp b/paddle/legacy/gserver/layers/MKLDNNLayer.cpp similarity index 100% rename from paddle/gserver/layers/MKLDNNLayer.cpp rename to paddle/legacy/gserver/layers/MKLDNNLayer.cpp diff --git a/paddle/gserver/layers/MKLDNNLayer.h b/paddle/legacy/gserver/layers/MKLDNNLayer.h similarity index 99% rename from paddle/gserver/layers/MKLDNNLayer.h rename to paddle/legacy/gserver/layers/MKLDNNLayer.h index 4a7eb74ce3..94dc8625f6 100644 --- a/paddle/gserver/layers/MKLDNNLayer.h +++ b/paddle/legacy/gserver/layers/MKLDNNLayer.h @@ -18,8 +18,8 @@ limitations 
under the License. */ #include "Layer.h" #include "MKLDNNBase.h" #include "mkldnn.hpp" -#include "paddle/math/MKLDNNMatrix.h" -#include "paddle/utils/Stat.h" +#include "paddle/legacy/math/MKLDNNMatrix.h" +#include "paddle/legacy/utils/Stat.h" DECLARE_bool(use_mkldnn); @@ -33,7 +33,7 @@ typedef std::shared_ptr MKLDNNLayerPtr; * */ class MKLDNNLayer : public Layer { -protected: + protected: // batch size int bs_; // their sizes are always from the first input layer @@ -95,7 +95,7 @@ protected: // tmp input argument to save input grad, only used to merge grad Argument tmpInArg_; -public: + public: explicit MKLDNNLayer(const LayerConfig& config) : Layer(config), ih_(0), @@ -162,7 +162,7 @@ public: */ void addOutputArgument(int deviceId) { Layer::addOutputArgument(deviceId); } -protected: + protected: /** * Some layers may have different condition to reset the forward. * The function returns the condition that do not need reset forward. @@ -233,7 +233,7 @@ protected: */ void resetMergeGrad(MKLDNNMatrixPtr& out); -protected: + protected: /** * Set deviceId of this layer. */ @@ -340,7 +340,7 @@ protected: } } -private: + private: /** * clear all grad */ diff --git a/paddle/gserver/layers/MKLDNNPoolLayer.cpp b/paddle/legacy/gserver/layers/MKLDNNPoolLayer.cpp similarity index 98% rename from paddle/gserver/layers/MKLDNNPoolLayer.cpp rename to paddle/legacy/gserver/layers/MKLDNNPoolLayer.cpp index 3be848c749..83d980538d 100644 --- a/paddle/gserver/layers/MKLDNNPoolLayer.cpp +++ b/paddle/legacy/gserver/layers/MKLDNNPoolLayer.cpp @@ -13,8 +13,8 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "MKLDNNPoolLayer.h" -#include "paddle/math/MathUtils.h" -#include "paddle/utils/Logging.h" +#include "paddle/legacy/math/MathUtils.h" +#include "paddle/legacy/utils/Logging.h" using namespace mkldnn; // NOLINT typedef memory::format format; diff --git a/paddle/gserver/layers/MKLDNNPoolLayer.h b/paddle/legacy/gserver/layers/MKLDNNPoolLayer.h similarity index 99% rename from paddle/gserver/layers/MKLDNNPoolLayer.h rename to paddle/legacy/gserver/layers/MKLDNNPoolLayer.h index 12821cda73..1eb0ee4ad9 100644 --- a/paddle/gserver/layers/MKLDNNPoolLayer.h +++ b/paddle/legacy/gserver/layers/MKLDNNPoolLayer.h @@ -27,7 +27,7 @@ typedef mkldnn::pooling_backward pool_bwd; * The config file api is mkldnn_pool */ class MKLDNNPoolLayer : public MKLDNNLayer { -protected: + protected: // padding height and width int ph_, pw_; // stride height and width @@ -44,7 +44,7 @@ protected: // test_pooling_forward.cpp, pool need workspace for backward std::shared_ptr workspace_; -public: + public: explicit MKLDNNPoolLayer(const LayerConfig& config) : MKLDNNLayer(config) {} ~MKLDNNPoolLayer() {} @@ -70,7 +70,7 @@ public: << ", sw: " << sw_; } -protected: + protected: void resetFwdBuffers(MKLDNNMatrixPtr& in, MKLDNNMatrixPtr& out); void resetFwdPD(std::shared_ptr& pd, MKLDNNMatrixPtr in, diff --git a/paddle/gserver/layers/MKLPackedRecurrentLayer.cpp b/paddle/legacy/gserver/layers/MKLPackedRecurrentLayer.cpp similarity index 100% rename from paddle/gserver/layers/MKLPackedRecurrentLayer.cpp rename to paddle/legacy/gserver/layers/MKLPackedRecurrentLayer.cpp diff --git a/paddle/gserver/layers/MKLPackedRecurrentLayer.h b/paddle/legacy/gserver/layers/MKLPackedRecurrentLayer.h similarity index 98% rename from paddle/gserver/layers/MKLPackedRecurrentLayer.h rename to paddle/legacy/gserver/layers/MKLPackedRecurrentLayer.h index 37eb362d45..441025a9c9 100644 --- 
a/paddle/gserver/layers/MKLPackedRecurrentLayer.h +++ b/paddle/legacy/gserver/layers/MKLPackedRecurrentLayer.h @@ -29,7 +29,7 @@ namespace paddle { */ class MKLPackedRecurrentLayer : public RecurrentLayer { -public: + public: explicit MKLPackedRecurrentLayer(const LayerConfig& config) : RecurrentLayer(config) {} @@ -38,7 +38,7 @@ public: void backward(const UpdateCallback& callback) override; -protected: + protected: void forwardBatch(int batchSize, size_t numSequences, const int* starts) override; @@ -47,7 +47,7 @@ protected: size_t numSequences, const int* starts) override; -protected: + protected: /// packed_weight_ contains same data with /// RecurrentLayer::weight_ but is packed std::unique_ptr packed_weight_; diff --git a/paddle/gserver/layers/MKLPackedWeight.h b/paddle/legacy/gserver/layers/MKLPackedWeight.h similarity index 93% rename from paddle/gserver/layers/MKLPackedWeight.h rename to paddle/legacy/gserver/layers/MKLPackedWeight.h index 28b8a7db7c..47f225bd03 100644 --- a/paddle/gserver/layers/MKLPackedWeight.h +++ b/paddle/legacy/gserver/layers/MKLPackedWeight.h @@ -14,14 +14,14 @@ limitations under the License. */ #pragma once -#include "paddle/math/MathFunctions.h" -#include "paddle/parameter/Parameter.h" -#include "paddle/parameter/Weight.h" +#include "paddle/legacy/math/MathFunctions.h" +#include "paddle/legacy/parameter/Parameter.h" +#include "paddle/legacy/parameter/Weight.h" namespace paddle { class MKLPackedWeight { -protected: + protected: /// The pointer of weight real *weight_; /// The pointer of cblas packed gemm to weight @@ -30,7 +30,7 @@ protected: size_t width_; bool transW_; -public: + public: explicit MKLPackedWeight(MatrixPtr weight, bool transW = false) { packedWeight_ = nullptr; weight_ = weight->getData(); @@ -59,7 +59,7 @@ public: dst->getWidth()); } -protected: + protected: void pack_(real *src) { if (!packedWeight_) { packedWeight_ = cblas_sgemm_alloc(CblasBMatrix, 1, width_, height_); diff --git a/paddle/gserver/layers/MaxIdLayer.cpp b/paddle/legacy/gserver/layers/MaxIdLayer.cpp similarity index 99% rename from paddle/gserver/layers/MaxIdLayer.cpp rename to paddle/legacy/gserver/layers/MaxIdLayer.cpp index 84e375d744..eecd4996e9 100644 --- a/paddle/gserver/layers/MaxIdLayer.cpp +++ b/paddle/legacy/gserver/layers/MaxIdLayer.cpp @@ -23,11 +23,11 @@ namespace paddle { * The config file api is maxid_layer. */ class MaxIdLayer : public Layer { -private: + private: /// a predetermined number of best states at each level size_t beamSize_; -public: + public: explicit MaxIdLayer(const LayerConfig& config) : Layer(config) {} bool init(const LayerMap& layerMap, diff --git a/paddle/gserver/layers/MaxLayer.cpp b/paddle/legacy/gserver/layers/MaxLayer.cpp similarity index 96% rename from paddle/gserver/layers/MaxLayer.cpp rename to paddle/legacy/gserver/layers/MaxLayer.cpp index 7ee2e0dd94..b51251b663 100644 --- a/paddle/gserver/layers/MaxLayer.cpp +++ b/paddle/legacy/gserver/layers/MaxLayer.cpp @@ -13,8 +13,8 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ #include "MaxLayer.h" -#include "paddle/utils/Logging.h" -#include "paddle/utils/Stat.h" +#include "paddle/legacy/utils/Logging.h" +#include "paddle/legacy/utils/Stat.h" namespace paddle { diff --git a/paddle/gserver/layers/MaxLayer.h b/paddle/legacy/gserver/layers/MaxLayer.h similarity index 94% rename from paddle/gserver/layers/MaxLayer.h rename to paddle/legacy/gserver/layers/MaxLayer.h index 9dbc672652..12d0128e39 100644 --- a/paddle/gserver/layers/MaxLayer.h +++ b/paddle/legacy/gserver/layers/MaxLayer.h @@ -15,8 +15,8 @@ limitations under the License. */ #pragma once #include "SequencePoolLayer.h" -#include "paddle/math/Matrix.h" -#include "paddle/utils/ThreadLocal.h" +#include "paddle/legacy/math/Matrix.h" +#include "paddle/legacy/utils/ThreadLocal.h" namespace paddle { @@ -39,11 +39,11 @@ namespace paddle { */ class MaxLayer : public SequencePoolLayer { -protected: + protected: // maxIndex_[i][j] = k : the value at (i, j) is from input[k]. IVectorPtr maxIndex_; -public: + public: explicit MaxLayer(const LayerConfig& config) : SequencePoolLayer(config) {} bool init(const LayerMap& layerMap, diff --git a/paddle/gserver/layers/MaxOutLayer.cpp b/paddle/legacy/gserver/layers/MaxOutLayer.cpp similarity index 100% rename from paddle/gserver/layers/MaxOutLayer.cpp rename to paddle/legacy/gserver/layers/MaxOutLayer.cpp diff --git a/paddle/gserver/layers/MaxOutLayer.h b/paddle/legacy/gserver/layers/MaxOutLayer.h similarity index 96% rename from paddle/gserver/layers/MaxOutLayer.h rename to paddle/legacy/gserver/layers/MaxOutLayer.h index 1fb371836b..e56f34b8e0 100644 --- a/paddle/gserver/layers/MaxOutLayer.h +++ b/paddle/legacy/gserver/layers/MaxOutLayer.h @@ -15,7 +15,7 @@ limitations under the License. */ #pragma once #include "Layer.h" -#include "paddle/math/Matrix.h" +#include "paddle/legacy/math/Matrix.h" namespace paddle { @@ -29,7 +29,7 @@ namespace paddle { */ class MaxOutLayer : public Layer { -protected: + protected: size_t groups_; size_t imgSizeH_, imgSizeW_; /// outputChannels_ = channels_ / groups_ @@ -38,7 +38,7 @@ protected: size_t featLen_; IVectorPtr maxoutId_; -public: + public: /// return imgSizeH_ * imgSizeW_ * outputChannels_; size_t getSize(); diff --git a/paddle/gserver/layers/MaxPoolWithMaskLayer.cpp b/paddle/legacy/gserver/layers/MaxPoolWithMaskLayer.cpp similarity index 97% rename from paddle/gserver/layers/MaxPoolWithMaskLayer.cpp rename to paddle/legacy/gserver/layers/MaxPoolWithMaskLayer.cpp index e594e22b5e..a1cc59a719 100644 --- a/paddle/gserver/layers/MaxPoolWithMaskLayer.cpp +++ b/paddle/legacy/gserver/layers/MaxPoolWithMaskLayer.cpp @@ -13,8 +13,8 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "MaxPoolWithMaskLayer.h" -#include "paddle/utils/Logging.h" -#include "paddle/utils/Stat.h" +#include "paddle/legacy/utils/Logging.h" +#include "paddle/legacy/utils/Stat.h" namespace paddle { diff --git a/paddle/gserver/layers/MaxPoolWithMaskLayer.h b/paddle/legacy/gserver/layers/MaxPoolWithMaskLayer.h similarity index 95% rename from paddle/gserver/layers/MaxPoolWithMaskLayer.h rename to paddle/legacy/gserver/layers/MaxPoolWithMaskLayer.h index 74cc8acf35..fcd5388abe 100644 --- a/paddle/gserver/layers/MaxPoolWithMaskLayer.h +++ b/paddle/legacy/gserver/layers/MaxPoolWithMaskLayer.h @@ -16,17 +16,17 @@ limitations under the License. 
*/ #include #include "PoolLayer.h" -#include "paddle/math/Matrix.h" +#include "paddle/legacy/math/Matrix.h" namespace paddle { /** * @brief Basic parent layer of different kinds of pooling */ class MaxPoolWithMaskLayer : public PoolLayer { -protected: + protected: Argument mask_; -public: + public: explicit MaxPoolWithMaskLayer(const LayerConfig& config) : PoolLayer(config) {} diff --git a/paddle/gserver/layers/MixedLayer.cpp b/paddle/legacy/gserver/layers/MixedLayer.cpp similarity index 99% rename from paddle/gserver/layers/MixedLayer.cpp rename to paddle/legacy/gserver/layers/MixedLayer.cpp index 7dcb30b98d..63e658c09c 100644 --- a/paddle/gserver/layers/MixedLayer.cpp +++ b/paddle/legacy/gserver/layers/MixedLayer.cpp @@ -13,7 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "MixedLayer.h" -#include "paddle/utils/Stat.h" +#include "paddle/legacy/utils/Stat.h" namespace paddle { diff --git a/paddle/gserver/layers/MixedLayer.h b/paddle/legacy/gserver/layers/MixedLayer.h similarity index 98% rename from paddle/gserver/layers/MixedLayer.h rename to paddle/legacy/gserver/layers/MixedLayer.h index a1a43c52e4..43ee2bd818 100644 --- a/paddle/gserver/layers/MixedLayer.h +++ b/paddle/legacy/gserver/layers/MixedLayer.h @@ -30,7 +30,7 @@ namespace paddle { * The config file api is mixed_layer. */ class MixedLayer : public Layer { -public: + public: explicit MixedLayer(const LayerConfig& config) : Layer(config) {} ~MixedLayer() {} @@ -52,7 +52,7 @@ public: */ LayerStatePtr getState() override; -protected: + protected: std::vector> projections_; std::vector> operators_; /// the matrix size of projection state diff --git a/paddle/gserver/layers/MultiBoxLossLayer.cpp b/paddle/legacy/gserver/layers/MultiBoxLossLayer.cpp similarity index 100% rename from paddle/gserver/layers/MultiBoxLossLayer.cpp rename to paddle/legacy/gserver/layers/MultiBoxLossLayer.cpp diff --git a/paddle/gserver/layers/MultiBoxLossLayer.h b/paddle/legacy/gserver/layers/MultiBoxLossLayer.h similarity index 98% rename from paddle/gserver/layers/MultiBoxLossLayer.h rename to paddle/legacy/gserver/layers/MultiBoxLossLayer.h index 9935da5644..a358cded00 100644 --- a/paddle/gserver/layers/MultiBoxLossLayer.h +++ b/paddle/legacy/gserver/layers/MultiBoxLossLayer.h @@ -41,7 +41,7 @@ namespace paddle { */ class MultiBoxLossLayer : public CostLayer { -public: + public: explicit MultiBoxLossLayer(const LayerConfig& config) : CostLayer(config) {} bool init(const LayerMap& layerMap, const ParameterMap& parameterMap); @@ -54,7 +54,7 @@ public: void backwardImp(Matrix& outputValue, Argument& label, Matrix& outputGrad) {} -protected: + protected: inline LayerPtr getPriorBoxLayer() { return inputLayers_[0]; } inline LayerPtr getLabelLayer() { return inputLayers_[1]; } inline LayerPtr getLocInputLayer(size_t index) { @@ -64,7 +64,7 @@ protected: return inputLayers_[2 + inputNum_ + index]; } -protected: + protected: size_t numClasses_; real overlapThreshold_; real negPosRatio_; diff --git a/paddle/gserver/layers/MultinomialSampler.cpp b/paddle/legacy/gserver/layers/MultinomialSampler.cpp similarity index 100% rename from paddle/gserver/layers/MultinomialSampler.cpp rename to paddle/legacy/gserver/layers/MultinomialSampler.cpp diff --git a/paddle/gserver/layers/MultinomialSampler.h b/paddle/legacy/gserver/layers/MultinomialSampler.h similarity index 97% rename from paddle/gserver/layers/MultinomialSampler.h rename to paddle/legacy/gserver/layers/MultinomialSampler.h index 
1f9e818ee5..ed44535241 100644 --- a/paddle/gserver/layers/MultinomialSampler.h +++ b/paddle/legacy/gserver/layers/MultinomialSampler.h @@ -16,7 +16,7 @@ limitations under the License. */ #include #include -#include "paddle/utils/Common.h" +#include "paddle/legacy/utils/Common.h" namespace paddle { @@ -29,7 +29,7 @@ namespace paddle { * The computational complexity of generate one sample is O(1). */ class MultinomialSampler { -public: + public: MultinomialSampler(const real* prob, int size); //! protobuf always using double. @@ -53,7 +53,7 @@ public: return gen1([&g, this]() { return rand_(g); }); } -protected: + protected: /** * @brief Generation * @param[in] rand rand is a real random number distribution diff --git a/paddle/gserver/layers/MultiplexLayer.cpp b/paddle/legacy/gserver/layers/MultiplexLayer.cpp similarity index 97% rename from paddle/gserver/layers/MultiplexLayer.cpp rename to paddle/legacy/gserver/layers/MultiplexLayer.cpp index 82857f8c3e..9ca2b24175 100644 --- a/paddle/gserver/layers/MultiplexLayer.cpp +++ b/paddle/legacy/gserver/layers/MultiplexLayer.cpp @@ -13,9 +13,9 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "Layer.h" -#include "paddle/math/Matrix.h" -#include "paddle/utils/Logging.h" -#include "paddle/utils/Stat.h" +#include "paddle/legacy/math/Matrix.h" +#include "paddle/legacy/utils/Logging.h" +#include "paddle/legacy/utils/Stat.h" namespace paddle { @@ -37,7 +37,7 @@ namespace paddle { */ class MultiplexLayer : public Layer { -protected: + protected: /** * @brief A struct is used to save the copy information, includes input * layer index and copy size. @@ -64,7 +64,7 @@ protected: /// Temporary matrix pointer to point to output data. MatrixPtr tmpDest_; -public: + public: explicit MultiplexLayer(const LayerConfig& config) : Layer(config) {} ~MultiplexLayer() {} @@ -75,7 +75,7 @@ public: void forward(PassType passType) override; void backward(const UpdateCallback& callback = nullptr) override; -private: + private: /** * @brief Calculate copy info for input layers. */ diff --git a/paddle/gserver/layers/NCELayer.cpp b/paddle/legacy/gserver/layers/NCELayer.cpp similarity index 99% rename from paddle/gserver/layers/NCELayer.cpp rename to paddle/legacy/gserver/layers/NCELayer.cpp index d3d7b1fd9a..ae4d640816 100644 --- a/paddle/gserver/layers/NCELayer.cpp +++ b/paddle/legacy/gserver/layers/NCELayer.cpp @@ -16,7 +16,7 @@ limitations under the License. */ #include "Layer.h" #include "MultinomialSampler.h" -#include "paddle/math/MathFunctions.h" +#include "paddle/legacy/math/MathFunctions.h" namespace paddle { @@ -54,7 +54,7 @@ class NCELayer : public Layer { IVectorPtr labelIds_; -public: + public: explicit NCELayer(const LayerConfig& config) : Layer(config), numClasses_(config.num_classes()), diff --git a/paddle/gserver/layers/NormLayer.cpp b/paddle/legacy/gserver/layers/NormLayer.cpp similarity index 97% rename from paddle/gserver/layers/NormLayer.cpp rename to paddle/legacy/gserver/layers/NormLayer.cpp index 4678f6fa9a..443e26dbc8 100644 --- a/paddle/gserver/layers/NormLayer.cpp +++ b/paddle/legacy/gserver/layers/NormLayer.cpp @@ -14,7 +14,7 @@ limitations under the License. 
*/ #include "NormLayer.h" #include "NormProjectionLayer.h" -#include "paddle/utils/Logging.h" +#include "paddle/legacy/utils/Logging.h" namespace paddle { REGISTER_LAYER_CREATE_FUNC(norm, &NormLayer::create); diff --git a/paddle/gserver/layers/NormLayer.h b/paddle/legacy/gserver/layers/NormLayer.h similarity index 97% rename from paddle/gserver/layers/NormLayer.h rename to paddle/legacy/gserver/layers/NormLayer.h index c89cbbfce9..5ac00034d0 100644 --- a/paddle/gserver/layers/NormLayer.h +++ b/paddle/legacy/gserver/layers/NormLayer.h @@ -17,7 +17,7 @@ limitations under the License. */ #include #include "Layer.h" #include "NormLayer.h" -#include "paddle/math/Matrix.h" +#include "paddle/legacy/math/Matrix.h" namespace paddle { @@ -27,7 +27,7 @@ namespace paddle { * @note Normalize the input in local region */ class NormLayer : public Layer { -public: + public: explicit NormLayer(const LayerConfig& config) : Layer(config) {} bool init(const LayerMap& layerMap, @@ -49,12 +49,12 @@ public: * Need to implement in the future. */ class ResponseNormLayer : public NormLayer { -protected: + protected: size_t channels_, size_, outputX_, imgSize_, outputY_, imgSizeY_; real scale_, pow_; MatrixPtr denoms_; -public: + public: explicit ResponseNormLayer(const LayerConfig& config) : NormLayer(config) {} bool init(const LayerMap& layerMap, @@ -76,7 +76,7 @@ public: * Cheng-Yang Fu, Alexander C. Berg. SSD: Single Shot MultiBox Detector */ class CrossChannelNormLayer : public NormLayer { -public: + public: explicit CrossChannelNormLayer(const LayerConfig& config) : NormLayer(config) {} bool init(const LayerMap& layerMap, const ParameterMap& parameterMap); @@ -85,7 +85,7 @@ public: MatrixPtr createSampleMatrix(MatrixPtr data, size_t iter, size_t spatialDim); MatrixPtr createSpatialMatrix(MatrixPtr data, size_t iter, size_t spatialDim); -protected: + protected: size_t channels_; std::unique_ptr<Weight> scale_; MatrixPtr scaleDiff_; diff --git a/paddle/gserver/layers/NormProjectionLayer.cpp b/paddle/legacy/gserver/layers/NormProjectionLayer.cpp similarity index 97% rename from paddle/gserver/layers/NormProjectionLayer.cpp rename to paddle/legacy/gserver/layers/NormProjectionLayer.cpp index 3013bbdbc7..72affaa1ce 100644 --- a/paddle/gserver/layers/NormProjectionLayer.cpp +++ b/paddle/legacy/gserver/layers/NormProjectionLayer.cpp @@ -13,8 +13,8 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "NormProjectionLayer.h" -#include "paddle/utils/Logging.h" -#include "paddle/utils/Stat.h" +#include "paddle/legacy/utils/Logging.h" +#include "paddle/legacy/utils/Stat.h" namespace paddle { size_t CMRProjectionNormLayer::getSize() { diff --git a/paddle/gserver/layers/NormProjectionLayer.h b/paddle/legacy/gserver/layers/NormProjectionLayer.h similarity index 95% rename from paddle/gserver/layers/NormProjectionLayer.h rename to paddle/legacy/gserver/layers/NormProjectionLayer.h index 898b5823a9..492d1fcb72 100644 --- a/paddle/gserver/layers/NormProjectionLayer.h +++ b/paddle/legacy/gserver/layers/NormProjectionLayer.h @@ -16,7 +16,7 @@ limitations under the License.
*/ #include #include "NormLayer.h" -#include "paddle/math/Matrix.h" +#include "paddle/legacy/math/Matrix.h" namespace paddle { @@ -28,7 +28,7 @@ class CMRProjectionNormLayer : public ResponseNormLayer { size_t imgSizeH_, imgSizeW_; size_t outputH_, outputW_; -public: + public: explicit CMRProjectionNormLayer(const LayerConfig& config) : ResponseNormLayer(config) {} @@ -41,7 +41,7 @@ public: void forward(PassType passType) override; void backward(const UpdateCallback& callback = nullptr) override; -protected: + protected: TensorShape shape_; }; } // namespace paddle diff --git a/paddle/gserver/layers/Operator.cpp b/paddle/legacy/gserver/layers/Operator.cpp similarity index 100% rename from paddle/gserver/layers/Operator.cpp rename to paddle/legacy/gserver/layers/Operator.cpp diff --git a/paddle/gserver/layers/Operator.h b/paddle/legacy/gserver/layers/Operator.h similarity index 95% rename from paddle/gserver/layers/Operator.h rename to paddle/legacy/gserver/layers/Operator.h index a620926ccc..20a248985e 100644 --- a/paddle/gserver/layers/Operator.h +++ b/paddle/legacy/gserver/layers/Operator.h @@ -15,10 +15,10 @@ limitations under the License. */ #pragma once #include "ModelConfig.pb.h" -#include "paddle/parameter/Parameter.h" +#include "paddle/legacy/parameter/Parameter.h" #include "Layer.h" -#include "paddle/parameter/Argument.h" +#include "paddle/legacy/parameter/Argument.h" namespace paddle { @@ -34,7 +34,7 @@ namespace paddle { * @note: Operator can't have parameters. */ class Operator { -public: + public: static Operator* create(const OperatorConfig& config, bool useGpu); Operator(const OperatorConfig& config, bool useGpu) @@ -81,7 +81,7 @@ public: */ virtual LayerStatePtr getState() { return nullptr; } -protected: + protected: /// Config of operator OperatorConfig config_; bool useGpu_; diff --git a/paddle/gserver/layers/OuterProdLayer.cpp b/paddle/legacy/gserver/layers/OuterProdLayer.cpp similarity index 96% rename from paddle/gserver/layers/OuterProdLayer.cpp rename to paddle/legacy/gserver/layers/OuterProdLayer.cpp index 75f4abf93e..d0928be9d4 100644 --- a/paddle/gserver/layers/OuterProdLayer.cpp +++ b/paddle/legacy/gserver/layers/OuterProdLayer.cpp @@ -13,9 +13,9 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "Layer.h" -#include "paddle/math/Matrix.h" -#include "paddle/utils/Logging.h" -#include "paddle/utils/Stat.h" +#include "paddle/legacy/math/Matrix.h" +#include "paddle/legacy/utils/Logging.h" +#include "paddle/legacy/utils/Stat.h" namespace paddle { @@ -28,12 +28,12 @@ namespace paddle { */ class OuterProdLayer : public Layer { -protected: + protected: MatrixPtr tmpMtx0; MatrixPtr tmpRow0; MatrixPtr tmpRow1; -public: + public: explicit OuterProdLayer(const LayerConfig& config) : Layer(config) {} ~OuterProdLayer() {} diff --git a/paddle/gserver/layers/PadLayer.cpp b/paddle/legacy/gserver/layers/PadLayer.cpp similarity index 98% rename from paddle/gserver/layers/PadLayer.cpp rename to paddle/legacy/gserver/layers/PadLayer.cpp index b1910e108b..7b92b3de2d 100644 --- a/paddle/gserver/layers/PadLayer.cpp +++ b/paddle/legacy/gserver/layers/PadLayer.cpp @@ -13,7 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ #include "PadLayer.h" -#include "paddle/utils/Stat.h" +#include "paddle/legacy/utils/Stat.h" namespace paddle { diff --git a/paddle/gserver/layers/PadLayer.h b/paddle/legacy/gserver/layers/PadLayer.h similarity index 98% rename from paddle/gserver/layers/PadLayer.h rename to paddle/legacy/gserver/layers/PadLayer.h index 7e09d7f8a0..46b8a59597 100644 --- a/paddle/gserver/layers/PadLayer.h +++ b/paddle/legacy/gserver/layers/PadLayer.h @@ -24,7 +24,7 @@ namespace paddle { * the 4th dimension according to padc_, padh_ and padw_. */ class PadLayer : public Layer { -public: + public: explicit PadLayer(const LayerConfig& config) : Layer(config) {} ~PadLayer() {} @@ -34,7 +34,7 @@ public: void forward(PassType passType) override; void backward(const UpdateCallback& callback = nullptr) override; -protected: + protected: void setOutDims(const size_t batchSize); void setTensorDim(const size_t batchSize); diff --git a/paddle/gserver/layers/ParameterReluLayer.cpp b/paddle/legacy/gserver/layers/ParameterReluLayer.cpp similarity index 96% rename from paddle/gserver/layers/ParameterReluLayer.cpp rename to paddle/legacy/gserver/layers/ParameterReluLayer.cpp index 12d04fc1c3..23715d1975 100644 --- a/paddle/gserver/layers/ParameterReluLayer.cpp +++ b/paddle/legacy/gserver/layers/ParameterReluLayer.cpp @@ -13,8 +13,8 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "ParameterReluLayer.h" -#include "paddle/utils/Logging.h" -#include "paddle/utils/Stat.h" +#include "paddle/legacy/utils/Logging.h" +#include "paddle/legacy/utils/Stat.h" namespace paddle { diff --git a/paddle/gserver/layers/ParameterReluLayer.h b/paddle/legacy/gserver/layers/ParameterReluLayer.h similarity index 94% rename from paddle/gserver/layers/ParameterReluLayer.h rename to paddle/legacy/gserver/layers/ParameterReluLayer.h index 3725fa4a11..3aac4b42f6 100644 --- a/paddle/gserver/layers/ParameterReluLayer.h +++ b/paddle/legacy/gserver/layers/ParameterReluLayer.h @@ -15,8 +15,8 @@ limitations under the License. */ #pragma once #include "Layer.h" -#include "paddle/math/Matrix.h" -#include "paddle/utils/ThreadLocal.h" +#include "paddle/legacy/math/Matrix.h" +#include "paddle/legacy/utils/ThreadLocal.h" namespace paddle { @@ -36,7 +36,7 @@ namespace paddle { */ class ParameterReluLayer : public Layer { -protected: + protected: std::unique_ptr<Weight> weight_; /** @@ -51,7 +51,7 @@ protected: */ size_t partialSum_; -public: + public: explicit ParameterReluLayer(const LayerConfig& config) : Layer(config) {} ~ParameterReluLayer() {} diff --git a/paddle/gserver/layers/Pool3DLayer.cpp b/paddle/legacy/gserver/layers/Pool3DLayer.cpp similarity index 99% rename from paddle/gserver/layers/Pool3DLayer.cpp rename to paddle/legacy/gserver/layers/Pool3DLayer.cpp index 3ac9eb0d81..ae3f55c27f 100644 --- a/paddle/gserver/layers/Pool3DLayer.cpp +++ b/paddle/legacy/gserver/layers/Pool3DLayer.cpp @@ -14,7 +14,7 @@ limitations under the License. */ #include "Pool3DLayer.h" #include "PoolProjectionLayer.h" -#include "paddle/utils/Logging.h" +#include "paddle/legacy/utils/Logging.h" namespace paddle { diff --git a/paddle/gserver/layers/Pool3DLayer.h b/paddle/legacy/gserver/layers/Pool3DLayer.h similarity index 93% rename from paddle/gserver/layers/Pool3DLayer.h rename to paddle/legacy/gserver/layers/Pool3DLayer.h index 59ee73f7cb..6851c44ab2 100644 --- a/paddle/gserver/layers/Pool3DLayer.h +++ b/paddle/legacy/gserver/layers/Pool3DLayer.h @@ -16,8 +16,8 @@ limitations under the License.
*/ #include #include "Layer.h" -#include "paddle/math/MathUtils.h" -#include "paddle/math/Matrix.h" +#include "paddle/legacy/math/MathUtils.h" +#include "paddle/legacy/math/Matrix.h" namespace paddle { @@ -26,7 +26,7 @@ namespace paddle { * Pools the input within regions */ class Pool3DLayer : public Layer { -public: + public: explicit Pool3DLayer(const LayerConfig& config) : Layer(config) {} ~Pool3DLayer() {} @@ -36,7 +36,7 @@ public: void backward(const UpdateCallback& callback) override; size_t getSize(); -protected: + protected: int channels_; int sizeX_, sizeY_, sizeZ_; int strideW_, strideH_, strideD_; diff --git a/paddle/gserver/layers/PoolLayer.cpp b/paddle/legacy/gserver/layers/PoolLayer.cpp similarity index 98% rename from paddle/gserver/layers/PoolLayer.cpp rename to paddle/legacy/gserver/layers/PoolLayer.cpp index ee589e6be5..df172d9575 100644 --- a/paddle/gserver/layers/PoolLayer.cpp +++ b/paddle/legacy/gserver/layers/PoolLayer.cpp @@ -15,7 +15,7 @@ limitations under the License. */ #include "PoolLayer.h" #include "MaxPoolWithMaskLayer.h" #include "PoolProjectionLayer.h" -#include "paddle/utils/Logging.h" +#include "paddle/legacy/utils/Logging.h" #ifdef PADDLE_WITH_CUDA #include "CudnnPoolLayer.h" #endif diff --git a/paddle/gserver/layers/PoolLayer.h b/paddle/legacy/gserver/layers/PoolLayer.h similarity index 92% rename from paddle/gserver/layers/PoolLayer.h rename to paddle/legacy/gserver/layers/PoolLayer.h index 58d5fb0a09..0808dfae84 100644 --- a/paddle/gserver/layers/PoolLayer.h +++ b/paddle/legacy/gserver/layers/PoolLayer.h @@ -16,8 +16,8 @@ limitations under the License. */ #include #include "Layer.h" -#include "paddle/math/MathUtils.h" -#include "paddle/math/Matrix.h" +#include "paddle/legacy/math/MathUtils.h" +#include "paddle/legacy/math/Matrix.h" namespace paddle { @@ -26,7 +26,7 @@ namespace paddle { * Pools the input within regions */ class PoolLayer : public Layer { -protected: + protected: size_t channels_, sizeX_, stride_, outputX_, imgSize_; int confPadding_; @@ -40,7 +40,7 @@ protected: bool excludeMode_; -public: + public: explicit PoolLayer(const LayerConfig& config) : Layer(config) {} /** diff --git a/paddle/gserver/layers/PoolProjection.cpp b/paddle/legacy/gserver/layers/PoolProjection.cpp similarity index 100% rename from paddle/gserver/layers/PoolProjection.cpp rename to paddle/legacy/gserver/layers/PoolProjection.cpp diff --git a/paddle/gserver/layers/PoolProjection.h b/paddle/legacy/gserver/layers/PoolProjection.h similarity index 96% rename from paddle/gserver/layers/PoolProjection.h rename to paddle/legacy/gserver/layers/PoolProjection.h index c99287dbf0..d01b6a13f0 100644 --- a/paddle/gserver/layers/PoolProjection.h +++ b/paddle/legacy/gserver/layers/PoolProjection.h @@ -15,12 +15,12 @@ limitations under the License. 
*/ #pragma once #include "Projection.h" -#include "paddle/math/MathUtils.h" +#include "paddle/legacy/math/MathUtils.h" namespace paddle { class PoolProjection : public Projection { -protected: + protected: size_t imgSizeY_, imgSize_; size_t outputY_, outputX_; size_t strideY_, stride_; @@ -30,7 +30,7 @@ protected: std::string poolType_; bool excludeMode_; -public: + public: PoolProjection(const ProjectionConfig& config, ParameterPtr parameter, bool useGpu); @@ -45,7 +45,7 @@ public: }; class MaxPoolProjection : public PoolProjection { -public: + public: MaxPoolProjection(const ProjectionConfig& config, ParameterPtr parameter, bool useGpu) @@ -56,7 +56,7 @@ public: }; class AvgPoolProjection : public PoolProjection { -public: + public: AvgPoolProjection(const ProjectionConfig& config, ParameterPtr parameter, bool useGpu) diff --git a/paddle/gserver/layers/PoolProjectionLayer.cpp b/paddle/legacy/gserver/layers/PoolProjectionLayer.cpp similarity index 96% rename from paddle/gserver/layers/PoolProjectionLayer.cpp rename to paddle/legacy/gserver/layers/PoolProjectionLayer.cpp index 73d320e67e..e44b1d7ba1 100644 --- a/paddle/gserver/layers/PoolProjectionLayer.cpp +++ b/paddle/legacy/gserver/layers/PoolProjectionLayer.cpp @@ -13,8 +13,8 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "PoolProjectionLayer.h" -#include "paddle/utils/Logging.h" -#include "paddle/utils/Stat.h" +#include "paddle/legacy/utils/Logging.h" +#include "paddle/legacy/utils/Stat.h" namespace paddle { diff --git a/paddle/gserver/layers/PoolProjectionLayer.h b/paddle/legacy/gserver/layers/PoolProjectionLayer.h similarity index 95% rename from paddle/gserver/layers/PoolProjectionLayer.h rename to paddle/legacy/gserver/layers/PoolProjectionLayer.h index 5a97a7769a..fcd35bbba4 100644 --- a/paddle/gserver/layers/PoolProjectionLayer.h +++ b/paddle/legacy/gserver/layers/PoolProjectionLayer.h @@ -17,20 +17,20 @@ limitations under the License. */ #include #include "PoolLayer.h" #include "PoolProjection.h" -#include "paddle/math/Matrix.h" +#include "paddle/legacy/math/Matrix.h" namespace paddle { /** * @brief Basic parent layer of different kinds of pooling */ class PoolProjectionLayer : public PoolLayer { -protected: + protected: size_t imgSizeH_, imgSizeW_; size_t outputH_, outputW_; std::unique_ptr poolProjection_; ProjectionConfig projectionConfig_; -public: + public: explicit PoolProjectionLayer(const LayerConfig& config) : PoolLayer(config) { PoolConfig* conf = projectionConfig_.mutable_pool_conf(); *conf = config_.inputs(0).pool_conf(); diff --git a/paddle/gserver/layers/PowerLayer.cpp b/paddle/legacy/gserver/layers/PowerLayer.cpp similarity index 95% rename from paddle/gserver/layers/PowerLayer.cpp rename to paddle/legacy/gserver/layers/PowerLayer.cpp index 18f650fcda..5e94c64db6 100644 --- a/paddle/gserver/layers/PowerLayer.cpp +++ b/paddle/legacy/gserver/layers/PowerLayer.cpp @@ -13,9 +13,9 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ #include "Layer.h" -#include "paddle/math/Matrix.h" -#include "paddle/utils/Logging.h" -#include "paddle/utils/Stat.h" +#include "paddle/legacy/math/Matrix.h" +#include "paddle/legacy/utils/Logging.h" +#include "paddle/legacy/utils/Stat.h" namespace paddle { @@ -32,10 +32,10 @@ namespace paddle { */ class PowerLayer : public Layer { -protected: + protected: MatrixPtr tmpMtx; -public: + public: explicit PowerLayer(const LayerConfig& config) : Layer(config) {} ~PowerLayer() {} diff --git a/paddle/gserver/layers/PrintLayer.cpp b/paddle/legacy/gserver/layers/PrintLayer.cpp similarity index 99% rename from paddle/gserver/layers/PrintLayer.cpp rename to paddle/legacy/gserver/layers/PrintLayer.cpp index 5a527d598d..6fbcc447f9 100644 --- a/paddle/gserver/layers/PrintLayer.cpp +++ b/paddle/legacy/gserver/layers/PrintLayer.cpp @@ -17,7 +17,7 @@ limitations under the License. */ namespace paddle { class PrintLayer : public Layer { -public: + public: explicit PrintLayer(const LayerConfig& config) : Layer(config) {} void forward(PassType passType) override { diff --git a/paddle/gserver/layers/PriorBox.cpp b/paddle/legacy/gserver/layers/PriorBox.cpp similarity index 89% rename from paddle/gserver/layers/PriorBox.cpp rename to paddle/legacy/gserver/layers/PriorBox.cpp index af2cc05a95..83aab6e366 100644 --- a/paddle/gserver/layers/PriorBox.cpp +++ b/paddle/legacy/gserver/layers/PriorBox.cpp @@ -13,8 +13,8 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "Layer.h" -#include "paddle/math/BaseMatrix.h" -#include "paddle/math/Matrix.h" +#include "paddle/legacy/math/BaseMatrix.h" +#include "paddle/legacy/math/Matrix.h" namespace paddle { /** @@ -28,7 +28,7 @@ namespace paddle { */ class PriorBoxLayer : public Layer { -public: + public: // NOLINT explicit PriorBoxLayer(const LayerConfig& config) : Layer(config) {} bool init(const LayerMap& layerMap, const ParameterMap& parameterMap) override; @@ -36,7 +36,7 @@ public: void forward(PassType passType) override; void backward(const UpdateCallback& callback) override {} -protected: + protected: // NOLINT int numPriors_; std::vector minSize_; std::vector maxSize_; @@ -109,11 +109,18 @@ void PriorBoxLayer::forward(PassType passType) { real boxWidth = minSize; real boxHeight = minSize; - // priors with different aspect ratios - for (size_t r = 0; r < aspectRatio_.size(); r++) { - real ar = aspectRatio_[r]; - boxWidth = minSize * sqrt(ar); - boxHeight = minSize / sqrt(ar); + // first prior: aspect_ratio == 1.0, compatible to old logic + tmpPtr[idx++] = (centerX - boxWidth / 2.) / imageWidth; + tmpPtr[idx++] = (centerY - boxHeight / 2.) / imageHeight; + tmpPtr[idx++] = (centerX + boxWidth / 2.) / imageWidth; + tmpPtr[idx++] = (centerY + boxHeight / 2.) / imageHeight; + // set the variance. + for (int t = 0; t < 4; t++) tmpPtr[idx++] = variance_[t]; + + if (maxSize_.size() > 0) { + // square prior with size sqrt(minSize * maxSize) + real maxSize = maxSize_[s]; + boxWidth = boxHeight = sqrt(minSize * maxSize); tmpPtr[idx++] = (centerX - boxWidth / 2.) / imageWidth; tmpPtr[idx++] = (centerY - boxHeight / 2.) / imageHeight; tmpPtr[idx++] = (centerX + boxWidth / 2.) 
/ imageWidth; @@ -122,10 +129,14 @@ void PriorBoxLayer::forward(PassType passType) { for (int t = 0; t < 4; t++) tmpPtr[idx++] = variance_[t]; } - if (maxSize_.size() > 0) { - // square prior with size sqrt(minSize * maxSize) - real maxSize = maxSize_[s]; - boxWidth = boxHeight = sqrt(minSize * maxSize); + // priors with different aspect ratios + for (size_t r = 0; r < aspectRatio_.size(); r++) { + real ar = aspectRatio_[r]; + if (fabs(ar - 1.0) < 1e-6) { + continue; + } + boxWidth = minSize * sqrt(ar); + boxHeight = minSize / sqrt(ar); tmpPtr[idx++] = (centerX - boxWidth / 2.) / imageWidth; tmpPtr[idx++] = (centerY - boxHeight / 2.) / imageHeight; tmpPtr[idx++] = (centerX + boxWidth / 2.) / imageWidth; diff --git a/paddle/gserver/layers/Projection.cpp b/paddle/legacy/gserver/layers/Projection.cpp similarity index 100% rename from paddle/gserver/layers/Projection.cpp rename to paddle/legacy/gserver/layers/Projection.cpp diff --git a/paddle/gserver/layers/Projection.h b/paddle/legacy/gserver/layers/Projection.h similarity index 98% rename from paddle/gserver/layers/Projection.h rename to paddle/legacy/gserver/layers/Projection.h index 1f0b96c79e..974f5a2cac 100644 --- a/paddle/gserver/layers/Projection.h +++ b/paddle/legacy/gserver/layers/Projection.h @@ -16,7 +16,7 @@ limitations under the License. */ #include "Layer.h" #include "ModelConfig.pb.h" -#include "paddle/parameter/Parameter.h" +#include "paddle/legacy/parameter/Parameter.h" namespace paddle { @@ -37,7 +37,7 @@ namespace paddle { * to output Argument. */ class Projection { -public: + public: static Projection* create(const ProjectionConfig& config, ParameterPtr parameter, bool useGpu); @@ -98,7 +98,7 @@ public: */ size_t getOutputSize() const { return config_.output_size(); } -protected: + protected: /** * Create layer function. Function is called in forward or backward. * \param function, Layer::forward_ or Layer::backward_ @@ -119,7 +119,7 @@ protected: func->init(config); } -protected: + protected: /// Config of projection ProjectionConfig config_; /// Parameter of projection diff --git a/paddle/gserver/layers/ROIPoolLayer.cpp b/paddle/legacy/gserver/layers/ROIPoolLayer.cpp similarity index 100% rename from paddle/gserver/layers/ROIPoolLayer.cpp rename to paddle/legacy/gserver/layers/ROIPoolLayer.cpp diff --git a/paddle/gserver/layers/ROIPoolLayer.h b/paddle/legacy/gserver/layers/ROIPoolLayer.h similarity index 98% rename from paddle/gserver/layers/ROIPoolLayer.h rename to paddle/legacy/gserver/layers/ROIPoolLayer.h index b1735e9748..801a9b3aeb 100644 --- a/paddle/gserver/layers/ROIPoolLayer.h +++ b/paddle/legacy/gserver/layers/ROIPoolLayer.h @@ -33,7 +33,7 @@ namespace paddle { */ class ROIPoolLayer : public Layer { -protected: + protected: size_t channels_; size_t width_; size_t height_; @@ -44,7 +44,7 @@ protected: // Since there is no int matrix, use real maxtrix instead. 
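The PriorBoxLayer::forward hunk above reorders prior-box generation per feature-map cell: the aspect-ratio-1.0 prior is emitted first, then the optional square prior with side sqrt(minSize * maxSize), and only then the remaining aspect ratios, with ratios within 1e-6 of 1.0 skipped so the first prior is not duplicated. Below is a minimal standalone sketch of that order, not part of the patch; `real`, `emitBox`, and `generatePriors` are illustrative names rather than Paddle APIs, and each prior is written as four normalized corners followed by the four variance terms, mirroring the eight tmpPtr[idx++] writes per prior in the hunk.

#include <cmath>
#include <vector>

namespace sketch {

using real = float;  // assumption: Paddle's `real` built as single precision

// Append one prior: normalized (xmin, ymin, xmax, ymax) plus four variances.
inline void emitBox(std::vector<real>* out, real cx, real cy, real w, real h,
                    real imgW, real imgH, const real (&var)[4]) {
  out->push_back((cx - w / 2) / imgW);
  out->push_back((cy - h / 2) / imgH);
  out->push_back((cx + w / 2) / imgW);
  out->push_back((cy + h / 2) / imgH);
  for (real v : var) out->push_back(v);
}

// Priors for one feature-map cell, in the order established by the patch.
inline void generatePriors(real cx, real cy, real minSize, real maxSize,
                           const std::vector<real>& aspectRatios, real imgW,
                           real imgH, const real (&var)[4],
                           std::vector<real>* out) {
  // 1. aspect_ratio == 1.0 prior, emitted first for compatibility.
  emitBox(out, cx, cy, minSize, minSize, imgW, imgH, var);
  // 2. square prior with side sqrt(minSize * maxSize), when a maxSize is set.
  if (maxSize > 0) {
    const real side = std::sqrt(minSize * maxSize);
    emitBox(out, cx, cy, side, side, imgW, imgH, var);
  }
  // 3. remaining aspect ratios; ar == 1.0 is already covered by prior 1.
  for (real ar : aspectRatios) {
    if (std::fabs(ar - 1.0) < 1e-6) continue;
    emitBox(out, cx, cy, minSize * std::sqrt(ar), minSize / std::sqrt(ar),
            imgW, imgH, var);
  }
}

}  // namespace sketch

Note that the aspect-ratio priors keep a constant area of minSize squared while their width/height ratio equals ar, since w = minSize * sqrt(ar) and h = minSize / sqrt(ar).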
diff --git a/paddle/gserver/layers/Projection.cpp b/paddle/legacy/gserver/layers/Projection.cpp similarity index 100% rename from paddle/gserver/layers/Projection.cpp rename to paddle/legacy/gserver/layers/Projection.cpp diff --git a/paddle/gserver/layers/Projection.h b/paddle/legacy/gserver/layers/Projection.h similarity index 98% rename from paddle/gserver/layers/Projection.h rename to paddle/legacy/gserver/layers/Projection.h index 1f0b96c79e..974f5a2cac 100644 --- a/paddle/gserver/layers/Projection.h +++ b/paddle/legacy/gserver/layers/Projection.h @@ -16,7 +16,7 @@ limitations under the License. */ #include "Layer.h" #include "ModelConfig.pb.h" -#include "paddle/parameter/Parameter.h" +#include "paddle/legacy/parameter/Parameter.h" namespace paddle { @@ -37,7 +37,7 @@ namespace paddle { * to output Argument. */ class Projection { -public: + public: static Projection* create(const ProjectionConfig& config, ParameterPtr parameter, bool useGpu); @@ -98,7 +98,7 @@ public: */ size_t getOutputSize() const { return config_.output_size(); } -protected: + protected: /** * Create layer function. Function is called in forward or backward. * \param function, Layer::forward_ or Layer::backward_ @@ -119,7 +119,7 @@ protected: func->init(config); } -protected: + protected: /// Config of projection ProjectionConfig config_; /// Parameter of projection diff --git a/paddle/gserver/layers/ROIPoolLayer.cpp b/paddle/legacy/gserver/layers/ROIPoolLayer.cpp similarity index 100% rename from paddle/gserver/layers/ROIPoolLayer.cpp rename to paddle/legacy/gserver/layers/ROIPoolLayer.cpp diff --git a/paddle/gserver/layers/ROIPoolLayer.h b/paddle/legacy/gserver/layers/ROIPoolLayer.h similarity index 98% rename from paddle/gserver/layers/ROIPoolLayer.h rename to paddle/legacy/gserver/layers/ROIPoolLayer.h index b1735e9748..801a9b3aeb 100644 --- a/paddle/gserver/layers/ROIPoolLayer.h +++ b/paddle/legacy/gserver/layers/ROIPoolLayer.h @@ -33,7 +33,7 @@ namespace paddle { */ class ROIPoolLayer : public Layer { -protected: + protected: size_t channels_; size_t width_; size_t height_; @@ -44,7 +44,7 @@ protected: // Since there is no int matrix, use a real matrix instead.
MatrixPtr maxIdxs_; -public: + public: explicit ROIPoolLayer(const LayerConfig& config) : Layer(config) {} bool init(const LayerMap& layerMap, diff --git a/paddle/gserver/layers/RecurrentLayer.cpp b/paddle/legacy/gserver/layers/RecurrentLayer.cpp similarity index 100% rename from paddle/gserver/layers/RecurrentLayer.cpp rename to paddle/legacy/gserver/layers/RecurrentLayer.cpp diff --git a/paddle/gserver/layers/RecurrentLayer.h b/paddle/legacy/gserver/layers/RecurrentLayer.h similarity index 98% rename from paddle/gserver/layers/RecurrentLayer.h rename to paddle/legacy/gserver/layers/RecurrentLayer.h index 8fd4fe6b78..287ea27a09 100644 --- a/paddle/gserver/layers/RecurrentLayer.h +++ b/paddle/legacy/gserver/layers/RecurrentLayer.h @@ -15,7 +15,7 @@ limitations under the License. */ #include #include "Layer.h" #include "SequenceToBatch.h" -#include "paddle/utils/Stat.h" +#include "paddle/legacy/utils/Stat.h" namespace paddle { @@ -40,7 +40,7 @@ namespace paddle { */ class RecurrentLayer : public Layer { -public: + public: explicit RecurrentLayer(const LayerConfig& config) : Layer(config) {} bool init(const LayerMap& layerMap, @@ -56,7 +56,7 @@ public: LayerStatePtr getState() override; -protected: + protected: /** * @brief If the user does not set --rnn_use_batch=true, it will * compute the rnn forward one sequence at a time by default. @@ -110,7 +110,7 @@ protected: size_t numSequences, const int* starts); -protected: + protected: std::unique_ptr weight_; std::unique_ptr bias_; diff --git a/paddle/gserver/layers/RecurrentLayerGroup.cpp b/paddle/legacy/gserver/layers/RecurrentLayerGroup.cpp similarity index 94% rename from paddle/gserver/layers/RecurrentLayerGroup.cpp rename to paddle/legacy/gserver/layers/RecurrentLayerGroup.cpp index 44b57185c5..3932124599 100644 --- a/paddle/gserver/layers/RecurrentLayerGroup.cpp +++ b/paddle/legacy/gserver/layers/RecurrentLayerGroup.cpp @@ -13,10 +13,10 @@ See the License for the specific language governing permissions and limitations under the License. */ #include -#include "paddle/gserver/layers/Layer.h" +#include "paddle/legacy/gserver/layers/Layer.h" -#include "paddle/gserver/gradientmachines/RecurrentGradientMachine.h" -#include "paddle/utils/Stat.h" +#include "paddle/legacy/gserver/gradientmachines/RecurrentGradientMachine.h" +#include "paddle/legacy/utils/Stat.h" namespace paddle { @@ -27,7 +27,7 @@ namespace paddle { * between RecurrentLayerGroupBegin and RecurrentLayerGroupEnd. */ class RecurrentLayerGroup : public Layer { -public: + public: explicit RecurrentLayerGroup(const LayerConfig& config) : Layer(config) {} void initSubNetwork(NeuralNetwork* rootNetwork, @@ -58,7 +58,7 @@ public: callback(*network_); } -private: + private: std::unique_ptr network_; }; diff --git a/paddle/gserver/layers/ResizeLayer.cpp b/paddle/legacy/gserver/layers/ResizeLayer.cpp similarity index 96% rename from paddle/gserver/layers/ResizeLayer.cpp rename to paddle/legacy/gserver/layers/ResizeLayer.cpp index 831f4c3b7e..8f8aad820f 100644 --- a/paddle/gserver/layers/ResizeLayer.cpp +++ b/paddle/legacy/gserver/layers/ResizeLayer.cpp @@ -13,8 +13,8 @@ See the License for the specific language governing permissions and limitations under the License.
*/ #include "Layer.h" -#include "paddle/math/BaseMatrix.h" -#include "paddle/math/Matrix.h" +#include "paddle/legacy/math/BaseMatrix.h" +#include "paddle/legacy/math/Matrix.h" namespace paddle { /** @@ -24,7 +24,7 @@ namespace paddle { * resize matrix: (height * width / size) * size */ class ResizeLayer : public Layer { -public: + public: explicit ResizeLayer(const LayerConfig& config) : Layer(config) {} bool init(const LayerMap& layerMap, diff --git a/paddle/gserver/layers/RotateLayer.cpp b/paddle/legacy/gserver/layers/RotateLayer.cpp similarity index 100% rename from paddle/gserver/layers/RotateLayer.cpp rename to paddle/legacy/gserver/layers/RotateLayer.cpp diff --git a/paddle/gserver/layers/RotateLayer.h b/paddle/legacy/gserver/layers/RotateLayer.h similarity index 95% rename from paddle/gserver/layers/RotateLayer.h rename to paddle/legacy/gserver/layers/RotateLayer.h index 3b619921ab..498e24372b 100644 --- a/paddle/gserver/layers/RotateLayer.h +++ b/paddle/legacy/gserver/layers/RotateLayer.h @@ -15,7 +15,7 @@ limitations under the License. */ #pragma once #include "Layer.h" -#include "paddle/math/Matrix.h" +#include "paddle/legacy/math/Matrix.h" namespace paddle { /** @@ -32,7 +32,7 @@ namespace paddle { */ class RotateLayer : public Layer { -public: + public: explicit RotateLayer(const LayerConfig& config) : Layer(config) {} bool init(const LayerMap& layerMap, const ParameterMap& parameterMap); @@ -40,7 +40,7 @@ public: void forward(PassType passType); void backward(const UpdateCallback& callback = nullptr); -private: + private: int batchSize_; int size_; int height_; diff --git a/paddle/gserver/layers/RowConvLayer.cpp b/paddle/legacy/gserver/layers/RowConvLayer.cpp similarity index 98% rename from paddle/gserver/layers/RowConvLayer.cpp rename to paddle/legacy/gserver/layers/RowConvLayer.cpp index 63b499e486..1961557dc2 100644 --- a/paddle/gserver/layers/RowConvLayer.cpp +++ b/paddle/legacy/gserver/layers/RowConvLayer.cpp @@ -13,7 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "RowConvLayer.h" -#include "paddle/utils/Stat.h" +#include "paddle/legacy/utils/Stat.h" namespace paddle { diff --git a/paddle/gserver/layers/RowConvLayer.h b/paddle/legacy/gserver/layers/RowConvLayer.h similarity index 98% rename from paddle/gserver/layers/RowConvLayer.h rename to paddle/legacy/gserver/layers/RowConvLayer.h index ba0af1de68..3b74df0b1a 100644 --- a/paddle/gserver/layers/RowConvLayer.h +++ b/paddle/legacy/gserver/layers/RowConvLayer.h @@ -22,7 +22,7 @@ namespace paddle { * \brief Row Convolution Layer. */ class RowConvLayer : public Layer { -public: + public: explicit RowConvLayer(const LayerConfig& config) : Layer(config) {} ~RowConvLayer() {} @@ -32,7 +32,7 @@ public: void forward(PassType passType) override; void backward(const UpdateCallback& callback = nullptr) override; -protected: + protected: // Row convolution weight, context_length_ * fan_out. // fan_out is the size of output feature.
std::unique_ptr weight_; diff --git a/paddle/gserver/layers/RowL2NormLayer.cpp b/paddle/legacy/gserver/layers/RowL2NormLayer.cpp similarity index 99% rename from paddle/gserver/layers/RowL2NormLayer.cpp rename to paddle/legacy/gserver/layers/RowL2NormLayer.cpp index 7ff0c9bae9..d5e6e10a02 100644 --- a/paddle/gserver/layers/RowL2NormLayer.cpp +++ b/paddle/legacy/gserver/layers/RowL2NormLayer.cpp @@ -26,12 +26,12 @@ namespace paddle { */ class RowL2NormLayer : public Layer { -protected: + protected: MatrixPtr inSquare_; MatrixPtr l2NormReciprocal_; MatrixPtr dotSum_; -public: + public: explicit RowL2NormLayer(const LayerConfig& config) : Layer(config) {} bool init(const LayerMap& layerMap, diff --git a/paddle/gserver/layers/SamplingIdLayer.cpp b/paddle/legacy/gserver/layers/SamplingIdLayer.cpp similarity index 99% rename from paddle/gserver/layers/SamplingIdLayer.cpp rename to paddle/legacy/gserver/layers/SamplingIdLayer.cpp index 2edd915d22..dbce635881 100644 --- a/paddle/gserver/layers/SamplingIdLayer.cpp +++ b/paddle/legacy/gserver/layers/SamplingIdLayer.cpp @@ -31,7 +31,7 @@ class SamplingIdLayer : public Layer { std::uniform_real_distribution rand1_; std::vector tmpCpuInput_; -public: + public: explicit SamplingIdLayer(const LayerConfig& config) : Layer(config), rand1_(0, 1) {} diff --git a/paddle/gserver/layers/ScaleShiftLayer.cpp b/paddle/legacy/gserver/layers/ScaleShiftLayer.cpp similarity index 99% rename from paddle/gserver/layers/ScaleShiftLayer.cpp rename to paddle/legacy/gserver/layers/ScaleShiftLayer.cpp index 799d1fe51a..8af78a2e27 100644 --- a/paddle/gserver/layers/ScaleShiftLayer.cpp +++ b/paddle/legacy/gserver/layers/ScaleShiftLayer.cpp @@ -30,11 +30,11 @@ namespace paddle { */ class ScaleShiftLayer : public Layer { -protected: + protected: std::unique_ptr scale_; std::unique_ptr offset_; -public: + public: explicit ScaleShiftLayer(const LayerConfig& config) : Layer(config) {} bool init(const LayerMap& layerMap, diff --git a/paddle/gserver/layers/ScaleSubRegionLayer.cpp b/paddle/legacy/gserver/layers/ScaleSubRegionLayer.cpp similarity index 98% rename from paddle/gserver/layers/ScaleSubRegionLayer.cpp rename to paddle/legacy/gserver/layers/ScaleSubRegionLayer.cpp index 68a0ff7358..70d44d2a7e 100644 --- a/paddle/gserver/layers/ScaleSubRegionLayer.cpp +++ b/paddle/legacy/gserver/layers/ScaleSubRegionLayer.cpp @@ -13,7 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "ScaleSubRegionLayer.h" -#include "paddle/utils/Stat.h" +#include "paddle/legacy/utils/Stat.h" namespace paddle { REGISTER_LAYER(scale_sub_region, ScaleSubRegionLayer); diff --git a/paddle/gserver/layers/ScaleSubRegionLayer.h b/paddle/legacy/gserver/layers/ScaleSubRegionLayer.h similarity index 98% rename from paddle/gserver/layers/ScaleSubRegionLayer.h rename to paddle/legacy/gserver/layers/ScaleSubRegionLayer.h index 6e861be485..fe431698bc 100644 --- a/paddle/gserver/layers/ScaleSubRegionLayer.h +++ b/paddle/legacy/gserver/layers/ScaleSubRegionLayer.h @@ -29,7 +29,7 @@ namespace paddle { * region. 
*/ class ScaleSubRegionLayer : public Layer { -public: + public: explicit ScaleSubRegionLayer(const LayerConfig& config) : Layer(config) {} ~ScaleSubRegionLayer() {} @@ -40,7 +40,7 @@ public: void backward(const UpdateCallback& callback = nullptr); -protected: + protected: TensorShape shape_; TensorShape indicesShape_; size_t imgH_; diff --git a/paddle/gserver/layers/ScalingLayer.cpp b/paddle/legacy/gserver/layers/ScalingLayer.cpp similarity index 95% rename from paddle/gserver/layers/ScalingLayer.cpp rename to paddle/legacy/gserver/layers/ScalingLayer.cpp index 1d98a7373d..a8286b6614 100644 --- a/paddle/gserver/layers/ScalingLayer.cpp +++ b/paddle/legacy/gserver/layers/ScalingLayer.cpp @@ -13,9 +13,9 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "Layer.h" -#include "paddle/math/Matrix.h" -#include "paddle/utils/Logging.h" -#include "paddle/utils/Stat.h" +#include "paddle/legacy/math/Matrix.h" +#include "paddle/legacy/utils/Logging.h" +#include "paddle/legacy/utils/Stat.h" namespace paddle { @@ -32,7 +32,7 @@ namespace paddle { */ class ScalingLayer : public Layer { -public: + public: explicit ScalingLayer(const LayerConfig& config) : Layer(config) {} ~ScalingLayer() {} diff --git a/paddle/gserver/layers/ScalingProjection.cpp b/paddle/legacy/gserver/layers/ScalingProjection.cpp similarity index 98% rename from paddle/gserver/layers/ScalingProjection.cpp rename to paddle/legacy/gserver/layers/ScalingProjection.cpp index 99b5b68f54..4d871cafc4 100644 --- a/paddle/gserver/layers/ScalingProjection.cpp +++ b/paddle/legacy/gserver/layers/ScalingProjection.cpp @@ -17,7 +17,7 @@ limitations under the License. */ namespace paddle { class ScalingProjection : public Projection { -public: + public: ScalingProjection(const ProjectionConfig& config, const ParameterPtr& parameter, bool useGpu) @@ -48,7 +48,7 @@ public: } } -protected: + protected: std::unique_ptr weight_; }; diff --git a/paddle/gserver/layers/SelectiveFullyConnectedLayer.cpp b/paddle/legacy/gserver/layers/SelectiveFullyConnectedLayer.cpp similarity index 99% rename from paddle/gserver/layers/SelectiveFullyConnectedLayer.cpp rename to paddle/legacy/gserver/layers/SelectiveFullyConnectedLayer.cpp index 43c98993f3..72fb068148 100644 --- a/paddle/gserver/layers/SelectiveFullyConnectedLayer.cpp +++ b/paddle/legacy/gserver/layers/SelectiveFullyConnectedLayer.cpp @@ -15,9 +15,9 @@ limitations under the License. */ #include "SelectiveFullyConnectedLayer.h" #include #include -#include "paddle/math/SparseMatrix.h" -#include "paddle/utils/Logging.h" -#include "paddle/utils/Stat.h" +#include "paddle/legacy/math/SparseMatrix.h" +#include "paddle/legacy/utils/Logging.h" +#include "paddle/legacy/utils/Stat.h" namespace paddle { diff --git a/paddle/gserver/layers/SelectiveFullyConnectedLayer.h b/paddle/legacy/gserver/layers/SelectiveFullyConnectedLayer.h similarity index 96% rename from paddle/gserver/layers/SelectiveFullyConnectedLayer.h rename to paddle/legacy/gserver/layers/SelectiveFullyConnectedLayer.h index 8156407418..3ba04d9b2a 100644 --- a/paddle/gserver/layers/SelectiveFullyConnectedLayer.h +++ b/paddle/legacy/gserver/layers/SelectiveFullyConnectedLayer.h @@ -15,8 +15,8 @@ limitations under the License. 
*/ #pragma once #include "Layer.h" -#include "paddle/math/Matrix.h" -#include "paddle/utils/ThreadLocal.h" +#include "paddle/legacy/math/Matrix.h" +#include "paddle/legacy/utils/ThreadLocal.h" namespace paddle { @@ -33,11 +33,11 @@ namespace paddle { * The config file api is selective_fc_layer. */ class SelectiveFullyConnectedLayer : public Layer { -protected: + protected: WeightList weights_; std::unique_ptr biases_; -private: + private: /** * Get selected columns each forward. */ @@ -60,7 +60,7 @@ private: /// if true, means output_.value is the same as Fc Layer bool fullOutput_; -public: + public: explicit SelectiveFullyConnectedLayer(const LayerConfig& config) : Layer(config), selCols_(nullptr) {} @@ -94,7 +94,7 @@ public: void forward(PassType passType) override; void backward(const UpdateCallback& callback = nullptr) override; -private: + private: /** * @brief Make SelectiveFC act as FullyConnectedLayer */ diff --git a/paddle/gserver/layers/SequenceConcatLayer.cpp b/paddle/legacy/gserver/layers/SequenceConcatLayer.cpp similarity index 97% rename from paddle/gserver/layers/SequenceConcatLayer.cpp rename to paddle/legacy/gserver/layers/SequenceConcatLayer.cpp index cf573f3f33..7b598e11ac 100644 --- a/paddle/gserver/layers/SequenceConcatLayer.cpp +++ b/paddle/legacy/gserver/layers/SequenceConcatLayer.cpp @@ -13,9 +13,9 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "Layer.h" -#include "paddle/math/Matrix.h" -#include "paddle/utils/Logging.h" -#include "paddle/utils/Stat.h" +#include "paddle/legacy/math/Matrix.h" +#include "paddle/legacy/utils/Logging.h" +#include "paddle/legacy/utils/Stat.h" namespace paddle { @@ -29,10 +29,10 @@ namespace paddle { */ class SequenceConcatLayer : public Layer { -protected: + protected: std::unique_ptr biases_; -public: + public: explicit SequenceConcatLayer(const LayerConfig& config) : Layer(config) {} ~SequenceConcatLayer() {} diff --git a/paddle/gserver/layers/SequenceLastInstanceLayer.cpp b/paddle/legacy/gserver/layers/SequenceLastInstanceLayer.cpp similarity index 96% rename from paddle/gserver/layers/SequenceLastInstanceLayer.cpp rename to paddle/legacy/gserver/layers/SequenceLastInstanceLayer.cpp index 6c4ae775c1..8735d71ba3 100644 --- a/paddle/gserver/layers/SequenceLastInstanceLayer.cpp +++ b/paddle/legacy/gserver/layers/SequenceLastInstanceLayer.cpp @@ -12,11 +12,11 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ -#include "paddle/utils/Logging.h" +#include "paddle/legacy/utils/Logging.h" #include "SequencePoolLayer.h" -#include "paddle/math/Matrix.h" -#include "paddle/utils/Stat.h" +#include "paddle/legacy/math/Matrix.h" +#include "paddle/legacy/utils/Stat.h" namespace paddle { @@ -38,12 +38,12 @@ namespace paddle { */ class SequenceLastInstanceLayer : public SequencePoolLayer { -protected: + protected: MatrixPtr tmpSrc_; MatrixPtr tmpDest_; std::vector instanceIds_; -public: + public: explicit SequenceLastInstanceLayer(const LayerConfig& config) : SequencePoolLayer(config) {} diff --git a/paddle/gserver/layers/SequencePoolLayer.cpp b/paddle/legacy/gserver/layers/SequencePoolLayer.cpp similarity index 98% rename from paddle/gserver/layers/SequencePoolLayer.cpp rename to paddle/legacy/gserver/layers/SequencePoolLayer.cpp index 650ab425d1..243b795db4 100644 --- a/paddle/gserver/layers/SequencePoolLayer.cpp +++ b/paddle/legacy/gserver/layers/SequencePoolLayer.cpp @@ -13,7 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "SequencePoolLayer.h" -#include "paddle/utils/Logging.h" +#include "paddle/legacy/utils/Logging.h" namespace paddle { diff --git a/paddle/gserver/layers/SequencePoolLayer.h b/paddle/legacy/gserver/layers/SequencePoolLayer.h similarity index 97% rename from paddle/gserver/layers/SequencePoolLayer.h rename to paddle/legacy/gserver/layers/SequencePoolLayer.h index 254e4cc6b3..1c019b3130 100644 --- a/paddle/gserver/layers/SequencePoolLayer.h +++ b/paddle/legacy/gserver/layers/SequencePoolLayer.h @@ -15,7 +15,7 @@ limitations under the License. */ #pragma once #include "Layer.h" -#include "paddle/math/Matrix.h" +#include "paddle/legacy/math/Matrix.h" namespace paddle { /** @@ -41,7 +41,7 @@ namespace paddle { */ class SequencePoolLayer : public Layer { -protected: + protected: int type_; std::unique_ptr biases_; enum SequenceLevel { kNonSeq = 0, kSeq = 1 }; @@ -51,7 +51,7 @@ protected: // Whether the input sequence is reversed or not. bool reversed_ = false; -public: + public: explicit SequencePoolLayer(const LayerConfig& config) : Layer(config) {} bool init(const LayerMap& layerMap, diff --git a/paddle/gserver/layers/SequenceReshapeLayer.cpp b/paddle/legacy/gserver/layers/SequenceReshapeLayer.cpp similarity index 97% rename from paddle/gserver/layers/SequenceReshapeLayer.cpp rename to paddle/legacy/gserver/layers/SequenceReshapeLayer.cpp index fb96669917..e3d40cab50 100644 --- a/paddle/gserver/layers/SequenceReshapeLayer.cpp +++ b/paddle/legacy/gserver/layers/SequenceReshapeLayer.cpp @@ -13,9 +13,9 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ #include "Layer.h" -#include "paddle/math/Matrix.h" -#include "paddle/utils/Logging.h" -#include "paddle/utils/Stat.h" +#include "paddle/legacy/math/Matrix.h" +#include "paddle/legacy/utils/Logging.h" +#include "paddle/legacy/utils/Stat.h" namespace paddle { @@ -29,12 +29,12 @@ namespace paddle { */ class SequenceReshapeLayer : public Layer { -protected: + protected: std::unique_ptr biases_; MatrixPtr reshapedOutputGrad; -public: + public: explicit SequenceReshapeLayer(const LayerConfig& config) : Layer(config) {} bool init(const LayerMap& layerMap, diff --git a/paddle/gserver/layers/SequenceSliceLayer.cpp b/paddle/legacy/gserver/layers/SequenceSliceLayer.cpp similarity index 97% rename from paddle/gserver/layers/SequenceSliceLayer.cpp rename to paddle/legacy/gserver/layers/SequenceSliceLayer.cpp index 1b7c33477e..3ed51c4ef2 100644 --- a/paddle/gserver/layers/SequenceSliceLayer.cpp +++ b/paddle/legacy/gserver/layers/SequenceSliceLayer.cpp @@ -13,15 +13,15 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "Layer.h" -#include "paddle/math/Matrix.h" -#include "paddle/math/Vector.h" -#include "paddle/utils/Logging.h" -#include "paddle/utils/Stat.h" +#include "paddle/legacy/math/Matrix.h" +#include "paddle/legacy/math/Vector.h" +#include "paddle/legacy/utils/Logging.h" +#include "paddle/legacy/utils/Stat.h" namespace paddle { class SequenceSliceLayer : public Layer { -public: + public: explicit SequenceSliceLayer(const LayerConfig& config) : Layer(config) {} bool init(const LayerMap& layerMap, @@ -30,7 +30,7 @@ public: void forward(PassType passType) override; void backward(const UpdateCallback& callback = nullptr) override; -private: + private: /* * TODO(caoying) * In PaddePaddle, currently all matrices are real number types, diff --git a/paddle/gserver/layers/SequenceToBatch.cpp b/paddle/legacy/gserver/layers/SequenceToBatch.cpp similarity index 100% rename from paddle/gserver/layers/SequenceToBatch.cpp rename to paddle/legacy/gserver/layers/SequenceToBatch.cpp diff --git a/paddle/gserver/layers/SequenceToBatch.h b/paddle/legacy/gserver/layers/SequenceToBatch.h similarity index 97% rename from paddle/gserver/layers/SequenceToBatch.h rename to paddle/legacy/gserver/layers/SequenceToBatch.h index 8743a5ef10..7ed517937d 100644 --- a/paddle/gserver/layers/SequenceToBatch.h +++ b/paddle/legacy/gserver/layers/SequenceToBatch.h @@ -13,8 +13,8 @@ See the License for the specific language governing permissions and limitations under the License. */ #pragma once -#include "paddle/math/Matrix.h" -#include "paddle/math/Vector.h" +#include "paddle/legacy/math/Matrix.h" +#include "paddle/legacy/math/Vector.h" namespace paddle { @@ -39,7 +39,7 @@ namespace paddle { * */ class SequenceToBatch { -public: + public: explicit SequenceToBatch(bool useGpu) : useGpu_(useGpu) {} /* resize and calculate the batchIndex_ */ @@ -82,7 +82,7 @@ public: numBatch_ = seq2batch.numBatch_; } -protected: + protected: void sequence2BatchCopy(Matrix &batch, Matrix &sequence, IVector &seq2BatchIdx, diff --git a/paddle/gserver/layers/SliceProjection.cpp b/paddle/legacy/gserver/layers/SliceProjection.cpp similarity index 99% rename from paddle/gserver/layers/SliceProjection.cpp rename to paddle/legacy/gserver/layers/SliceProjection.cpp index 5627ad1eb3..b474f2db75 100644 --- a/paddle/gserver/layers/SliceProjection.cpp +++ b/paddle/legacy/gserver/layers/SliceProjection.cpp @@ -44,14 +44,14 @@ namespace paddle { * The config file api is slice_projection. 
*/ class SliceProjection : public Projection { -public: + public: SliceProjection(const ProjectionConfig& config, const ParameterPtr& parameter, bool useGpu); virtual void forward(); virtual void backward(const UpdateCallback& callback); -protected: + protected: std::vector> slices_; }; diff --git a/paddle/gserver/layers/SlopeInterceptLayer.cpp b/paddle/legacy/gserver/layers/SlopeInterceptLayer.cpp similarity index 95% rename from paddle/gserver/layers/SlopeInterceptLayer.cpp rename to paddle/legacy/gserver/layers/SlopeInterceptLayer.cpp index c94a07e5da..9168fd7dda 100644 --- a/paddle/gserver/layers/SlopeInterceptLayer.cpp +++ b/paddle/legacy/gserver/layers/SlopeInterceptLayer.cpp @@ -13,9 +13,9 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "Layer.h" -#include "paddle/math/Matrix.h" -#include "paddle/utils/Logging.h" -#include "paddle/utils/Stat.h" +#include "paddle/legacy/math/Matrix.h" +#include "paddle/legacy/utils/Logging.h" +#include "paddle/legacy/utils/Stat.h" namespace paddle { @@ -36,7 +36,7 @@ namespace paddle { */ class SlopeInterceptLayer : public Layer { -public: + public: explicit SlopeInterceptLayer(const LayerConfig& config) : Layer(config) {} bool init(const LayerMap& layerMap, diff --git a/paddle/gserver/layers/SpatialPyramidPoolLayer.cpp b/paddle/legacy/gserver/layers/SpatialPyramidPoolLayer.cpp similarity index 100% rename from paddle/gserver/layers/SpatialPyramidPoolLayer.cpp rename to paddle/legacy/gserver/layers/SpatialPyramidPoolLayer.cpp diff --git a/paddle/gserver/layers/SpatialPyramidPoolLayer.h b/paddle/legacy/gserver/layers/SpatialPyramidPoolLayer.h similarity index 94% rename from paddle/gserver/layers/SpatialPyramidPoolLayer.h rename to paddle/legacy/gserver/layers/SpatialPyramidPoolLayer.h index 6cb5fdf83e..6d8ed9c878 100644 --- a/paddle/gserver/layers/SpatialPyramidPoolLayer.h +++ b/paddle/legacy/gserver/layers/SpatialPyramidPoolLayer.h @@ -16,8 +16,8 @@ limitations under the License. */ #include "Layer.h" #include "PoolProjection.h" -#include "paddle/math/MathUtils.h" -#include "paddle/utils/Logging.h" +#include "paddle/legacy/math/MathUtils.h" +#include "paddle/legacy/utils/Logging.h" namespace paddle { /** @@ -29,7 +29,7 @@ namespace paddle { */ class SpatialPyramidPoolLayer : public Layer { -protected: + protected: size_t channels_; size_t imgSizeW_; size_t imgSizeH_; @@ -40,7 +40,7 @@ protected: std::vector projOutput_; std::vector> projCol_; -public: + public: explicit SpatialPyramidPoolLayer(const LayerConfig& config) : Layer(config) {} bool init(const LayerMap& layerMap, diff --git a/paddle/gserver/layers/SubNestedSequenceLayer.cpp b/paddle/legacy/gserver/layers/SubNestedSequenceLayer.cpp similarity index 97% rename from paddle/gserver/layers/SubNestedSequenceLayer.cpp rename to paddle/legacy/gserver/layers/SubNestedSequenceLayer.cpp index db240ab0c9..f363c2ac8d 100644 --- a/paddle/gserver/layers/SubNestedSequenceLayer.cpp +++ b/paddle/legacy/gserver/layers/SubNestedSequenceLayer.cpp @@ -13,15 +13,15 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ #include "Layer.h" -#include "paddle/math/Matrix.h" -#include "paddle/math/Vector.h" -#include "paddle/utils/Logging.h" -#include "paddle/utils/Stat.h" +#include "paddle/legacy/math/Matrix.h" +#include "paddle/legacy/math/Vector.h" +#include "paddle/legacy/utils/Logging.h" +#include "paddle/legacy/utils/Stat.h" namespace paddle { class SubNestedSequenceLayer : public Layer { -public: + public: explicit SubNestedSequenceLayer(const LayerConfig& config) : Layer(config) {} bool init(const LayerMap& layerMap, @@ -30,7 +30,7 @@ public: void forward(PassType passType) override; void backward(const UpdateCallback& callback = nullptr) override; -private: + private: /* * This functions generates the indices of rows in a batch according to the * indices of selected sub-sequence in each sequence. diff --git a/paddle/gserver/layers/SubSequenceLayer.cpp b/paddle/legacy/gserver/layers/SubSequenceLayer.cpp similarity index 97% rename from paddle/gserver/layers/SubSequenceLayer.cpp rename to paddle/legacy/gserver/layers/SubSequenceLayer.cpp index 808627f092..36796f0473 100644 --- a/paddle/gserver/layers/SubSequenceLayer.cpp +++ b/paddle/legacy/gserver/layers/SubSequenceLayer.cpp @@ -13,10 +13,10 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "Layer.h" -#include "paddle/math/Matrix.h" -#include "paddle/math/Vector.h" -#include "paddle/utils/Logging.h" -#include "paddle/utils/Stat.h" +#include "paddle/legacy/math/Matrix.h" +#include "paddle/legacy/math/Vector.h" +#include "paddle/legacy/utils/Logging.h" +#include "paddle/legacy/utils/Stat.h" namespace paddle { @@ -27,12 +27,12 @@ namespace paddle { */ class SubSequenceLayer : public Layer { -protected: + protected: std::unique_ptr biases_; MatrixPtr tmpSrc_; MatrixPtr tmpDest_; -public: + public: explicit SubSequenceLayer(const LayerConfig& config) : Layer(config) {} bool init(const LayerMap& layerMap, diff --git a/paddle/gserver/layers/SumToOneNormLayer.cpp b/paddle/legacy/gserver/layers/SumToOneNormLayer.cpp similarity index 95% rename from paddle/gserver/layers/SumToOneNormLayer.cpp rename to paddle/legacy/gserver/layers/SumToOneNormLayer.cpp index ffbe149253..410f4dd7c9 100644 --- a/paddle/gserver/layers/SumToOneNormLayer.cpp +++ b/paddle/legacy/gserver/layers/SumToOneNormLayer.cpp @@ -13,9 +13,9 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ #include "Layer.h" -#include "paddle/math/Matrix.h" -#include "paddle/utils/Logging.h" -#include "paddle/utils/Stat.h" +#include "paddle/legacy/math/Matrix.h" +#include "paddle/legacy/utils/Logging.h" +#include "paddle/legacy/utils/Stat.h" namespace paddle { @@ -32,13 +32,13 @@ namespace paddle { */ class SumToOneNormLayer : public Layer { -protected: + protected: /// reciprocalRowSum_ = \f$1 / \sum_{k=1}^N in[k]\f$ MatrixPtr reciprocalRowSum_; /// dotSum = output_.grad \f$.*\f$ output_.value MatrixPtr dotSum_; -public: + public: explicit SumToOneNormLayer(const LayerConfig& config) : Layer(config) {} bool init(const LayerMap& layerMap, diff --git a/paddle/gserver/layers/SwitchOrderLayer.cpp b/paddle/legacy/gserver/layers/SwitchOrderLayer.cpp similarity index 98% rename from paddle/gserver/layers/SwitchOrderLayer.cpp rename to paddle/legacy/gserver/layers/SwitchOrderLayer.cpp index 704735de38..513f3df7bc 100644 --- a/paddle/gserver/layers/SwitchOrderLayer.cpp +++ b/paddle/legacy/gserver/layers/SwitchOrderLayer.cpp @@ -13,7 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "SwitchOrderLayer.h" -#include "paddle/utils/Stat.h" +#include "paddle/legacy/utils/Stat.h" namespace paddle { diff --git a/paddle/gserver/layers/SwitchOrderLayer.h b/paddle/legacy/gserver/layers/SwitchOrderLayer.h similarity index 98% rename from paddle/gserver/layers/SwitchOrderLayer.h rename to paddle/legacy/gserver/layers/SwitchOrderLayer.h index 882437f443..8a551a2bba 100644 --- a/paddle/gserver/layers/SwitchOrderLayer.h +++ b/paddle/legacy/gserver/layers/SwitchOrderLayer.h @@ -22,7 +22,7 @@ namespace paddle { * \brief This layer calculate softmax in image channel dimension. */ class SwitchOrderLayer : public Layer { -public: + public: explicit SwitchOrderLayer(const LayerConfig& config) : Layer(config) {} ~SwitchOrderLayer() {} @@ -34,7 +34,7 @@ public: void setInDims(); void setOutDims(); -protected: + protected: std::vector> nchw2nhwc_; std::vector> nhwc2nchw_; TensorShape inDims_; diff --git a/paddle/gserver/layers/TableProjection.cpp b/paddle/legacy/gserver/layers/TableProjection.cpp similarity index 100% rename from paddle/gserver/layers/TableProjection.cpp rename to paddle/legacy/gserver/layers/TableProjection.cpp diff --git a/paddle/gserver/layers/TableProjection.h b/paddle/legacy/gserver/layers/TableProjection.h similarity index 98% rename from paddle/gserver/layers/TableProjection.h rename to paddle/legacy/gserver/layers/TableProjection.h index ffb05e68f0..60286149f4 100644 --- a/paddle/gserver/layers/TableProjection.h +++ b/paddle/legacy/gserver/layers/TableProjection.h @@ -32,7 +32,7 @@ namespace paddle { * @note If \f$ids[i] = -1\f$, it will be ignored. */ class TableProjection : public Projection { -public: + public: TableProjection(const ProjectionConfig& config, const ParameterPtr& parameter, bool useGpu); @@ -43,7 +43,7 @@ public: virtual void forward(); virtual void backward(const UpdateCallback& callback); -protected: + protected: std::unique_ptr table_; }; diff --git a/paddle/gserver/layers/TensorLayer.cpp b/paddle/legacy/gserver/layers/TensorLayer.cpp similarity index 98% rename from paddle/gserver/layers/TensorLayer.cpp rename to paddle/legacy/gserver/layers/TensorLayer.cpp index b2271c63ef..7f874bce0f 100644 --- a/paddle/gserver/layers/TensorLayer.cpp +++ b/paddle/legacy/gserver/layers/TensorLayer.cpp @@ -14,8 +14,8 @@ limitations under the License. 
*/ #include "TensorLayer.h" -#include "paddle/utils/Logging.h" -#include "paddle/utils/Stat.h" +#include "paddle/legacy/utils/Logging.h" +#include "paddle/legacy/utils/Stat.h" namespace paddle { diff --git a/paddle/gserver/layers/TensorLayer.h b/paddle/legacy/gserver/layers/TensorLayer.h similarity index 93% rename from paddle/gserver/layers/TensorLayer.h rename to paddle/legacy/gserver/layers/TensorLayer.h index 8a323aa15f..fc491a7c9f 100644 --- a/paddle/gserver/layers/TensorLayer.h +++ b/paddle/legacy/gserver/layers/TensorLayer.h @@ -15,8 +15,8 @@ limitations under the License. */ #pragma once #include "Layer.h" -#include "paddle/math/Matrix.h" -#include "paddle/utils/ThreadLocal.h" +#include "paddle/legacy/math/Matrix.h" +#include "paddle/legacy/utils/ThreadLocal.h" namespace paddle { @@ -37,11 +37,11 @@ namespace paddle { */ class TensorLayer : public Layer { -protected: + protected: WeightList weights_; std::unique_ptr biases_; -public: + public: explicit TensorLayer(const LayerConfig& config) : Layer(config) {} bool init(const LayerMap& layerMap, diff --git a/paddle/gserver/layers/TransLayer.cpp b/paddle/legacy/gserver/layers/TransLayer.cpp similarity index 97% rename from paddle/gserver/layers/TransLayer.cpp rename to paddle/legacy/gserver/layers/TransLayer.cpp index cf87ca53d1..fd1d435ea5 100644 --- a/paddle/gserver/layers/TransLayer.cpp +++ b/paddle/legacy/gserver/layers/TransLayer.cpp @@ -13,7 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "TransLayer.h" -#include "paddle/utils/Logging.h" +#include "paddle/legacy/utils/Logging.h" namespace paddle { REGISTER_LAYER(trans, TransLayer); diff --git a/paddle/gserver/layers/TransLayer.h b/paddle/legacy/gserver/layers/TransLayer.h similarity index 96% rename from paddle/gserver/layers/TransLayer.h rename to paddle/legacy/gserver/layers/TransLayer.h index 03d0948624..0a6b13933f 100644 --- a/paddle/gserver/layers/TransLayer.h +++ b/paddle/legacy/gserver/layers/TransLayer.h @@ -16,7 +16,7 @@ limitations under the License. */ #include #include "Layer.h" -#include "paddle/math/Matrix.h" +#include "paddle/legacy/math/Matrix.h" namespace paddle { /** @@ -29,7 +29,7 @@ namespace paddle { * The config file api is trans_layer. */ class TransLayer : public Layer { -public: + public: explicit TransLayer(const LayerConfig& config) : Layer(config) {} bool init(const LayerMap& layerMap, diff --git a/paddle/gserver/layers/TransposedFullMatrixProjection.cpp b/paddle/legacy/gserver/layers/TransposedFullMatrixProjection.cpp similarity index 97% rename from paddle/gserver/layers/TransposedFullMatrixProjection.cpp rename to paddle/legacy/gserver/layers/TransposedFullMatrixProjection.cpp index 755389f707..c8533dc7d7 100644 --- a/paddle/gserver/layers/TransposedFullMatrixProjection.cpp +++ b/paddle/legacy/gserver/layers/TransposedFullMatrixProjection.cpp @@ -13,7 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "Projection.h" -#include "paddle/utils/Stat.h" +#include "paddle/legacy/utils/Stat.h" namespace paddle { @@ -24,14 +24,14 @@ namespace paddle { * The config file api is trans_full_matrix_projection. 
*/ class TransposedFullMatrixProjection : public Projection { -public: + public: TransposedFullMatrixProjection(const ProjectionConfig& config, ParameterPtr parameter, bool useGpu); virtual void forward(); virtual void backward(const UpdateCallback& callback); -protected: + protected: std::unique_ptr weight_; }; diff --git a/paddle/gserver/layers/UpsampleLayer.cpp b/paddle/legacy/gserver/layers/UpsampleLayer.cpp similarity index 100% rename from paddle/gserver/layers/UpsampleLayer.cpp rename to paddle/legacy/gserver/layers/UpsampleLayer.cpp diff --git a/paddle/gserver/layers/UpsampleLayer.h b/paddle/legacy/gserver/layers/UpsampleLayer.h similarity index 90% rename from paddle/gserver/layers/UpsampleLayer.h rename to paddle/legacy/gserver/layers/UpsampleLayer.h index 25efbac5e9..2fe5938244 100644 --- a/paddle/gserver/layers/UpsampleLayer.h +++ b/paddle/legacy/gserver/layers/UpsampleLayer.h @@ -16,9 +16,9 @@ limitations under the License. */ #include #include "Layer.h" -#include "paddle/math/Matrix.h" -#include "paddle/utils/Logging.h" -#include "paddle/utils/Stat.h" +#include "paddle/legacy/math/Matrix.h" +#include "paddle/legacy/utils/Logging.h" +#include "paddle/legacy/utils/Stat.h" namespace paddle { @@ -30,7 +30,7 @@ namespace paddle { */ class UpsampleLayer : public Layer { -public: + public: explicit UpsampleLayer(const LayerConfig& config) : Layer(config) {} ~UpsampleLayer() {} @@ -42,7 +42,7 @@ public: size_t getOutputSize(); -protected: + protected: size_t scale_, scaleY_; size_t upsampleSize_, upsampleSizeY_; size_t padOutX_, padOutY_; diff --git a/paddle/gserver/layers/ValidationLayer.cpp b/paddle/legacy/gserver/layers/ValidationLayer.cpp similarity index 99% rename from paddle/gserver/layers/ValidationLayer.cpp rename to paddle/legacy/gserver/layers/ValidationLayer.cpp index b626825a7b..9956fd2ed4 100644 --- a/paddle/gserver/layers/ValidationLayer.cpp +++ b/paddle/legacy/gserver/layers/ValidationLayer.cpp @@ -17,7 +17,7 @@ limitations under the License. */ #include #include "ValidationLayer.h" -#include "paddle/utils/Logging.h" +#include "paddle/legacy/utils/Logging.h" namespace paddle { diff --git a/paddle/gserver/layers/ValidationLayer.h b/paddle/legacy/gserver/layers/ValidationLayer.h similarity index 96% rename from paddle/gserver/layers/ValidationLayer.h rename to paddle/legacy/gserver/layers/ValidationLayer.h index f412d685c0..fbc94e8ef5 100644 --- a/paddle/gserver/layers/ValidationLayer.h +++ b/paddle/legacy/gserver/layers/ValidationLayer.h @@ -16,14 +16,14 @@ limitations under the License.
*/ #include #include "Layer.h" -#include "paddle/gserver/evaluators/Evaluator.h" +#include "paddle/legacy/gserver/evaluators/Evaluator.h" DECLARE_int32(trainer_id); namespace paddle { class ValidationLayer : public Layer { -public: + public: explicit ValidationLayer(const LayerConfig& config) : Layer(config) {} bool init(const LayerMap& layerMap, @@ -51,7 +51,7 @@ public: * AucValidation */ class AucValidation : public ValidationLayer { -public: + public: explicit AucValidation(const LayerConfig& config) : ValidationLayer(config), cpuOutput_(nullptr), @@ -72,7 +72,7 @@ public: }; std::vector predictArray_; -private: + private: bool passBegin_; std::unique_ptr evaluator_; MatrixPtr cpuOutput_; @@ -84,7 +84,7 @@ private: * positive-negative pair rate Validation */ class PnpairValidation : public ValidationLayer { -public: + public: explicit PnpairValidation(const LayerConfig& config) : ValidationLayer(config) {} @@ -95,7 +95,7 @@ public: void onPassEnd() override; -private: + private: bool passBegin_; std::unique_ptr evaluator_; }; diff --git a/paddle/gserver/layers/WarpCTCLayer.cpp b/paddle/legacy/gserver/layers/WarpCTCLayer.cpp similarity index 100% rename from paddle/gserver/layers/WarpCTCLayer.cpp rename to paddle/legacy/gserver/layers/WarpCTCLayer.cpp diff --git a/paddle/gserver/layers/WarpCTCLayer.h b/paddle/legacy/gserver/layers/WarpCTCLayer.h similarity index 98% rename from paddle/gserver/layers/WarpCTCLayer.h rename to paddle/legacy/gserver/layers/WarpCTCLayer.h index 6f6be359c0..3017ca794e 100644 --- a/paddle/gserver/layers/WarpCTCLayer.h +++ b/paddle/legacy/gserver/layers/WarpCTCLayer.h @@ -26,7 +26,7 @@ namespace paddle { * The config file api is warp_ctc_layer. */ class WarpCTCLayer : public Layer { -public: + public: explicit WarpCTCLayer(const LayerConfig& config) : Layer(config) {} ~WarpCTCLayer() {} @@ -35,7 +35,7 @@ public: void forward(PassType passType) override; void backward(const UpdateCallback& callback) override; -protected: + protected: /** * sequence matrix and batch matrix copy: * sequence (s0, s0, s0, s0; s1, s1; s2, s2, s2; s3) @@ -49,7 +49,7 @@ protected: const ICpuGpuVectorPtr& seqStartPositions, bool normByTimes); -protected: + protected: size_t numClasses_; size_t blank_; size_t maxSequenceLength_; diff --git a/paddle/gserver/tests/.gitignore b/paddle/legacy/gserver/tests/.gitignore similarity index 100% rename from paddle/gserver/tests/.gitignore rename to paddle/legacy/gserver/tests/.gitignore diff --git a/paddle/gserver/tests/CMakeLists.txt b/paddle/legacy/gserver/tests/CMakeLists.txt similarity index 97% rename from paddle/gserver/tests/CMakeLists.txt rename to paddle/legacy/gserver/tests/CMakeLists.txt index 9d7cad7584..93ddf5aa23 100644 --- a/paddle/gserver/tests/CMakeLists.txt +++ b/paddle/legacy/gserver/tests/CMakeLists.txt @@ -36,7 +36,7 @@ gserver_test(test_Upsample) set(PYTHON_PATH ${PADDLE_SOURCE_DIR}/paddle/.set_python_path.sh -d - ${PADDLE_BINARY_DIR}/python/:${PADDLE_BINARY_DIR}/paddle/gserver/tests) + ${PADDLE_BINARY_DIR}/python/:${PADDLE_BINARY_DIR}/paddle/legacy/gserver/tests) function(gserver_test_with_python TARGET) add_unittest_without_exec(${TARGET} ${TARGET}.cpp) add_test(NAME ${TARGET} diff --git a/paddle/gserver/tests/LayerGradUtil.cpp b/paddle/legacy/gserver/tests/LayerGradUtil.cpp similarity index 100% rename from paddle/gserver/tests/LayerGradUtil.cpp rename to paddle/legacy/gserver/tests/LayerGradUtil.cpp diff --git a/paddle/gserver/tests/LayerGradUtil.h b/paddle/legacy/gserver/tests/LayerGradUtil.h similarity index 99% rename 
from paddle/gserver/tests/LayerGradUtil.h rename to paddle/legacy/gserver/tests/LayerGradUtil.h index 1999b2204b..941989a1da 100644 --- a/paddle/gserver/tests/LayerGradUtil.h +++ b/paddle/legacy/gserver/tests/LayerGradUtil.h @@ -14,7 +14,7 @@ limitations under the License. */ #pragma once #include "ModelConfig.pb.h" -#include "paddle/gserver/layers/DataLayer.h" +#include "paddle/legacy/gserver/layers/DataLayer.h" #include "paddle/testing/TestUtil.h" using namespace std; // NOLINT diff --git a/paddle/gserver/tests/MKLDNNTester.cpp b/paddle/legacy/gserver/tests/MKLDNNTester.cpp similarity index 99% rename from paddle/gserver/tests/MKLDNNTester.cpp rename to paddle/legacy/gserver/tests/MKLDNNTester.cpp index d2a9761a4e..b550ba9c72 100644 --- a/paddle/gserver/tests/MKLDNNTester.cpp +++ b/paddle/legacy/gserver/tests/MKLDNNTester.cpp @@ -13,9 +13,9 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "MKLDNNTester.h" -#include "paddle/gserver/layers/MKLDNNBase.h" -#include "paddle/gserver/layers/MKLDNNLayer.h" -#include "paddle/trainer/Trainer.h" +#include "paddle/legacy/gserver/layers/MKLDNNBase.h" +#include "paddle/legacy/gserver/layers/MKLDNNLayer.h" +#include "paddle/legacy/trainer/Trainer.h" namespace paddle { diff --git a/paddle/gserver/tests/MKLDNNTester.h b/paddle/legacy/gserver/tests/MKLDNNTester.h similarity index 96% rename from paddle/gserver/tests/MKLDNNTester.h rename to paddle/legacy/gserver/tests/MKLDNNTester.h index c1faa6fd90..086846ce53 100644 --- a/paddle/gserver/tests/MKLDNNTester.h +++ b/paddle/legacy/gserver/tests/MKLDNNTester.h @@ -17,8 +17,8 @@ limitations under the License. */ #include #include #include "LayerGradUtil.h" -#include "paddle/gserver/layers/MKLDNNBase.h" -#include "paddle/gserver/layers/MKLDNNLayer.h" +#include "paddle/legacy/gserver/layers/MKLDNNBase.h" +#include "paddle/legacy/gserver/layers/MKLDNNLayer.h" namespace paddle { @@ -44,7 +44,7 @@ class MKLDNNTester { std::vector paraValues; }; -protected: + protected: std::vector configs_; vector layerNames_; vector> dataLayers_; @@ -65,7 +65,7 @@ protected: /// passType, PASS_TRAIN, PASS_TEST or PASS_GC (Gradient Check pass) PassType passType_; -public: + public: explicit MKLDNNTester(size_t iter = 3, float epsilon = 1e-4) { iter_ = iter; eps_ = epsilon; @@ -75,7 +75,7 @@ public: ~MKLDNNTester() {} -public: + public: void run(const TestConfig& dnn, const TestConfig& ref, size_t batchSize, @@ -97,7 +97,7 @@ public: bool use_mkldnn, size_t iter = 2); -private: + private: void reset(const TestConfig& dnn, const TestConfig& ref, size_t batchSize); void setInputImgSize(); void runOnce(); diff --git a/paddle/gserver/tests/Sequence/dummy.list b/paddle/legacy/gserver/tests/Sequence/dummy.list similarity index 100% rename from paddle/gserver/tests/Sequence/dummy.list rename to paddle/legacy/gserver/tests/Sequence/dummy.list diff --git a/paddle/gserver/tests/Sequence/tour_dict_phrase.dict b/paddle/legacy/gserver/tests/Sequence/tour_dict_phrase.dict similarity index 100% rename from paddle/gserver/tests/Sequence/tour_dict_phrase.dict rename to paddle/legacy/gserver/tests/Sequence/tour_dict_phrase.dict diff --git a/paddle/gserver/tests/Sequence/tour_train_wdseg b/paddle/legacy/gserver/tests/Sequence/tour_train_wdseg similarity index 100% rename from paddle/gserver/tests/Sequence/tour_train_wdseg rename to paddle/legacy/gserver/tests/Sequence/tour_train_wdseg diff --git a/paddle/gserver/tests/Sequence/tour_train_wdseg.nest 
b/paddle/legacy/gserver/tests/Sequence/tour_train_wdseg.nest similarity index 100% rename from paddle/gserver/tests/Sequence/tour_train_wdseg.nest rename to paddle/legacy/gserver/tests/Sequence/tour_train_wdseg.nest diff --git a/paddle/legacy/gserver/tests/Sequence/train.list b/paddle/legacy/gserver/tests/Sequence/train.list new file mode 100644 index 0000000000..1109a24492 --- /dev/null +++ b/paddle/legacy/gserver/tests/Sequence/train.list @@ -0,0 +1 @@ +legacy/gserver/tests/Sequence/tour_train_wdseg diff --git a/paddle/legacy/gserver/tests/Sequence/train.list.nest b/paddle/legacy/gserver/tests/Sequence/train.list.nest new file mode 100644 index 0000000000..a67df35024 --- /dev/null +++ b/paddle/legacy/gserver/tests/Sequence/train.list.nest @@ -0,0 +1 @@ +legacy/gserver/tests/Sequence/tour_train_wdseg.nest diff --git a/paddle/gserver/tests/__init__.py b/paddle/legacy/gserver/tests/__init__.py similarity index 100% rename from paddle/gserver/tests/__init__.py rename to paddle/legacy/gserver/tests/__init__.py diff --git a/paddle/gserver/tests/concat_dotmul_a.conf b/paddle/legacy/gserver/tests/concat_dotmul_a.conf similarity index 100% rename from paddle/gserver/tests/concat_dotmul_a.conf rename to paddle/legacy/gserver/tests/concat_dotmul_a.conf diff --git a/paddle/gserver/tests/concat_dotmul_b.conf b/paddle/legacy/gserver/tests/concat_dotmul_b.conf similarity index 100% rename from paddle/gserver/tests/concat_dotmul_b.conf rename to paddle/legacy/gserver/tests/concat_dotmul_b.conf diff --git a/paddle/gserver/tests/concat_fullmatrix_a.conf b/paddle/legacy/gserver/tests/concat_fullmatrix_a.conf similarity index 100% rename from paddle/gserver/tests/concat_fullmatrix_a.conf rename to paddle/legacy/gserver/tests/concat_fullmatrix_a.conf diff --git a/paddle/gserver/tests/concat_fullmatrix_b.conf b/paddle/legacy/gserver/tests/concat_fullmatrix_b.conf similarity index 100% rename from paddle/gserver/tests/concat_fullmatrix_b.conf rename to paddle/legacy/gserver/tests/concat_fullmatrix_b.conf diff --git a/paddle/gserver/tests/concat_slice_a.conf b/paddle/legacy/gserver/tests/concat_slice_a.conf similarity index 100% rename from paddle/gserver/tests/concat_slice_a.conf rename to paddle/legacy/gserver/tests/concat_slice_a.conf diff --git a/paddle/gserver/tests/concat_slice_b.conf b/paddle/legacy/gserver/tests/concat_slice_b.conf similarity index 100% rename from paddle/gserver/tests/concat_slice_b.conf rename to paddle/legacy/gserver/tests/concat_slice_b.conf diff --git a/paddle/gserver/tests/concat_table_a.conf b/paddle/legacy/gserver/tests/concat_table_a.conf similarity index 100% rename from paddle/gserver/tests/concat_table_a.conf rename to paddle/legacy/gserver/tests/concat_table_a.conf diff --git a/paddle/gserver/tests/concat_table_b.conf b/paddle/legacy/gserver/tests/concat_table_b.conf similarity index 100% rename from paddle/gserver/tests/concat_table_b.conf rename to paddle/legacy/gserver/tests/concat_table_b.conf diff --git a/paddle/gserver/tests/img_conv_a.conf b/paddle/legacy/gserver/tests/img_conv_a.conf similarity index 100% rename from paddle/gserver/tests/img_conv_a.conf rename to paddle/legacy/gserver/tests/img_conv_a.conf diff --git a/paddle/gserver/tests/img_conv_b.conf b/paddle/legacy/gserver/tests/img_conv_b.conf similarity index 100% rename from paddle/gserver/tests/img_conv_b.conf rename to paddle/legacy/gserver/tests/img_conv_b.conf diff --git a/paddle/gserver/tests/img_conv_c.conf b/paddle/legacy/gserver/tests/img_conv_c.conf similarity index 100% rename from 
paddle/gserver/tests/img_conv_c.conf rename to paddle/legacy/gserver/tests/img_conv_c.conf diff --git a/paddle/gserver/tests/img_conv_cudnn.py b/paddle/legacy/gserver/tests/img_conv_cudnn.py similarity index 100% rename from paddle/gserver/tests/img_conv_cudnn.py rename to paddle/legacy/gserver/tests/img_conv_cudnn.py diff --git a/paddle/gserver/tests/img_conv_exconv.py b/paddle/legacy/gserver/tests/img_conv_exconv.py similarity index 100% rename from paddle/gserver/tests/img_conv_exconv.py rename to paddle/legacy/gserver/tests/img_conv_exconv.py diff --git a/paddle/gserver/tests/img_pool_a.conf b/paddle/legacy/gserver/tests/img_pool_a.conf similarity index 100% rename from paddle/gserver/tests/img_pool_a.conf rename to paddle/legacy/gserver/tests/img_pool_a.conf diff --git a/paddle/gserver/tests/img_pool_b.conf b/paddle/legacy/gserver/tests/img_pool_b.conf similarity index 100% rename from paddle/gserver/tests/img_pool_b.conf rename to paddle/legacy/gserver/tests/img_pool_b.conf diff --git a/paddle/gserver/tests/mkldnn_branch_net.conf b/paddle/legacy/gserver/tests/mkldnn_branch_net.conf similarity index 100% rename from paddle/gserver/tests/mkldnn_branch_net.conf rename to paddle/legacy/gserver/tests/mkldnn_branch_net.conf diff --git a/paddle/gserver/tests/mkldnn_simple_net.conf b/paddle/legacy/gserver/tests/mkldnn_simple_net.conf similarity index 100% rename from paddle/gserver/tests/mkldnn_simple_net.conf rename to paddle/legacy/gserver/tests/mkldnn_simple_net.conf diff --git a/paddle/gserver/tests/pyDataProvider.py b/paddle/legacy/gserver/tests/pyDataProvider.py similarity index 100% rename from paddle/gserver/tests/pyDataProvider.py rename to paddle/legacy/gserver/tests/pyDataProvider.py diff --git a/paddle/gserver/tests/pyDataProvider/pyDataProviderList b/paddle/legacy/gserver/tests/pyDataProvider/pyDataProviderList similarity index 100% rename from paddle/gserver/tests/pyDataProvider/pyDataProviderList rename to paddle/legacy/gserver/tests/pyDataProvider/pyDataProviderList diff --git a/paddle/gserver/tests/pyDataProvider/trainer.conf b/paddle/legacy/gserver/tests/pyDataProvider/trainer.conf similarity index 100% rename from paddle/gserver/tests/pyDataProvider/trainer.conf rename to paddle/legacy/gserver/tests/pyDataProvider/trainer.conf diff --git a/paddle/gserver/tests/rnn_data_provider.py b/paddle/legacy/gserver/tests/rnn_data_provider.py similarity index 100% rename from paddle/gserver/tests/rnn_data_provider.py rename to paddle/legacy/gserver/tests/rnn_data_provider.py diff --git a/paddle/gserver/tests/sequenceGen.py b/paddle/legacy/gserver/tests/sequenceGen.py similarity index 100% rename from paddle/gserver/tests/sequenceGen.py rename to paddle/legacy/gserver/tests/sequenceGen.py diff --git a/paddle/gserver/tests/sequence_layer_group.conf b/paddle/legacy/gserver/tests/sequence_layer_group.conf similarity index 93% rename from paddle/gserver/tests/sequence_layer_group.conf rename to paddle/legacy/gserver/tests/sequence_layer_group.conf index 50f2d89d02..ad1b61d582 100644 --- a/paddle/gserver/tests/sequence_layer_group.conf +++ b/paddle/legacy/gserver/tests/sequence_layer_group.conf @@ -16,13 +16,13 @@ from paddle.trainer_config_helpers import * ######################## data source ################################ -dict_path = 'gserver/tests/Sequence/tour_dict_phrase.dict' +dict_path = 'legacy/gserver/tests/Sequence/tour_dict_phrase.dict' dict_file = dict() for line_count, line in enumerate(open(dict_path, "r")): dict_file[line.strip()] = line_count define_py_data_sources2( - 
train_list='gserver/tests/Sequence/train.list', + train_list='legacy/gserver/tests/Sequence/train.list', test_list=None, module='sequenceGen', obj='process', diff --git a/paddle/gserver/tests/sequence_lstm.conf b/paddle/legacy/gserver/tests/sequence_lstm.conf similarity index 93% rename from paddle/gserver/tests/sequence_lstm.conf rename to paddle/legacy/gserver/tests/sequence_lstm.conf index f49a827f22..6ab70e7071 100644 --- a/paddle/gserver/tests/sequence_lstm.conf +++ b/paddle/legacy/gserver/tests/sequence_lstm.conf @@ -16,13 +16,13 @@ from paddle.trainer_config_helpers import * ######################## data source ################################ -dict_path = 'gserver/tests/Sequence/tour_dict_phrase.dict' +dict_path = 'legacy/gserver/tests/Sequence/tour_dict_phrase.dict' dict_file = dict() for line_count, line in enumerate(open(dict_path, "r")): dict_file[line.strip()] = line_count define_py_data_sources2( - train_list='gserver/tests/Sequence/train.list', + train_list='legacy/gserver/tests/Sequence/train.list', test_list=None, module='sequenceGen', obj='process', diff --git a/paddle/gserver/tests/sequence_nest_layer_group.conf b/paddle/legacy/gserver/tests/sequence_nest_layer_group.conf similarity index 95% rename from paddle/gserver/tests/sequence_nest_layer_group.conf rename to paddle/legacy/gserver/tests/sequence_nest_layer_group.conf index 71ef53d08a..75c36b1189 100644 --- a/paddle/gserver/tests/sequence_nest_layer_group.conf +++ b/paddle/legacy/gserver/tests/sequence_nest_layer_group.conf @@ -16,13 +16,13 @@ from paddle.trainer_config_helpers import * ######################## data source ################################ -dict_path = 'gserver/tests/Sequence/tour_dict_phrase.dict' +dict_path = 'legacy/gserver/tests/Sequence/tour_dict_phrase.dict' dict_file = dict() for line_count, line in enumerate(open(dict_path, "r")): dict_file[line.strip()] = line_count define_py_data_sources2( - train_list='gserver/tests/Sequence/train.list.nest', + train_list='legacy/gserver/tests/Sequence/train.list.nest', test_list=None, module='sequenceGen', obj='process2', diff --git a/paddle/gserver/tests/sequence_nest_rnn.conf b/paddle/legacy/gserver/tests/sequence_nest_rnn.conf similarity index 96% rename from paddle/gserver/tests/sequence_nest_rnn.conf rename to paddle/legacy/gserver/tests/sequence_nest_rnn.conf index 2873a59966..bc3b22c2a9 100644 --- a/paddle/gserver/tests/sequence_nest_rnn.conf +++ b/paddle/legacy/gserver/tests/sequence_nest_rnn.conf @@ -16,7 +16,7 @@ from paddle.trainer_config_helpers import * ######################## data source ################################ -define_py_data_sources2(train_list='gserver/tests/Sequence/dummy.list', +define_py_data_sources2(train_list='legacy/gserver/tests/Sequence/dummy.list', test_list=None, module='rnn_data_provider', obj='process_subseq') diff --git a/paddle/gserver/tests/sequence_nest_rnn_multi_input.conf b/paddle/legacy/gserver/tests/sequence_nest_rnn_multi_input.conf similarity index 97% rename from paddle/gserver/tests/sequence_nest_rnn_multi_input.conf rename to paddle/legacy/gserver/tests/sequence_nest_rnn_multi_input.conf index afdacfffd7..165ab22989 100644 --- a/paddle/gserver/tests/sequence_nest_rnn_multi_input.conf +++ b/paddle/legacy/gserver/tests/sequence_nest_rnn_multi_input.conf @@ -16,7 +16,7 @@ from paddle.trainer_config_helpers import * ######################## data source ################################ -define_py_data_sources2(train_list='gserver/tests/Sequence/dummy.list', 
+define_py_data_sources2(train_list='legacy/gserver/tests/Sequence/dummy.list',
                         test_list=None,
                         module='rnn_data_provider',
                         obj='process_subseq')
diff --git a/paddle/gserver/tests/sequence_nest_rnn_multi_unequalength_inputs.py b/paddle/legacy/gserver/tests/sequence_nest_rnn_multi_unequalength_inputs.py
similarity index 98%
rename from paddle/gserver/tests/sequence_nest_rnn_multi_unequalength_inputs.py
rename to paddle/legacy/gserver/tests/sequence_nest_rnn_multi_unequalength_inputs.py
index 569d3c094b..9a48b7f25c 100644
--- a/paddle/gserver/tests/sequence_nest_rnn_multi_unequalength_inputs.py
+++ b/paddle/legacy/gserver/tests/sequence_nest_rnn_multi_unequalength_inputs.py
@@ -15,7 +15,7 @@ from paddle.trainer_config_helpers import *
 ######################## data source ################################
 define_py_data_sources2(
-    train_list='gserver/tests/Sequence/dummy.list',
+    train_list='legacy/gserver/tests/Sequence/dummy.list',
     test_list=None,
     module='rnn_data_provider',
     obj='process_unequalength_subseq')
diff --git a/paddle/gserver/tests/sequence_recurrent.py b/paddle/legacy/gserver/tests/sequence_recurrent.py
similarity index 93%
rename from paddle/gserver/tests/sequence_recurrent.py
rename to paddle/legacy/gserver/tests/sequence_recurrent.py
index b88c09084e..e2c6a7935c 100644
--- a/paddle/gserver/tests/sequence_recurrent.py
+++ b/paddle/legacy/gserver/tests/sequence_recurrent.py
@@ -15,13 +15,13 @@ from paddle.trainer_config_helpers import *
 ######################## data source ################################
-dict_path = 'gserver/tests/Sequence/tour_dict_phrase.dict'
+dict_path = 'legacy/gserver/tests/Sequence/tour_dict_phrase.dict'
 dict_file = dict()
 for line_count, line in enumerate(open(dict_path, "r")):
     dict_file[line.strip()] = line_count
 define_py_data_sources2(
-    train_list='gserver/tests/Sequence/train.list',
+    train_list='legacy/gserver/tests/Sequence/train.list',
     test_list=None,
     module='sequenceGen',
     obj='process',
diff --git a/paddle/gserver/tests/sequence_recurrent_group.py b/paddle/legacy/gserver/tests/sequence_recurrent_group.py
similarity index 94%
rename from paddle/gserver/tests/sequence_recurrent_group.py
rename to paddle/legacy/gserver/tests/sequence_recurrent_group.py
index 0daf746700..b4638bd907 100644
--- a/paddle/gserver/tests/sequence_recurrent_group.py
+++ b/paddle/legacy/gserver/tests/sequence_recurrent_group.py
@@ -14,13 +14,13 @@ from paddle.trainer_config_helpers import *
 ######################## data source ################################
-dict_path = 'gserver/tests/Sequence/tour_dict_phrase.dict'
+dict_path = 'legacy/gserver/tests/Sequence/tour_dict_phrase.dict'
 dict_file = dict()
 for line_count, line in enumerate(open(dict_path, "r")):
     dict_file[line.strip()] = line_count
 define_py_data_sources2(
-    train_list='gserver/tests/Sequence/train.list',
+    train_list='legacy/gserver/tests/Sequence/train.list',
     test_list=None,
     module='sequenceGen',
     obj='process',
diff --git a/paddle/gserver/tests/sequence_rnn.conf b/paddle/legacy/gserver/tests/sequence_rnn.conf
similarity index 95%
rename from paddle/gserver/tests/sequence_rnn.conf
rename to paddle/legacy/gserver/tests/sequence_rnn.conf
index 1084edfe70..3133595c9c 100644
--- a/paddle/gserver/tests/sequence_rnn.conf
+++ b/paddle/legacy/gserver/tests/sequence_rnn.conf
@@ -16,7 +16,7 @@ from paddle.trainer_config_helpers import *
 ######################## data source ################################
-define_py_data_sources2(train_list='gserver/tests/Sequence/dummy.list',
+define_py_data_sources2(train_list='legacy/gserver/tests/Sequence/dummy.list',
                         test_list=None,
                         module='rnn_data_provider',
                         obj='process_seq')
diff --git a/paddle/gserver/tests/sequence_rnn_matched_inputs.py b/paddle/legacy/gserver/tests/sequence_rnn_matched_inputs.py
similarity index 97%
rename from paddle/gserver/tests/sequence_rnn_matched_inputs.py
rename to paddle/legacy/gserver/tests/sequence_rnn_matched_inputs.py
index 41a581e0cc..921cef04dd 100644
--- a/paddle/gserver/tests/sequence_rnn_matched_inputs.py
+++ b/paddle/legacy/gserver/tests/sequence_rnn_matched_inputs.py
@@ -16,7 +16,7 @@ from paddle.trainer_config_helpers import *
 ######################## data source ################################
 define_py_data_sources2(
-    train_list='gserver/tests/Sequence/dummy.list',
+    train_list='legacy/gserver/tests/Sequence/dummy.list',
     test_list=None,
     module='rnn_data_provider',
     obj='process_mixed')
diff --git a/paddle/gserver/tests/sequence_rnn_mixed_inputs.py b/paddle/legacy/gserver/tests/sequence_rnn_mixed_inputs.py
similarity index 97%
rename from paddle/gserver/tests/sequence_rnn_mixed_inputs.py
rename to paddle/legacy/gserver/tests/sequence_rnn_mixed_inputs.py
index ae89d8e2bb..c7bcaf6c4b 100644
--- a/paddle/gserver/tests/sequence_rnn_mixed_inputs.py
+++ b/paddle/legacy/gserver/tests/sequence_rnn_mixed_inputs.py
@@ -16,7 +16,7 @@ from paddle.trainer_config_helpers import *
 ######################## data source ################################
 define_py_data_sources2(
-    train_list='gserver/tests/Sequence/dummy.list',
+    train_list='legacy/gserver/tests/Sequence/dummy.list',
     test_list=None,
     module='rnn_data_provider',
     obj='process_mixed')
diff --git a/paddle/gserver/tests/sequence_rnn_multi_input.conf b/paddle/legacy/gserver/tests/sequence_rnn_multi_input.conf
similarity index 95%
rename from paddle/gserver/tests/sequence_rnn_multi_input.conf
rename to paddle/legacy/gserver/tests/sequence_rnn_multi_input.conf
index 9fae974f30..bf4be779a2 100644
--- a/paddle/gserver/tests/sequence_rnn_multi_input.conf
+++ b/paddle/legacy/gserver/tests/sequence_rnn_multi_input.conf
@@ -16,7 +16,7 @@ from paddle.trainer_config_helpers import *
 ######################## data source ################################
-define_py_data_sources2(train_list='gserver/tests/Sequence/dummy.list',
+define_py_data_sources2(train_list='legacy/gserver/tests/Sequence/dummy.list',
                         test_list=None,
                         module='rnn_data_provider',
                         obj='process_seq')
diff --git a/paddle/gserver/tests/sequence_rnn_multi_unequalength_inputs.py b/paddle/legacy/gserver/tests/sequence_rnn_multi_unequalength_inputs.py
similarity index 97%
rename from paddle/gserver/tests/sequence_rnn_multi_unequalength_inputs.py
rename to paddle/legacy/gserver/tests/sequence_rnn_multi_unequalength_inputs.py
index 6473fb3f3e..3612b49c22 100644
--- a/paddle/gserver/tests/sequence_rnn_multi_unequalength_inputs.py
+++ b/paddle/legacy/gserver/tests/sequence_rnn_multi_unequalength_inputs.py
@@ -16,7 +16,7 @@ from paddle.trainer_config_helpers import *
 ######################## data source ################################
 define_py_data_sources2(
-    train_list='gserver/tests/Sequence/dummy.list',
+    train_list='legacy/gserver/tests/Sequence/dummy.list',
     test_list=None,
     module='rnn_data_provider',
     obj='process_unequalength_seq')
diff --git a/paddle/gserver/tests/test_ActivationGrad.cpp b/paddle/legacy/gserver/tests/test_ActivationGrad.cpp
similarity index 98%
rename from paddle/gserver/tests/test_ActivationGrad.cpp
rename to paddle/legacy/gserver/tests/test_ActivationGrad.cpp
index b5e4af26dc..f468d229a8 100644
--- a/paddle/gserver/tests/test_ActivationGrad.cpp
+++ b/paddle/legacy/gserver/tests/test_ActivationGrad.cpp
@@ -16,7 +16,7 @@ limitations under the License. */
 #include
 #include
 #include "ModelConfig.pb.h"
-#include "paddle/gserver/layers/DataLayer.h"
+#include "paddle/legacy/gserver/layers/DataLayer.h"
 #include "LayerGradUtil.h"
 #include "paddle/testing/TestUtil.h"
diff --git a/paddle/gserver/tests/test_BatchNorm.cpp b/paddle/legacy/gserver/tests/test_BatchNorm.cpp
similarity index 96%
rename from paddle/gserver/tests/test_BatchNorm.cpp
rename to paddle/legacy/gserver/tests/test_BatchNorm.cpp
index a3ec66c758..e21fa16074 100644
--- a/paddle/gserver/tests/test_BatchNorm.cpp
+++ b/paddle/legacy/gserver/tests/test_BatchNorm.cpp
@@ -16,12 +16,12 @@ limitations under the License. */
 #include
 #include
 #include "ModelConfig.pb.h"
-#include "paddle/gserver/layers/DataLayer.h"
-#include "paddle/utils/GlobalConstants.h"
+#include "paddle/legacy/gserver/layers/DataLayer.h"
+#include "paddle/legacy/utils/GlobalConstants.h"
 #include "LayerGradUtil.h"
-#include "paddle/cuda/include/hl_batch_norm.h"
-#include "paddle/math/tests/TensorCheck.h"
+#include "paddle/legacy/cuda/include/hl_batch_norm.h"
+#include "paddle/legacy/math/tests/TensorCheck.h"
 #include "paddle/testing/TestUtil.h"
 using namespace paddle;  // NOLINT
diff --git a/paddle/gserver/tests/test_CRFLayerGrad.cpp b/paddle/legacy/gserver/tests/test_CRFLayerGrad.cpp
similarity index 97%
rename from paddle/gserver/tests/test_CRFLayerGrad.cpp
rename to paddle/legacy/gserver/tests/test_CRFLayerGrad.cpp
index 9f3d293656..1dafd1de4d 100644
--- a/paddle/gserver/tests/test_CRFLayerGrad.cpp
+++ b/paddle/legacy/gserver/tests/test_CRFLayerGrad.cpp
@@ -14,8 +14,8 @@ limitations under the License. */
 #include
 #include "ModelConfig.pb.h"
-#include "paddle/gserver/layers/DataLayer.h"
-#include "paddle/gserver/layers/LinearChainCRF.h"
+#include "paddle/legacy/gserver/layers/DataLayer.h"
+#include "paddle/legacy/gserver/layers/LinearChainCRF.h"
 #include "LayerGradUtil.h"
 #include "paddle/testing/TestUtil.h"
diff --git a/paddle/gserver/tests/test_CompareSparse.cpp b/paddle/legacy/gserver/tests/test_CompareSparse.cpp
similarity index 97%
rename from paddle/gserver/tests/test_CompareSparse.cpp
rename to paddle/legacy/gserver/tests/test_CompareSparse.cpp
index 2fbc404125..11b633a588 100644
--- a/paddle/gserver/tests/test_CompareSparse.cpp
+++ b/paddle/legacy/gserver/tests/test_CompareSparse.cpp
@@ -12,17 +12,17 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License. */
-#include
+#include
-#include "paddle/trainer/Trainer.h"
+#include "paddle/legacy/trainer/Trainer.h"
 #include
-#include
+#include
 using namespace paddle;  // NOLINT
 using namespace std;     // NOLINT
-static const string& configFile1 = "gserver/tests/sequence_lstm.conf";
+static const string& configFile1 = "legacy/gserver/tests/sequence_lstm.conf";
 DECLARE_bool(use_gpu);
 DECLARE_string(config);
diff --git a/paddle/gserver/tests/test_CompareTwoNets.cpp b/paddle/legacy/gserver/tests/test_CompareTwoNets.cpp
similarity index 96%
rename from paddle/gserver/tests/test_CompareTwoNets.cpp
rename to paddle/legacy/gserver/tests/test_CompareTwoNets.cpp
index 1c9b4002a3..e19c34abbd 100644
--- a/paddle/gserver/tests/test_CompareTwoNets.cpp
+++ b/paddle/legacy/gserver/tests/test_CompareTwoNets.cpp
@@ -13,11 +13,11 @@ See the License for the specific language governing permissions and
 limitations under the License. */
 #include
-#include
+#include
 #include
 #include
-#include "paddle/trainer/Trainer.h"
+#include "paddle/legacy/trainer/Trainer.h"
 using namespace paddle;  // NOLINT
 using namespace std;     // NOLINT
@@ -40,9 +40,10 @@ DEFINE_double(
 DECLARE_bool(thread_local_rand_use_global_seed);
 DECLARE_int32(seed);
-static const string& config_file_a = "gserver/tests/sequence_recurrent.py";
+static const string& config_file_a =
+    "legacy/gserver/tests/sequence_recurrent.py";
 static const string& config_file_b =
-    "gserver/tests/sequence_recurrent_group.py";
+    "legacy/gserver/tests/sequence_recurrent_group.py";
 struct ComData {
   vector outArgs;
diff --git a/paddle/gserver/tests/test_ConvTrans.cpp b/paddle/legacy/gserver/tests/test_ConvTrans.cpp
similarity index 98%
rename from paddle/gserver/tests/test_ConvTrans.cpp
rename to paddle/legacy/gserver/tests/test_ConvTrans.cpp
index 2e394a74b7..4ea0a3d379 100644
--- a/paddle/gserver/tests/test_ConvTrans.cpp
+++ b/paddle/legacy/gserver/tests/test_ConvTrans.cpp
@@ -16,9 +16,9 @@ limitations under the License. */
 #include
 #include
 #include "ModelConfig.pb.h"
-#include "paddle/gserver/layers/DataLayer.h"
-#include "paddle/math/MathUtils.h"
-#include "paddle/utils/GlobalConstants.h"
+#include "paddle/legacy/gserver/layers/DataLayer.h"
+#include "paddle/legacy/math/MathUtils.h"
+#include "paddle/legacy/utils/GlobalConstants.h"
 #include "LayerGradUtil.h"
 #include "paddle/testing/TestUtil.h"
diff --git a/paddle/gserver/tests/test_ConvUnify.cpp b/paddle/legacy/gserver/tests/test_ConvUnify.cpp
similarity index 98%
rename from paddle/gserver/tests/test_ConvUnify.cpp
rename to paddle/legacy/gserver/tests/test_ConvUnify.cpp
index ba820d9a2a..d4ca158352 100644
--- a/paddle/gserver/tests/test_ConvUnify.cpp
+++ b/paddle/legacy/gserver/tests/test_ConvUnify.cpp
@@ -16,9 +16,9 @@ limitations under the License. */
 #include
 #include
 #include "ModelConfig.pb.h"
-#include "paddle/gserver/layers/DataLayer.h"
-#include "paddle/math/MathUtils.h"
-#include "paddle/utils/GlobalConstants.h"
+#include "paddle/legacy/gserver/layers/DataLayer.h"
+#include "paddle/legacy/math/MathUtils.h"
+#include "paddle/legacy/utils/GlobalConstants.h"
 #include "LayerGradUtil.h"
 #include "paddle/testing/TestUtil.h"
diff --git a/paddle/gserver/tests/test_CrossEntropyOverBeamGrad.cpp b/paddle/legacy/gserver/tests/test_CrossEntropyOverBeamGrad.cpp
similarity index 99%
rename from paddle/gserver/tests/test_CrossEntropyOverBeamGrad.cpp
rename to paddle/legacy/gserver/tests/test_CrossEntropyOverBeamGrad.cpp
index 0041ed3093..34eb0dedff 100644
--- a/paddle/gserver/tests/test_CrossEntropyOverBeamGrad.cpp
+++ b/paddle/legacy/gserver/tests/test_CrossEntropyOverBeamGrad.cpp
@@ -17,7 +17,7 @@ limitations under the License. */
 #include
 #include "ModelConfig.pb.h"
-#include "paddle/gserver/layers/DataLayer.h"
+#include "paddle/legacy/gserver/layers/DataLayer.h"
 #include "LayerGradUtil.h"
 #include "paddle/testing/TestUtil.h"
diff --git a/paddle/gserver/tests/test_DetectionOutput.cpp b/paddle/legacy/gserver/tests/test_DetectionOutput.cpp
similarity index 100%
rename from paddle/gserver/tests/test_DetectionOutput.cpp
rename to paddle/legacy/gserver/tests/test_DetectionOutput.cpp
diff --git a/paddle/gserver/tests/test_Evaluator.cpp b/paddle/legacy/gserver/tests/test_Evaluator.cpp
similarity index 99%
rename from paddle/gserver/tests/test_Evaluator.cpp
rename to paddle/legacy/gserver/tests/test_Evaluator.cpp
index 4a8843f3af..8aab50d23e 100644
--- a/paddle/gserver/tests/test_Evaluator.cpp
+++ b/paddle/legacy/gserver/tests/test_Evaluator.cpp
@@ -15,8 +15,8 @@ limitations under the License. */
 #include
 #include
 #include "ModelConfig.pb.h"
+#include "paddle/legacy/trainer/Trainer.h"
 #include "paddle/testing/TestUtil.h"
-#include "paddle/trainer/Trainer.h"
 using namespace paddle;  // NOLINT
 using namespace std;     // NOLINT
diff --git a/paddle/gserver/tests/test_Expand.cpp b/paddle/legacy/gserver/tests/test_Expand.cpp
similarity index 100%
rename from paddle/gserver/tests/test_Expand.cpp
rename to paddle/legacy/gserver/tests/test_Expand.cpp
diff --git a/paddle/gserver/tests/test_KmaxSeqScore.cpp b/paddle/legacy/gserver/tests/test_KmaxSeqScore.cpp
similarity index 98%
rename from paddle/gserver/tests/test_KmaxSeqScore.cpp
rename to paddle/legacy/gserver/tests/test_KmaxSeqScore.cpp
index 168ffbdac8..e15b4e5038 100644
--- a/paddle/gserver/tests/test_KmaxSeqScore.cpp
+++ b/paddle/legacy/gserver/tests/test_KmaxSeqScore.cpp
@@ -17,8 +17,8 @@ limitations under the License. */
 #include
 #include
 #include "ModelConfig.pb.h"
-#include "paddle/gserver/layers/DataLayer.h"
-#include "paddle/utils/GlobalConstants.h"
+#include "paddle/legacy/gserver/layers/DataLayer.h"
+#include "paddle/legacy/utils/GlobalConstants.h"
 #include "LayerGradUtil.h"
 #include "paddle/testing/TestUtil.h"
diff --git a/paddle/gserver/tests/test_LayerGrad.cpp b/paddle/legacy/gserver/tests/test_LayerGrad.cpp
similarity index 99%
rename from paddle/gserver/tests/test_LayerGrad.cpp
rename to paddle/legacy/gserver/tests/test_LayerGrad.cpp
index 1254d58050..979cf8ee67 100644
--- a/paddle/gserver/tests/test_LayerGrad.cpp
+++ b/paddle/legacy/gserver/tests/test_LayerGrad.cpp
@@ -19,8 +19,8 @@ limitations under the License. */
 #include
 #include
 #include "ModelConfig.pb.h"
-#include "paddle/gserver/layers/DataLayer.h"
-#include "paddle/math/MathUtils.h"
+#include "paddle/legacy/gserver/layers/DataLayer.h"
+#include "paddle/legacy/math/MathUtils.h"
 #include "LayerGradUtil.h"
 #include "paddle/testing/TestUtil.h"
diff --git a/paddle/gserver/tests/test_LinearChainCRF.cpp b/paddle/legacy/gserver/tests/test_LinearChainCRF.cpp
similarity index 95%
rename from paddle/gserver/tests/test_LinearChainCRF.cpp
rename to paddle/legacy/gserver/tests/test_LinearChainCRF.cpp
index 423c31e27d..7082c1363a 100644
--- a/paddle/gserver/tests/test_LinearChainCRF.cpp
+++ b/paddle/legacy/gserver/tests/test_LinearChainCRF.cpp
@@ -14,8 +14,8 @@ limitations under the License. */
 #include
 #include
-#include "paddle/gserver/layers/LinearChainCRF.h"
-#include "paddle/utils/Util.h"
+#include "paddle/legacy/gserver/layers/LinearChainCRF.h"
+#include "paddle/legacy/utils/Util.h"
 using namespace paddle;  // NOLINT
 using namespace std;     // NOLINT
diff --git a/paddle/gserver/tests/test_MKLDNN.cpp b/paddle/legacy/gserver/tests/test_MKLDNN.cpp
similarity index 98%
rename from paddle/gserver/tests/test_MKLDNN.cpp
rename to paddle/legacy/gserver/tests/test_MKLDNN.cpp
index a34a3f6206..c79ccd1956 100644
--- a/paddle/gserver/tests/test_MKLDNN.cpp
+++ b/paddle/legacy/gserver/tests/test_MKLDNN.cpp
@@ -13,13 +13,13 @@ See the License for the specific language governing permissions and
 limitations under the License. */
 #include
-#include
+#include
 #include
 #include
 #include "MKLDNNTester.h"
 #include "ModelConfig.pb.h"
-#include "paddle/gserver/activations/MKLDNNActivation.h"
-#include "paddle/math/MathUtils.h"
+#include "paddle/legacy/gserver/activations/MKLDNNActivation.h"
+#include "paddle/legacy/math/MathUtils.h"
 using namespace paddle;  // NOLINT
@@ -426,7 +426,7 @@ DECLARE_string(config_args);
 TEST(MKLDNNNet, net) {
   std::vector cases = {"simple", "branch"};
   for (auto name : cases) {
-    std::string config = "./gserver/tests/mkldnn_" + name + "_net.conf";
+    std::string config = "./legacy/gserver/tests/mkldnn_" + name + "_net.conf";
     for (auto channels : {2, 32}) {
       std::ostringstream oss;
       oss << "channels=" << channels;
diff --git a/paddle/gserver/tests/test_MaxPoolingWithMaskOutput.cpp b/paddle/legacy/gserver/tests/test_MaxPoolingWithMaskOutput.cpp
similarity index 98%
rename from paddle/gserver/tests/test_MaxPoolingWithMaskOutput.cpp
rename to paddle/legacy/gserver/tests/test_MaxPoolingWithMaskOutput.cpp
index 5188d2abed..2bc261b4a8 100644
--- a/paddle/gserver/tests/test_MaxPoolingWithMaskOutput.cpp
+++ b/paddle/legacy/gserver/tests/test_MaxPoolingWithMaskOutput.cpp
@@ -17,7 +17,7 @@ limitations under the License. */
 #include
 #include "LayerGradUtil.h"
-#include "paddle/math/MathUtils.h"
+#include "paddle/legacy/math/MathUtils.h"
 #include "paddle/testing/TestUtil.h"
 using namespace paddle;
diff --git a/paddle/gserver/tests/test_MultinomialSampler.cpp b/paddle/legacy/gserver/tests/test_MultinomialSampler.cpp
similarity index 95%
rename from paddle/gserver/tests/test_MultinomialSampler.cpp
rename to paddle/legacy/gserver/tests/test_MultinomialSampler.cpp
index 4a295ea9d5..25b1a1191d 100644
--- a/paddle/gserver/tests/test_MultinomialSampler.cpp
+++ b/paddle/legacy/gserver/tests/test_MultinomialSampler.cpp
@@ -18,16 +18,16 @@ limitations under the License. */
 #include
 #undef PADDLE_DISABLE_TIMER
-#include "paddle/utils/Stat.h"
+#include "paddle/legacy/utils/Stat.h"
-#include "paddle/gserver/layers/MultinomialSampler.h"
-#include "paddle/utils/Util.h"
+#include "paddle/legacy/gserver/layers/MultinomialSampler.h"
+#include "paddle/legacy/utils/Util.h"
 using namespace paddle;  // NOLINT
 using namespace std;     // NOLINT
 class MultinomialSamplerTester : public MultinomialSampler {
-public:
+ public:
   MultinomialSamplerTester(real* prob, int size)
       : MultinomialSampler(prob, size) {}
diff --git a/paddle/gserver/tests/test_NetworkCompare.cpp b/paddle/legacy/gserver/tests/test_NetworkCompare.cpp
similarity index 88%
rename from paddle/gserver/tests/test_NetworkCompare.cpp
rename to paddle/legacy/gserver/tests/test_NetworkCompare.cpp
index fda3f2f793..c9f9f3e61b 100644
--- a/paddle/gserver/tests/test_NetworkCompare.cpp
+++ b/paddle/legacy/gserver/tests/test_NetworkCompare.cpp
@@ -14,13 +14,13 @@ limitations under the License. */
 #undef PADDLE_DISABLE_TIMER
 #include
-#include
+#include
 #include
 #include
+#include "paddle/legacy/trainer/Trainer.h"
+#include "paddle/legacy/utils/Stat.h"
 #include "paddle/testing/TestUtil.h"
-#include "paddle/trainer/Trainer.h"
-#include "paddle/utils/Stat.h"
 using namespace paddle;  // NOLINT
 using namespace std;     // NOLINT
@@ -220,33 +220,33 @@ void compareNetwork(const std::string& config_file_a,
 }
 TEST(Compare, concat_dotmul) {
-  std::string config_file_a = "./gserver/tests/concat_dotmul_a.conf";
-  std::string config_file_b = "./gserver/tests/concat_dotmul_b.conf";
+  std::string config_file_a = "./legacy/gserver/tests/concat_dotmul_a.conf";
+  std::string config_file_b = "./legacy/gserver/tests/concat_dotmul_b.conf";
   compareNetwork(config_file_a, config_file_b);
 }
 TEST(Compare, concat_fullmatrix) {
-  std::string config_file_a = "./gserver/tests/concat_fullmatrix_a.conf";
-  std::string config_file_b = "./gserver/tests/concat_fullmatrix_b.conf";
+  std::string config_file_a = "./legacy/gserver/tests/concat_fullmatrix_a.conf";
+  std::string config_file_b = "./legacy/gserver/tests/concat_fullmatrix_b.conf";
   compareNetwork(config_file_a, config_file_b);
 }
 TEST(Compare, concat_table) {
-  std::string config_file_a = "./gserver/tests/concat_table_a.conf";
-  std::string config_file_b = "./gserver/tests/concat_table_b.conf";
+  std::string config_file_a = "./legacy/gserver/tests/concat_table_a.conf";
+  std::string config_file_b = "./legacy/gserver/tests/concat_table_b.conf";
   compareNetwork(config_file_a, config_file_b);
 }
 TEST(Compare, concat_slice) {
-  std::string config_file_a = "./gserver/tests/concat_slice_a.conf";
-  std::string config_file_b = "./gserver/tests/concat_slice_b.conf";
+  std::string config_file_a = "./legacy/gserver/tests/concat_slice_a.conf";
+  std::string config_file_b = "./legacy/gserver/tests/concat_slice_b.conf";
   compareNetwork(config_file_a, config_file_b);
 }
 #ifdef PADDLE_WITH_CUDA
 TEST(Compare, img_pool) {
-  std::string config_file_a = "./gserver/tests/img_pool_a.conf";
-  std::string config_file_b = "./gserver/tests/img_pool_b.conf";
+  std::string config_file_a = "./legacy/gserver/tests/img_pool_a.conf";
+  std::string config_file_b = "./legacy/gserver/tests/img_pool_b.conf";
   bool useGpu = FLAGS_use_gpu;
   FLAGS_use_gpu = true;
   compareNetwork(config_file_a, config_file_b);
@@ -254,8 +254,8 @@ TEST(Compare, img_pool) {
 }
 TEST(Compare, img_conv) {
-  std::string config_file_a = "./gserver/tests/img_conv_a.conf";
-  std::string config_file_b = "./gserver/tests/img_conv_b.conf";
+  std::string config_file_a = "./legacy/gserver/tests/img_conv_a.conf";
"./legacy/gserver/tests/img_conv_a.conf"; + std::string config_file_b = "./legacy/gserver/tests/img_conv_b.conf"; bool useGpu = FLAGS_use_gpu; FLAGS_use_gpu = true; compareNetwork(config_file_a, config_file_b); @@ -264,8 +264,8 @@ TEST(Compare, img_conv) { // Test cudnn_conv and exconv give the same result TEST(Compare, img_conv2) { - std::string config_file_a = "./gserver/tests/img_conv_cudnn.py"; - std::string config_file_b = "./gserver/tests/img_conv_exconv.py"; + std::string config_file_a = "./legacy/gserver/tests/img_conv_cudnn.py"; + std::string config_file_b = "./legacy/gserver/tests/img_conv_exconv.py"; bool useGpu = FLAGS_use_gpu; double eps = FLAGS_checkgrad_eps; FLAGS_use_gpu = true; diff --git a/paddle/gserver/tests/test_PriorBox.cpp b/paddle/legacy/gserver/tests/test_PriorBox.cpp similarity index 100% rename from paddle/gserver/tests/test_PriorBox.cpp rename to paddle/legacy/gserver/tests/test_PriorBox.cpp diff --git a/paddle/gserver/tests/test_PyDataProvider.cpp b/paddle/legacy/gserver/tests/test_PyDataProvider.cpp similarity index 95% rename from paddle/gserver/tests/test_PyDataProvider.cpp rename to paddle/legacy/gserver/tests/test_PyDataProvider.cpp index a1dee97950..0209e6818a 100644 --- a/paddle/gserver/tests/test_PyDataProvider.cpp +++ b/paddle/legacy/gserver/tests/test_PyDataProvider.cpp @@ -17,8 +17,8 @@ limitations under the License. */ #include -#include "paddle/gserver/dataproviders/PyDataProvider.h" -#include "paddle/utils/Util.h" +#include "paddle/legacy/gserver/dataproviders/PyDataProvider.h" +#include "paddle/legacy/utils/Util.h" #include "paddle/testing/TestUtil.h" @@ -35,7 +35,8 @@ TEST(PyDataProvider, py_fill_slots) { config.set_load_data_module(std::string("pyDataProvider")); config.set_load_data_object(std::string("SimpleDataProvider")); config.clear_files(); - std::string dataFile = "gserver/tests/pyDataProvider/pyDataProviderList"; + std::string dataFile = + "legacy/gserver/tests/pyDataProvider/pyDataProviderList"; config.set_files(dataFile); #ifndef PADDLE_WITH_CUDA bool useGpu = false; @@ -68,7 +69,8 @@ TEST(PyDataProvider, py_fill_nest_slots) { config.set_load_data_module(std::string("pyDataProvider")); config.set_load_data_object(std::string("SimpleNestDataProvider")); config.clear_files(); - std::string dataFile = "gserver/tests/pyDataProvider/pyDataProviderList"; + std::string dataFile = + "legacy/gserver/tests/pyDataProvider/pyDataProviderList"; config.set_files(dataFile); EXPECT_EQ(config.IsInitialized(), true); #ifndef PADDLE_WITH_CUDA diff --git a/paddle/gserver/tests/test_PyDataProvider2.cpp b/paddle/legacy/gserver/tests/test_PyDataProvider2.cpp similarity index 98% rename from paddle/gserver/tests/test_PyDataProvider2.cpp rename to paddle/legacy/gserver/tests/test_PyDataProvider2.cpp index b39fb35345..de313ba82c 100644 --- a/paddle/gserver/tests/test_PyDataProvider2.cpp +++ b/paddle/legacy/gserver/tests/test_PyDataProvider2.cpp @@ -15,9 +15,9 @@ limitations under the License. 
 #ifndef PADDLE_NO_PYTHON
 #include
 #include
-#include "paddle/gserver/dataproviders/DataProvider.h"
-#include "paddle/utils/PythonUtil.h"
-#include "paddle/utils/Util.h"
+#include "paddle/legacy/gserver/dataproviders/DataProvider.h"
+#include "paddle/legacy/utils/PythonUtil.h"
+#include "paddle/legacy/utils/Util.h"
 DEFINE_string(train_list, "unittest.list", "file list for unittest");
diff --git a/paddle/gserver/tests/test_PyDataProvider2.py b/paddle/legacy/gserver/tests/test_PyDataProvider2.py
similarity index 100%
rename from paddle/gserver/tests/test_PyDataProvider2.py
rename to paddle/legacy/gserver/tests/test_PyDataProvider2.py
diff --git a/paddle/gserver/tests/test_RecurrentGradientMachine.cpp b/paddle/legacy/gserver/tests/test_RecurrentGradientMachine.cpp
similarity index 79%
rename from paddle/gserver/tests/test_RecurrentGradientMachine.cpp
rename to paddle/legacy/gserver/tests/test_RecurrentGradientMachine.cpp
index 72324fcf29..153c3e7f36 100644
--- a/paddle/gserver/tests/test_RecurrentGradientMachine.cpp
+++ b/paddle/legacy/gserver/tests/test_RecurrentGradientMachine.cpp
@@ -13,20 +13,20 @@ See the License for the specific language governing permissions and
 limitations under the License. */
 #include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
 DECLARE_int32(seed);
 using namespace paddle;  // NOLINT
 using namespace std;     // NOLINT
 class TrainerForTest : public paddle::Trainer {
-public:
+ public:
   void startTrain() {
     GradientMachine& gm = *this->trainerInternal_.getGradientMachine();
     gm.start();
@@ -102,11 +102,11 @@ void test(const string& conf1, const string& conf2, double eps, bool useGpu) {
   FLAGS_use_gpu = useGpu;
   int num_passes = 5;
   real* cost1 = new real[num_passes];
-  const string dir1 = "gserver/tests/t1";
+  const string dir1 = "legacy/gserver/tests/t1";
   CalCost(conf1, dir1, cost1, num_passes);
   real* cost2 = new real[num_passes];
-  const string dir2 = "gserver/tests/t2";
+  const string dir2 = "legacy/gserver/tests/t2";
   CalCost(conf2, dir2, cost2, num_passes);
   for (int i = 0; i < num_passes; i++) {
@@ -121,8 +121,8 @@ void test(const string& conf1, const string& conf2, double eps, bool useGpu) {
 TEST(RecurrentGradientMachine, HasSubSequence) {
   for (bool useGpu : {false, true}) {
-    test("gserver/tests/sequence_layer_group.conf",
-         "gserver/tests/sequence_nest_layer_group.conf",
+    test("legacy/gserver/tests/sequence_layer_group.conf",
+         "legacy/gserver/tests/sequence_nest_layer_group.conf",
          1e-5,
          useGpu);
   }
@@ -130,8 +130,8 @@ TEST(RecurrentGradientMachine, HasSubSequence) {
 TEST(RecurrentGradientMachine, rnn) {
   for (bool useGpu : {false, true}) {
-    test("gserver/tests/sequence_rnn.conf",
-         "gserver/tests/sequence_nest_rnn.conf",
+    test("legacy/gserver/tests/sequence_rnn.conf",
+         "legacy/gserver/tests/sequence_nest_rnn.conf",
          1e-6,
          useGpu);
   }
@@ -139,8 +139,8 @@ TEST(RecurrentGradientMachine, rnn) {
 TEST(RecurrentGradientMachine, rnn_multi_input) {
   for (bool useGpu : {false, true}) {
-    test("gserver/tests/sequence_rnn_multi_input.conf",
-         "gserver/tests/sequence_nest_rnn_multi_input.conf",
+    test("legacy/gserver/tests/sequence_rnn_multi_input.conf",
+         "legacy/gserver/tests/sequence_nest_rnn_multi_input.conf",
          1e-6,
          useGpu);
   }
@@ -148,8 +148,8 @@ TEST(RecurrentGradientMachine, rnn_multi_input) {
 TEST(RecurrentGradientMachine, rnn_multi_unequalength_input) {
   for (bool useGpu : {false, true}) {
-    test("gserver/tests/sequence_rnn_multi_unequalength_inputs.py",
-         "gserver/tests/sequence_nest_rnn_multi_unequalength_inputs.py",
"gserver/tests/sequence_nest_rnn_multi_unequalength_inputs.py", + test("legacy/gserver/tests/sequence_rnn_multi_unequalength_inputs.py", + "legacy/gserver/tests/sequence_nest_rnn_multi_unequalength_inputs.py", 1e-6, useGpu); } @@ -157,8 +157,8 @@ TEST(RecurrentGradientMachine, rnn_multi_unequalength_input) { TEST(RecurrentGradientMachine, rnn_mixed_input) { for (bool useGpu : {false, true}) { - test("gserver/tests/sequence_rnn_mixed_inputs.py", - "gserver/tests/sequence_rnn_matched_inputs.py", + test("legacy/gserver/tests/sequence_rnn_mixed_inputs.py", + "legacy/gserver/tests/sequence_rnn_matched_inputs.py", 1e-6, useGpu); } diff --git a/paddle/gserver/tests/test_RecurrentLayer.cpp b/paddle/legacy/gserver/tests/test_RecurrentLayer.cpp similarity index 97% rename from paddle/gserver/tests/test_RecurrentLayer.cpp rename to paddle/legacy/gserver/tests/test_RecurrentLayer.cpp index e5ce922f15..71198cb6a1 100644 --- a/paddle/gserver/tests/test_RecurrentLayer.cpp +++ b/paddle/legacy/gserver/tests/test_RecurrentLayer.cpp @@ -13,11 +13,11 @@ See the License for the specific language governing permissions and limitations under the License. */ #include -#include +#include #include #include "ModelConfig.pb.h" -#include "paddle/gserver/layers/DataLayer.h" -#include "paddle/gserver/layers/Layer.h" +#include "paddle/legacy/gserver/layers/DataLayer.h" +#include "paddle/legacy/gserver/layers/Layer.h" #include "paddle/testing/TestUtil.h" @@ -220,12 +220,12 @@ TEST(Layer, RecurrentLayer) { } #define protected public -#include "paddle/gserver/layers/GatedRecurrentLayer.h" -#include "paddle/gserver/layers/LstmLayer.h" -#include "paddle/gserver/layers/RecurrentLayer.h" +#include "paddle/legacy/gserver/layers/GatedRecurrentLayer.h" +#include "paddle/legacy/gserver/layers/LstmLayer.h" +#include "paddle/legacy/gserver/layers/RecurrentLayer.h" template class TestRecurrentLayer { -public: + public: LayerConfig config_; bool useGpu_; bool useBatch_; @@ -423,7 +423,7 @@ TEST(Layer, LstmLayer) { #ifdef PADDLE_WITH_MKLML -#include "paddle/gserver/layers/MKLPackedRecurrentLayer.h" +#include "paddle/legacy/gserver/layers/MKLPackedRecurrentLayer.h" LayerPtr initMKLPackedLayer(LayerConfig layerConfig, bool reversed, diff --git a/paddle/gserver/tests/test_SelectiveFCLayer.cpp b/paddle/legacy/gserver/tests/test_SelectiveFCLayer.cpp similarity index 94% rename from paddle/gserver/tests/test_SelectiveFCLayer.cpp rename to paddle/legacy/gserver/tests/test_SelectiveFCLayer.cpp index 583e3bc545..1975d9196d 100644 --- a/paddle/gserver/tests/test_SelectiveFCLayer.cpp +++ b/paddle/legacy/gserver/tests/test_SelectiveFCLayer.cpp @@ -14,16 +14,16 @@ limitations under the License. 
diff --git a/paddle/gserver/tests/test_SelectiveFCLayer.cpp b/paddle/legacy/gserver/tests/test_SelectiveFCLayer.cpp
similarity index 94%
rename from paddle/gserver/tests/test_SelectiveFCLayer.cpp
rename to paddle/legacy/gserver/tests/test_SelectiveFCLayer.cpp
index 583e3bc545..1975d9196d 100644
--- a/paddle/gserver/tests/test_SelectiveFCLayer.cpp
+++ b/paddle/legacy/gserver/tests/test_SelectiveFCLayer.cpp
@@ -14,16 +14,16 @@ limitations under the License. */
 #include
 #include
-#include
+#include
 #include
 #include
 #include
 #include "ModelConfig.pb.h"
-#include "paddle/gserver/layers/DataLayer.h"
-#include "paddle/gserver/layers/FullyConnectedLayer.h"
-#include "paddle/gserver/layers/Layer.h"
-#include "paddle/gserver/layers/SelectiveFullyConnectedLayer.h"
-#include "paddle/math/CpuSparseMatrix.h"
+#include "paddle/legacy/gserver/layers/DataLayer.h"
+#include "paddle/legacy/gserver/layers/FullyConnectedLayer.h"
+#include "paddle/legacy/gserver/layers/Layer.h"
+#include "paddle/legacy/gserver/layers/SelectiveFullyConnectedLayer.h"
+#include "paddle/legacy/math/CpuSparseMatrix.h"
 using namespace paddle;  // NOLINT
 using namespace std;     // NOLINT
@@ -76,7 +76,7 @@ void calcOutput(ComData& comData,
   FLAGS_config = configFile;
   FLAGS_config_args = configArgs;
   FLAGS_use_gpu = useGpu;
-  FLAGS_init_model_path = "gserver/tests/SelectiveFcTest/model";
+  FLAGS_init_model_path = "legacy/gserver/tests/SelectiveFcTest/model";
   *ThreadLocalRand::getSeed() = 0;
   srand(0);
@@ -311,13 +311,13 @@ LayerPtr initFcLayer(LayerPtr dataLayer,
 #ifndef PADDLE_TYPE_DOUBLE
 // The parameter file used in fc.conf and selective_fc.conf is float
 TEST(Layer, SelectiveFcLayer_train_dense_mul) {
-  const string& fcConfig = "gserver/tests/SelectiveFcTest/conf/fc.conf";
+  const string& fcConfig = "legacy/gserver/tests/SelectiveFcTest/conf/fc.conf";
   const string& fcConfigArgs =
-      "filelist=gserver/tests/SelectiveFcTest/dense_mul_list";
+      "filelist=legacy/gserver/tests/SelectiveFcTest/dense_mul_list";
   const string& selFcConfig =
-      "gserver/tests/SelectiveFcTest/conf/selective_fc.conf";
+      "legacy/gserver/tests/SelectiveFcTest/conf/selective_fc.conf";
   const string& selConfigArgs =
-      "filelist=gserver/tests/SelectiveFcTest/dense_mul_list";
+      "filelist=legacy/gserver/tests/SelectiveFcTest/dense_mul_list";
   for (auto useGpu : {false, true}) {
 #ifndef PADDLE_WITH_CUDA
@@ -350,7 +350,7 @@ void testSelectiveFcLayerTrainSparseMul(const LayerConfig& config,
       creatDataLayer("data", batchSize, dataLayerSize, values, useGpu);
   const string& selfcParaFile =
-      "gserver/tests/SelectiveFcTest/model/rand_fc_param.w.transpose";
+      "legacy/gserver/tests/SelectiveFcTest/model/rand_fc_param.w.transpose";
   const string& selfcParaName = "rand_fc_param.w.transpose";
   std::shared_ptr selfcLayer =
@@ -396,7 +396,7 @@ void testSelectiveFcLayerTrainSparseMul(const LayerConfig& config,
   size_t nnz = cpuOutMatSelfc->getElementCnt();
   const string& fcParaFile =
-      "gserver/tests/SelectiveFcTest/model/rand_fc_param.w";
+      "legacy/gserver/tests/SelectiveFcTest/model/rand_fc_param.w";
   const string& fcParaName = "rand_fc_param.w";
   LayerConfig fcLayerConfig;
   fcLayerConfig.set_name("fc_layer");
diff --git a/paddle/gserver/tests/test_SeqSliceLayerGrad.cpp b/paddle/legacy/gserver/tests/test_SeqSliceLayerGrad.cpp
similarity index 99%
rename from paddle/gserver/tests/test_SeqSliceLayerGrad.cpp
rename to paddle/legacy/gserver/tests/test_SeqSliceLayerGrad.cpp
index 406ca63b6e..05acd71421 100644
--- a/paddle/gserver/tests/test_SeqSliceLayerGrad.cpp
+++ b/paddle/legacy/gserver/tests/test_SeqSliceLayerGrad.cpp
@@ -14,7 +14,7 @@ limitations under the License. */
 #include
 #include "ModelConfig.pb.h"
-#include "paddle/gserver/layers/DataLayer.h"
+#include "paddle/legacy/gserver/layers/DataLayer.h"
 #include "LayerGradUtil.h"
 #include "paddle/testing/TestUtil.h"
diff --git a/paddle/gserver/tests/test_Upsample.cpp b/paddle/legacy/gserver/tests/test_Upsample.cpp
similarity index 99%
rename from paddle/gserver/tests/test_Upsample.cpp
rename to paddle/legacy/gserver/tests/test_Upsample.cpp
index 39b902fcc7..940d46baf7 100644
--- a/paddle/gserver/tests/test_Upsample.cpp
+++ b/paddle/legacy/gserver/tests/test_Upsample.cpp
@@ -17,7 +17,7 @@ limitations under the License. */
 #include
 #include "LayerGradUtil.h"
-#include "paddle/math/MathUtils.h"
+#include "paddle/legacy/math/MathUtils.h"
 #include "paddle/testing/TestUtil.h"
 void setPoolConfig(paddle::TestConfig* config,
diff --git a/paddle/gserver/tests/test_WarpCTCLayer.cpp b/paddle/legacy/gserver/tests/test_WarpCTCLayer.cpp
similarity index 96%
rename from paddle/gserver/tests/test_WarpCTCLayer.cpp
rename to paddle/legacy/gserver/tests/test_WarpCTCLayer.cpp
index f2299d7da2..b1697e1616 100644
--- a/paddle/gserver/tests/test_WarpCTCLayer.cpp
+++ b/paddle/legacy/gserver/tests/test_WarpCTCLayer.cpp
@@ -13,12 +13,12 @@ See the License for the specific language governing permissions and
 limitations under the License. */
 #include
-#include
+#include
 #include "ModelConfig.pb.h"
-#include "paddle/gserver/layers/CTCLayer.h"
-#include "paddle/gserver/layers/DataLayer.h"
-#include "paddle/gserver/layers/Layer.h"
-#include "paddle/gserver/layers/WarpCTCLayer.h"
+#include "paddle/legacy/gserver/layers/CTCLayer.h"
+#include "paddle/legacy/gserver/layers/DataLayer.h"
+#include "paddle/legacy/gserver/layers/Layer.h"
+#include "paddle/legacy/gserver/layers/WarpCTCLayer.h"
 #include "paddle/testing/TestUtil.h"
diff --git a/paddle/math/Allocator.h b/paddle/legacy/math/Allocator.h
similarity index 97%
rename from paddle/math/Allocator.h
rename to paddle/legacy/math/Allocator.h
index ae60f6fe5f..ffb5ec1cad 100644
--- a/paddle/math/Allocator.h
+++ b/paddle/legacy/math/Allocator.h
@@ -17,7 +17,7 @@ limitations under the License. */
 #include
 #include
 #include "hl_gpu.h"
-#include "paddle/utils/Logging.h"
+#include "paddle/legacy/utils/Logging.h"
 namespace paddle {
@@ -27,7 +27,7 @@ namespace paddle {
  * This is the base class of all Allocator class.
  */
 class Allocator {
-public:
+ public:
   virtual ~Allocator() {}
   virtual void* alloc(size_t size) = 0;
   virtual void free(void* ptr) = 0;
@@ -38,7 +38,7 @@ public:
  * @brief CPU allocator implementation.
  */
 class CpuAllocator : public Allocator {
-public:
+ public:
   ~CpuAllocator() {}
 /**
@@ -76,7 +76,7 @@ public:
  * @brief GPU allocator implementation.
  */
 class GpuAllocator : public Allocator {
-public:
+ public:
   ~GpuAllocator() {}
 /**
@@ -107,7 +107,7 @@ public:
  * @brief CPU pinned memory allocator implementation.
  */
 class CudaHostAllocator : public Allocator {
-public:
+ public:
   ~CudaHostAllocator() {}
 /**
diff --git a/paddle/math/BaseMatrix.cu b/paddle/legacy/math/BaseMatrix.cu
similarity index 99%
rename from paddle/math/BaseMatrix.cu
rename to paddle/legacy/math/BaseMatrix.cu
index 7b57419e5a..7e7cdc57a9 100644
--- a/paddle/math/BaseMatrix.cu
+++ b/paddle/legacy/math/BaseMatrix.cu
@@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License. */
-#include
+#include
 #include
 #include
 #include "BaseMatrix.h"
diff --git a/paddle/math/BaseMatrix.h b/paddle/legacy/math/BaseMatrix.h
similarity index 99%
rename from paddle/math/BaseMatrix.h
rename to paddle/legacy/math/BaseMatrix.h
index 00ce5a1949..4627f847d3 100644
--- a/paddle/math/BaseMatrix.h
+++ b/paddle/legacy/math/BaseMatrix.h
@@ -16,7 +16,7 @@ limitations under the License. */
 #include
 #include
 #include "TensorExpression.h"
-#include "paddle/utils/Common.h"
+#include "paddle/legacy/utils/Common.h"
 namespace paddle {
@@ -43,7 +43,7 @@ typedef bool_constant true_type;
   address += row * ld + col;
 class MatrixOffset {
-public:
+ public:
   size_t aCol_;
   size_t aRow_;
   size_t bCol_;
@@ -72,14 +72,14 @@ public:
 template
 class BaseMatrixT : public TensorExpression, T> {
-public:
+ public:
   size_t height_, width_;
   size_t stride_;
   T* data_;
   bool trans_;
   bool useGpu_;
-public:
+ public:
   virtual ~BaseMatrixT() {}
   BaseMatrixT(size_t height, size_t width, T* data, bool trans, bool useGpu)
       : height_(height),
diff --git a/paddle/math/CMakeLists.txt b/paddle/legacy/math/CMakeLists.txt
similarity index 80%
rename from paddle/math/CMakeLists.txt
rename to paddle/legacy/math/CMakeLists.txt
index 922fb51722..9992ec71f4 100644
--- a/paddle/math/CMakeLists.txt
+++ b/paddle/legacy/math/CMakeLists.txt
@@ -37,13 +37,13 @@ if(MOBILE_INFERENCE)
         ${CMAKE_CURRENT_SOURCE_DIR}/SparseRowMatrix.cpp)
 endif()
 set(MATH_SOURCES
-    "${PADDLE_SOURCE_DIR}/paddle/math/BaseMatrix.cu"
-    "${PADDLE_SOURCE_DIR}/paddle/math/TrainingAlgorithmOp.cu"
+    "${PADDLE_SOURCE_DIR}/paddle/legacy/math/BaseMatrix.cu"
+    "${PADDLE_SOURCE_DIR}/paddle/legacy/math/TrainingAlgorithmOp.cu"
     ${MATH_SOURCES})
 if(NOT WITH_GPU)
     # then compile BaseMatrix.cu as c++ file
-    compile_cu_as_cpp("${PADDLE_SOURCE_DIR}/paddle/math/BaseMatrix.cu")
-    compile_cu_as_cpp("${PADDLE_SOURCE_DIR}/paddle/math/TrainingAlgorithmOp.cu")
+    compile_cu_as_cpp("${PADDLE_SOURCE_DIR}/paddle/legacy/math/BaseMatrix.cu")
+    compile_cu_as_cpp("${PADDLE_SOURCE_DIR}/paddle/legacy/math/TrainingAlgorithmOp.cu")
     add_library(paddle_math STATIC ${MATH_SOURCES})
 else()
@@ -51,10 +51,6 @@ else()
 endif()
-
-add_style_check_target(paddle_math ${MATH_SOURCES})
-add_style_check_target(paddle_math ${MATH_HEADERS})
-
 add_dependencies(paddle_math paddle_proto ${external_project_dependencies})  # depends
 if(WITH_TESTING)
     add_subdirectory(tests)
diff --git a/paddle/math/CpuSparseMatrix.cpp b/paddle/legacy/math/CpuSparseMatrix.cpp
similarity index 99%
rename from paddle/math/CpuSparseMatrix.cpp
rename to paddle/legacy/math/CpuSparseMatrix.cpp
index 023450ffb7..20c65a3a1d 100644
--- a/paddle/math/CpuSparseMatrix.cpp
+++ b/paddle/legacy/math/CpuSparseMatrix.cpp
@@ -16,8 +16,8 @@ limitations under the License. */
 #include "SparseMatrix.h"
 #include "float.h"
 #include "hl_gpu.h"
-#include "paddle/math/MathUtils.h"
-#include "paddle/utils/Util.h"
+#include "paddle/legacy/math/MathUtils.h"
+#include "paddle/legacy/utils/Util.h"
 namespace paddle {
diff --git a/paddle/math/CpuSparseMatrix.h b/paddle/legacy/math/CpuSparseMatrix.h
similarity index 99%
rename from paddle/math/CpuSparseMatrix.h
rename to paddle/legacy/math/CpuSparseMatrix.h
index 22b6b71688..172792c295 100644
--- a/paddle/math/CpuSparseMatrix.h
+++ b/paddle/legacy/math/CpuSparseMatrix.h
@@ -22,7 +22,7 @@ limitations under the License. */
 namespace paddle {
 class CpuSparseMatrix : public Matrix {
-public:
+ public:
   CpuSparseMatrix(size_t height,
                   size_t width,
                   size_t nnz, /* used to allocate space */
@@ -291,10 +291,10 @@ public:
     LOG(FATAL) << "not supported!";
   }
-private:
+ private:
   MatrixPtr clone(size_t height = 0, size_t width = 0, bool useGpu = false);
-protected:
+ protected:
   void sparseResize();
   /*for csr , record row start position, for csc, record row index for every no
    * zero value*/
@@ -310,10 +310,10 @@ protected:
   static ThreadLocal> cpuLocalMats_;
   // BaseMatrixT interface
-public:
+ public:
   bool isSparse() const { return true; }
-private:
+ private:
   using Matrix::mul;
   using Matrix::copyFrom;
   using Matrix::rowMax;
@@ -329,7 +329,7 @@ private:
 namespace paddle {
 class CpuSparseMatrix : public Matrix {
-public:
+ public:
   CpuSparseMatrix(size_t height,
                   size_t width,
                   size_t nnz, /* used to allocate space */
diff --git a/paddle/math/ExecViaCpu.h b/paddle/legacy/math/ExecViaCpu.h
similarity index 98%
rename from paddle/math/ExecViaCpu.h
rename to paddle/legacy/math/ExecViaCpu.h
index 9b2a3c2b8a..ec2337545e 100644
--- a/paddle/math/ExecViaCpu.h
+++ b/paddle/legacy/math/ExecViaCpu.h
@@ -31,17 +31,17 @@ namespace paddle {
 template
 class CopyToCpu {
-public:
+ public:
   explicit CopyToCpu(Arg& arg) : arg_(arg) {}
   Arg& copiedArg() const { return arg_; }
-private:
+ private:
   Arg& arg_;
 };
 template <>
 class CopyToCpu {
-public:
+ public:
   explicit CopyToCpu(Matrix& arg) : arg_(arg) {
     if (arg.useGpu()) {
       CHECK(!arg.isTransposed()) << "Not supported";
@@ -59,14 +59,14 @@ public:
   }
   Matrix& copiedArg() const { return copied_ ? *copied_ : arg_; }
-private:
+ private:
   Matrix& arg_;
   MatrixPtr copied_;
 };
 template <>
 class CopyToCpu {
-public:
+ public:
   explicit CopyToCpu(const Matrix& arg) : arg_(arg) {
     if (arg.useGpu()) {
       CHECK(!arg.isTransposed()) << "Not supported";
@@ -79,14 +79,14 @@ public:
   }
   const Matrix& copiedArg() const { return copied_ ? *copied_ : arg_; }
-private:
+ private:
   const Matrix& arg_;
   MatrixPtr copied_;
 };
 template <>
 class CopyToCpu {
-public:
+ public:
   explicit CopyToCpu(IVector& arg) : arg_(arg) {
     if (arg.useGpu()) {
       copied_ = IVector::create(arg.getSize(), /* useGpu= */ false);
@@ -100,14 +100,14 @@ public:
   }
   IVector& copiedArg() const { return copied_ ? *copied_ : arg_; }
-private:
+ private:
   IVector& arg_;
   IVectorPtr copied_;
 };
 template <>
 class CopyToCpu {
-public:
+ public:
   explicit CopyToCpu(const IVector& arg) : arg_(arg) {
     if (arg.useGpu()) {
       copied_ = IVector::create(arg.getSize(), /* useGpu= */ false);
@@ -116,7 +116,7 @@ public:
   }
   const IVector& copiedArg() const { return copied_ ? *copied_ : arg_; }
-private:
+ private:
   const IVector& arg_;
   IVectorPtr copied_;
 };
@@ -128,7 +128,7 @@ class GpuFuncWrapperImp;
 template
 class GpuFuncWrapperBase {
-public:
+ public:
   typedef R ResultType;
   R operator()(F&& f, Args... args) {
     return f(CopyToCpu::type>(args)
diff --git a/paddle/math/MKLDNNMatrix.cpp b/paddle/legacy/math/MKLDNNMatrix.cpp
similarity index 100%
rename from paddle/math/MKLDNNMatrix.cpp
rename to paddle/legacy/math/MKLDNNMatrix.cpp
diff --git a/paddle/math/MKLDNNMatrix.h b/paddle/legacy/math/MKLDNNMatrix.h
similarity index 98%
rename from paddle/math/MKLDNNMatrix.h
rename to paddle/legacy/math/MKLDNNMatrix.h
index e1fb81679a..5a0e5f8592 100644
--- a/paddle/math/MKLDNNMatrix.h
+++ b/paddle/legacy/math/MKLDNNMatrix.h
@@ -17,7 +17,7 @@ limitations under the License. */
 #include
 #include "Matrix.h"
 #include "mkldnn.hpp"
-#include "paddle/parameter/Parameter.h"
+#include "paddle/legacy/parameter/Parameter.h"
 namespace paddle {
@@ -35,7 +35,7 @@ typedef std::shared_ptr MKLDNNMatrixPtr;
  *
  */
 class MKLDNNMatrix : public CpuMatrix, public mkldnn::memory {
-public:
+ public:
   MKLDNNMatrix(CpuMatrixPtr m, mkldnn::memory::primitive_desc pd)
       : CpuMatrix(m->getData(), m->getHeight(), m->getWidth(), false),
         mkldnn::memory(pd, m->getData()),
@@ -107,7 +107,7 @@ public:
     dst.copyFrom(*m_);
   }
-public:
+ public:
   /**
    * Reorder this MKLDNNMatrix from other format.
    * Support inplace reorder.
@@ -226,7 +226,7 @@ public:
    */
   mkldnn::engine getEngine() { return getPrimitiveDesc().get_engine(); }
-protected:
+ protected:
   /**
    * Do reorder once.
    * Can support inplace.
@@ -248,7 +248,7 @@ protected:
     set_data_handle(data);
   }
-private:
+ private:
   // save the CpuMatrixPtr in case the buffer released outside
   CpuMatrixPtr m_;
 };
diff --git a/paddle/math/MathFunctions.cpp b/paddle/legacy/math/MathFunctions.cpp
similarity index 93%
rename from paddle/math/MathFunctions.cpp
rename to paddle/legacy/math/MathFunctions.cpp
index de404cad89..bbf34a32f3 100644
--- a/paddle/math/MathFunctions.cpp
+++ b/paddle/legacy/math/MathFunctions.cpp
@@ -12,10 +12,10 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License. */
-#include "MathFunctions.h"
+#include "paddle/legacy/math/MathFunctions.h"
 #include "hl_matrix_apply.cuh"
 #include "hl_matrix_ops.cuh"
-#include "paddle/utils/DynamicLoader.h"
+#include "paddle/legacy/utils/DynamicLoader.h"
 namespace dynload {
@@ -240,6 +240,36 @@ template <>
 void vAdd(const int n, const double* a, const double* b, double* r) {
   vdAdd(n, a, b, r);
 }
+
+template <>
+void vTanh(const int n, const float* a, float* r) {
+  vsTanh(n, a, r);
+}
+
+template <>
+void vTanh(const int n, const double* a, double* r) {
+  vdTanh(n, a, r);
+}
+
+template <>
+void vInvSqrt(const int n, const float* a, float* r) {
+  vsInvSqrt(n, a, r);
+}
+
+template <>
+void vInvSqrt(const int n, const double* a, double* r) {
+  vdInvSqrt(n, a, r);
+}
+
+template <>
+void vLog1p(const int n, const float* a, float* r) {
+  vsLog1p(n, a, r);
+}
+
+template <>
+void vLog1p(const int n, const double* a, double* r) {
+  vdLog1p(n, a, r);
+}
 #else
 DEFINE_MATRIX_BINARY_OP(vExp, b = std::exp(a));
@@ -277,17 +307,6 @@ void vAdd(const int n, const T* a, const T* b, T* r) {
               n);
 }
-template void vExp(const int n, const float* a, float* r);
-template void vExp(const int n, const double* a, double* r);
-template void vLog(const int n, const float* a, float* r);
-template void vLog(const int n, const double* a, double* r);
-template void vPow(const int n, const float* a, const float b, float* r);
-template void vPow(const int n, const double* a, const double b, double* r);
-template void vAdd(const int n, const float* a, const float* b, float* r);
-template void vAdd(const int n, const double* a, const double* b, double* r);
-
-#endif
-
 DEFINE_MATRIX_BINARY_OP(vInvSqrt, b = 1.0f / std::sqrt(a));
 template
 void vInvSqrt(const int n, const T* a, T* r) {
@@ -311,11 +330,19 @@ void vTanh(const int n, const T* a, T* r) {
                                binary::vTanh(), const_cast(a), r, 1, n, n, n);
 }
+template void vExp(const int n, const float* a, float* r);
+template void vExp(const int n, const double* a, double* r);
+template void vLog(const int n, const float* a, float* r);
+template void vLog(const int n, const double* a, double* r);
+template void vPow(const int n, const float* a, const float b, float* r);
+template void vPow(const int n, const double* a, const double b, double* r);
+template void vAdd(const int n, const float* a, const float* b, float* r);
+template void vAdd(const int n, const double* a, const double* b, double* r);
 template void vInvSqrt(const int n, const double* a, double* r);
 template void vInvSqrt(const int n, const float* a, float* r);
 template void vLog1p(const int n, const float* a, float* r);
 template void vLog1p(const int n, const double* a, double* r);
 template void vTanh(const int n, const float* a, float* r);
 template void vTanh(const int n, const double* a, double* r);
-
+#endif
 }  // namespace paddle
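The MathFunctions.cpp hunks above do two things: they add MKL-backed specializations for vTanh/vInvSqrt/vLog1p, and they move the explicit instantiations so they live entirely inside the non-MKL branch. A minimal standalone sketch of that dispatch pattern for one function (the MKL header name and the plain-loop fallback are assumptions; Paddle's real fallback goes through its matrix-apply templates):

```cpp
#include <cmath>
#ifdef PADDLE_WITH_MKLML
#include <mkl.h>  // assumed header declaring the VML kernels vsTanh/vdTanh
#endif

// Generic interface: r[i] = tanh(a[i]) for i in [0, n).
template <class T>
void vTanh(const int n, const T* a, T* r);

#ifdef PADDLE_WITH_MKLML
// MKL path: per-precision specializations call the fused vector kernels,
// so no generic definition (and no explicit instantiation) is needed.
template <>
void vTanh<float>(const int n, const float* a, float* r) {
  vsTanh(n, a, r);
}
template <>
void vTanh<double>(const int n, const double* a, double* r) {
  vdTanh(n, a, r);
}
#else
// Fallback path: a generic definition plus explicit instantiations, which is
// why the "template void ..." lines now sit inside the #else in the hunk.
template <class T>
void vTanh(const int n, const T* a, T* r) {
  for (int i = 0; i < n; ++i) r[i] = std::tanh(a[i]);
}
template void vTanh<float>(const int n, const float* a, float* r);
template void vTanh<double>(const int n, const double* a, double* r);
#endif
```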
diff --git a/paddle/math/MathFunctions.h b/paddle/legacy/math/MathFunctions.h
similarity index 96%
rename from paddle/math/MathFunctions.h
rename to paddle/legacy/math/MathFunctions.h
index f3d8b1a39e..854e4baa39 100644
--- a/paddle/math/MathFunctions.h
+++ b/paddle/legacy/math/MathFunctions.h
@@ -12,8 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License. */
-#ifndef MATHFUNCTIONS_H_
-#define MATHFUNCTIONS_H_
+#pragma once
 #ifdef PADDLE_WITH_MKLML
 #include
@@ -21,7 +20,7 @@ limitations under the License. */
 #include
 #endif
-#if defined(PADDLE_USE_VECLIB)
+#ifdef PADDLE_USE_VECLIB
 extern "C" {
 #include
 #include
@@ -30,8 +29,10 @@ extern "C" {
 #ifdef PADDLE_USE_OPENBLAS
 #include
+#ifdef LAPACK_FOUND
 #include
 #endif
+#endif
 #ifndef LAPACK_FOUND
 extern "C" {
@@ -126,5 +127,3 @@ template
 void vTanh(const int n, const T* a, T* r);
 }  // namespace paddle
-
-#endif  // MATHFUNCTIONS_H_
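The MathFunctions.h diff above modernizes the header prologue: the named include guard becomes #pragma once, and the OpenBLAS LAPACK include is now gated on LAPACK_FOUND. A schematic of the resulting prologue (the angle-bracket header names are elided in the diff; cblas.h/lapacke.h below are assumptions based on what OpenBLAS ships):

```cpp
#pragma once  // replaces the #ifndef MATHFUNCTIONS_H_ / #define / #endif trio

#ifdef PADDLE_USE_OPENBLAS
#include <cblas.h>    // assumed OpenBLAS BLAS header
#ifdef LAPACK_FOUND
#include <lapacke.h>  // assumed; only pulled in when the build found LAPACK
#endif
#endif
```

With #pragma once there is no guard macro that has to be kept unique and in sync with the file path, which matters here since every header in this patch just changed location.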
diff --git a/paddle/math/MathUtils.cpp b/paddle/legacy/math/MathUtils.cpp
similarity index 98%
rename from paddle/math/MathUtils.cpp
rename to paddle/legacy/math/MathUtils.cpp
index b2afdbcd51..47ac9c187c 100644
--- a/paddle/math/MathUtils.cpp
+++ b/paddle/legacy/math/MathUtils.cpp
@@ -15,7 +15,7 @@ limitations under the License. */
 #include "MathUtils.h"
 #include
 #include "Vector.h"
-#include "paddle/utils/Logging.h"
+#include "paddle/legacy/utils/Logging.h"
 namespace paddle {
diff --git a/paddle/math/MathUtils.h b/paddle/legacy/math/MathUtils.h
similarity index 100%
rename from paddle/math/MathUtils.h
rename to paddle/legacy/math/MathUtils.h
diff --git a/paddle/math/Matrix.cpp b/paddle/legacy/math/Matrix.cpp
similarity index 99%
rename from paddle/math/Matrix.cpp
rename to paddle/legacy/math/Matrix.cpp
index bcd6dfe1fd..e53f95006c 100644
--- a/paddle/math/Matrix.cpp
+++ b/paddle/legacy/math/Matrix.cpp
@@ -26,11 +26,11 @@ limitations under the License. */
 #include "hl_gpu.h"
 #include "hl_table_apply.h"
 #include "hl_top_k.h"
-#include "paddle/utils/Logging.h"
+#include "paddle/legacy/utils/Logging.h"
 #include "NEONFunctions.h"
-#include "paddle/function/GemmFunctor.h"
-#include "paddle/utils/ThreadLocal.h"
+#include "paddle/legacy/function/GemmFunctor.h"
+#include "paddle/legacy/utils/ThreadLocal.h"
 #include "SIMDFunctions.h"
diff --git a/paddle/math/Matrix.h b/paddle/legacy/math/Matrix.h
similarity index 99%
rename from paddle/math/Matrix.h
rename to paddle/legacy/math/Matrix.h
index 04e9614eab..ff4f4cfc2a 100644
--- a/paddle/math/Matrix.h
+++ b/paddle/legacy/math/Matrix.h
@@ -18,20 +18,20 @@ limitations under the License. */
 #include
 #include
-#include "paddle/utils/Logging.h"
-#include "paddle/utils/ThreadLocal.h"
+#include "paddle/legacy/utils/Logging.h"
+#include "paddle/legacy/utils/ThreadLocal.h"
 #include
 #include "BaseMatrix.h"
 #include "MemoryHandle.h"
 #include "Vector.h"
-#include "paddle/utils/Common.h"
-#include "paddle/utils/ThreadLocal.h"
+#include "paddle/legacy/utils/Common.h"
+#include "paddle/legacy/utils/ThreadLocal.h"
 namespace paddle {
-/// TODO(tianbing), move to paddle/function/TensorType.h
+/// TODO(tianbing), move to paddle/legacy/function/TensorType.h
 enum SparseValueType { NO_VALUE = 0, FLOAT_VALUE = 1 };
 /**
@@ -57,7 +57,7 @@ enum SparseValueType { NO_VALUE = 0, FLOAT_VALUE = 1 };
  * value [1, 1, 2, 2, 5]
  * @endcode
  */
-/// TODO(tianbing), move to paddle/function/TensorType.h
+/// TODO(tianbing), move to paddle/legacy/function/TensorType.h
 enum SparseFormat { SPARSE_CSR = 0, SPARSE_CSC = 1 };
 class Matrix;
@@ -77,7 +77,7 @@ typedef std::shared_ptr CpuSparseMatrixPtr;
  * instead.
  */
 class Matrix : public BaseMatrix {
-protected:
+ protected:
   Matrix(MemoryHandlePtr memHandle,
          size_t height,
          size_t width,
@@ -95,11 +95,11 @@ protected:
   static ThreadLocal tmpMat_;
-public:
+ public:
   size_t elementCnt_;  // maximal number of elements which can be held in data_
   MemoryHandlePtr memoryHandle_;
-public:
+ public:
   virtual ~Matrix() {}
   static MatrixPtr create(MemoryHandlePtr memHandle,
@@ -412,7 +412,7 @@ public:
     LOG(FATAL) << "Not implemented";
   }
-public:
+ public:
   /// Only set all variables to 0 or NULL but not free them.
   virtual void clear() {
     height_ = 0;
@@ -1228,7 +1228,7 @@ inline std::ostream& operator<<(std::ostream& os, const Matrix& mat) {
 }
 class GpuMatrix : public Matrix {
-public:
+ public:
   GpuMatrix();
   GpuMatrix(size_t height, size_t width, bool trans = false);
@@ -1660,11 +1660,11 @@ public:
 };
 class CpuMatrix : public Matrix {
-private:
+ private:
   MatrixPtr sftmaxSum_;
   MatrixPtr sftmaxDot_;
-public:
+ public:
   CpuMatrix(size_t height, size_t width, bool trans = false);
   CpuMatrix(real* data, size_t height, size_t width, bool trans = false)
       : Matrix(data, height, width, trans, false) {}
@@ -1892,7 +1892,7 @@ public:
   real* getRow(size_t row) { return BaseMatrix::rowBuf(row); }
   virtual real* getRowBuf(size_t row) { return getRow(row); }
-public:
+ public:
   /// add b to each sample of this.
   void addBias(Matrix& b, real scale);
   void addSharedBias(Matrix& b, real scale);
@@ -2128,7 +2128,7 @@ public:
 };
 class SharedCpuMatrix : public CpuMatrix {
-public:
+ public:
 #ifndef PADDLE_MOBILE_INFERENCE
   /* blockNum is number of partitions of the matrix */
   SharedCpuMatrix(int blockNum, size_t height, size_t width, bool trans = false)
@@ -2160,12 +2160,12 @@ public:
   ~SharedCpuMatrix() {}
-public:
+ public:
   virtual void mul(CpuSparseMatrix* a, CpuMatrix* b, real scaleAB, real scaleT);
   virtual void add(Matrix& b, real p1, real p2);
   virtual void add(real p1, real p2);
-private:
+ private:
   using Matrix::mul;
   void initShared(int blockNum);
   void initBlock(int blockNum);
diff --git a/paddle/math/MatrixBitCode.cpp b/paddle/legacy/math/MatrixBitCode.cpp
similarity index 98%
rename from paddle/math/MatrixBitCode.cpp
rename to paddle/legacy/math/MatrixBitCode.cpp
index 61a9923bc2..f35f266a30 100644
--- a/paddle/math/MatrixBitCode.cpp
+++ b/paddle/legacy/math/MatrixBitCode.cpp
@@ -14,8 +14,8 @@ limitations under the License. */
*/ #include "Matrix.h" #include "hl_gpu.h" -#include "paddle/utils/Logging.h" -#include "paddle/utils/Util.h" +#include "paddle/legacy/utils/Logging.h" +#include "paddle/legacy/utils/Util.h" namespace paddle { @@ -27,7 +27,7 @@ struct SimpleCode { inline bool calcBit(int bit) const { return c_ & (1 << bit); } inline int getLength() const { return findLastSet(c_) - 1; } -private: + private: size_t c_; }; @@ -39,7 +39,7 @@ struct SimpleCodeTable { size_t size() const { return numClasses_; } int getMaxCodeLength() const { return findLastSet(numClasses_ - 1); } -private: + private: size_t numClasses_; int maxCodeLength_; }; diff --git a/paddle/math/MemoryHandle.cpp b/paddle/legacy/math/MemoryHandle.cpp similarity index 100% rename from paddle/math/MemoryHandle.cpp rename to paddle/legacy/math/MemoryHandle.cpp diff --git a/paddle/math/MemoryHandle.h b/paddle/legacy/math/MemoryHandle.h similarity index 97% rename from paddle/math/MemoryHandle.h rename to paddle/legacy/math/MemoryHandle.h index 03ee413c12..516e09dbed 100644 --- a/paddle/math/MemoryHandle.h +++ b/paddle/legacy/math/MemoryHandle.h @@ -20,16 +20,16 @@ limitations under the License. */ namespace paddle { class MemoryHandle { -protected: + protected: explicit MemoryHandle(size_t size); virtual ~MemoryHandle() {} -public: + public: void* getBuf() const { return buf_; } size_t getSize() const { return size_; } size_t getAllocSize() const { return allocSize_; } -protected: + protected: PoolAllocator* allocator_; size_t size_; // the requested size size_t allocSize_; // the allocated size @@ -43,7 +43,7 @@ protected: * The raw handle will be released at destructor */ class GpuMemoryHandle : public MemoryHandle { -public: + public: explicit GpuMemoryHandle(size_t size); virtual ~GpuMemoryHandle(); }; @@ -54,7 +54,7 @@ public: * The raw handle will be released at destructor */ class CpuMemoryHandle : public MemoryHandle { -public: + public: explicit CpuMemoryHandle(size_t size); virtual ~CpuMemoryHandle(); }; diff --git a/paddle/math/NEONFunctions.cpp b/paddle/legacy/math/NEONFunctions.cpp similarity index 100% rename from paddle/math/NEONFunctions.cpp rename to paddle/legacy/math/NEONFunctions.cpp diff --git a/paddle/math/NEONFunctions.h b/paddle/legacy/math/NEONFunctions.h similarity index 100% rename from paddle/math/NEONFunctions.h rename to paddle/legacy/math/NEONFunctions.h diff --git a/paddle/math/PoolAllocator.cpp b/paddle/legacy/math/PoolAllocator.cpp similarity index 100% rename from paddle/math/PoolAllocator.cpp rename to paddle/legacy/math/PoolAllocator.cpp diff --git a/paddle/math/PoolAllocator.h b/paddle/legacy/math/PoolAllocator.h similarity index 98% rename from paddle/math/PoolAllocator.h rename to paddle/legacy/math/PoolAllocator.h index 90141fef3f..7239cf1c44 100644 --- a/paddle/math/PoolAllocator.h +++ b/paddle/legacy/math/PoolAllocator.h @@ -27,7 +27,7 @@ namespace paddle { * @brief Memory pool allocator implementation. */ class PoolAllocator { -public: + public: /** * @brief constructor. * @param allocator a Allocator object. @@ -47,7 +47,7 @@ public: void free(void* ptr, size_t size); std::string getName() { return name_; } -private: + private: void freeAll(); void printAll(); std::unique_ptr allocator_; diff --git a/paddle/math/RowBuffer.h b/paddle/legacy/math/RowBuffer.h similarity index 98% rename from paddle/math/RowBuffer.h rename to paddle/legacy/math/RowBuffer.h index 2e4d11a86b..9dfd5eff06 100644 --- a/paddle/math/RowBuffer.h +++ b/paddle/legacy/math/RowBuffer.h @@ -15,7 +15,7 @@ limitations under the License. 
diff --git a/paddle/math/RowBuffer.h b/paddle/legacy/math/RowBuffer.h
similarity index 98%
rename from paddle/math/RowBuffer.h
rename to paddle/legacy/math/RowBuffer.h
index 2e4d11a86b..9dfd5eff06 100644
--- a/paddle/math/RowBuffer.h
+++ b/paddle/legacy/math/RowBuffer.h
@@ -15,7 +15,7 @@ limitations under the License. */
 #pragma once
 #include
 #include "MemoryHandle.h"
-#include "paddle/utils/Util.h"
+#include "paddle/legacy/utils/Util.h"
 namespace paddle {
@@ -26,7 +26,7 @@ namespace paddle {
  * If no memory handler is set, the data can grow automatically.
 */
 class RowBuffer {
-public:
+ public:
   /**
   * @brief RowBuffer creates an auto-growth row buffer. The row length is width.
   * @param width the length of each row, a.k.a matrix width.
   */
@@ -129,7 +129,7 @@ public:
   */
   inline size_t getWidth() const { return width_; }
-private:
+ private:
   //! TODO(yuyang18): Add resize method to CpuMemHandlePtr, then we can get rid
   //! of std::vector here.
   CpuMemHandlePtr preallocatedBuf_;
diff --git a/paddle/math/SIMDFunctions.cpp b/paddle/legacy/math/SIMDFunctions.cpp
similarity index 100%
rename from paddle/math/SIMDFunctions.cpp
rename to paddle/legacy/math/SIMDFunctions.cpp
diff --git a/paddle/math/SIMDFunctions.h b/paddle/legacy/math/SIMDFunctions.h
similarity index 100%
rename from paddle/math/SIMDFunctions.h
rename to paddle/legacy/math/SIMDFunctions.h
diff --git a/paddle/math/SparseMatrix.cpp b/paddle/legacy/math/SparseMatrix.cpp
similarity index 99%
rename from paddle/math/SparseMatrix.cpp
rename to paddle/legacy/math/SparseMatrix.cpp
index 1faa343dbc..6f68252b0a 100644
--- a/paddle/math/SparseMatrix.cpp
+++ b/paddle/legacy/math/SparseMatrix.cpp
@@ -18,7 +18,7 @@ limitations under the License. */
 #include
 #include "hl_gpu.h"
 #include "hl_top_k.h"
-#include "paddle/utils/Util.h"
+#include "paddle/legacy/utils/Util.h"
 namespace paddle {
diff --git a/paddle/math/SparseMatrix.h b/paddle/legacy/math/SparseMatrix.h
similarity index 98%
rename from paddle/math/SparseMatrix.h
rename to paddle/legacy/math/SparseMatrix.h
index 7c525f4edf..9181fa2923 100644
--- a/paddle/math/SparseMatrix.h
+++ b/paddle/legacy/math/SparseMatrix.h
@@ -25,7 +25,7 @@ namespace paddle {
 typedef std::shared_ptr<_hl_sparse_matrix_s> hl_sparse_matrix_s_ptr;
 class GpuSparseMatrix : public Matrix {
-public:
+ public:
   MemoryHandlePtr sMemoryHandle_;
   int* rows_;
   int* cols_;
@@ -36,7 +36,7 @@ public:
   SparseValueType valueType_;
   SparseFormat format_;
-public:
+ public:
   GpuSparseMatrix(size_t height,
                   size_t width,
                   size_t nnz, /* used to allocate space */
@@ -73,7 +73,7 @@ public:
                   bool trans,
                   MemoryHandlePtr sMemoryHandle);
-protected:
+ protected:
   struct Element {
     int row;
     int col;
@@ -82,7 +82,7 @@ protected:
         : row(rowIn), col(colIn), val(valIn) {}
   };
-public:
+ public:
   ~GpuSparseMatrix() {}
   void resize(size_t newHeight,
@@ -211,13 +211,13 @@ public:
   */
   void rowMax(IVector& maxIds, Matrix& maxVal);
-protected:
+ protected:
   void sparseResize();
   void copyRow(int offsets, size_t colNum, const sparse_non_value_t* row);
   void copyRow(int offsets, size_t colNum, const sparse_float_value_t* row);
-public:
+ public:
   void mul(const Matrix& a, const Matrix& b, real scaleAB, real scaleT);
   void copyFrom(CpuSparseMatrix& src, hl_stream_t stream);
@@ -228,10 +228,10 @@ public:
   void trimFromCSC(const CpuSparseMatrix& src);
   // BaseMatrixT interface
-public:
+ public:
   bool isSparse() const { return true; }
-private:
+ private:
   using Matrix::mul;
   using Matrix::copyFrom;
   using Matrix::rowMax;
@@ -248,7 +248,7 @@ private:
 namespace paddle {
 class GpuSparseMatrix : public Matrix {
-public:
+ public:
   GpuSparseMatrix(size_t height,
                   size_t width,
                   size_t nnz, /* used to allocate space */
diff --git a/paddle/math/SparseRowMatrix.cpp b/paddle/legacy/math/SparseRowMatrix.cpp
similarity index 98%
rename from paddle/math/SparseRowMatrix.cpp
rename to paddle/legacy/math/SparseRowMatrix.cpp
index 4254175aab..39bcdf2298 100644
--- a/paddle/math/SparseRowMatrix.cpp
+++ b/paddle/legacy/math/SparseRowMatrix.cpp
@@ -17,12 +17,12 @@ limitations under the License. */
 #include
-#include "paddle/utils/Logging.h"
+#include "paddle/legacy/utils/Logging.h"
 #include "SIMDFunctions.h"
-#include "paddle/utils/Thread.h"
-#include "paddle/utils/Util.h"
+#include "paddle/legacy/utils/Thread.h"
+#include "paddle/legacy/utils/Util.h"
 namespace paddle {
diff --git a/paddle/math/SparseRowMatrix.h b/paddle/legacy/math/SparseRowMatrix.h
similarity index 98%
rename from paddle/math/SparseRowMatrix.h
rename to paddle/legacy/math/SparseRowMatrix.h
index 3920de32df..e206747a41 100644
--- a/paddle/math/SparseRowMatrix.h
+++ b/paddle/legacy/math/SparseRowMatrix.h
@@ -21,7 +21,7 @@ limitations under the License. */
 #include
 #include "Matrix.h"
 #include "RowBuffer.h"
-#include "paddle/utils/Util.h"
+#include "paddle/legacy/utils/Util.h"
 namespace paddle {
@@ -29,7 +29,7 @@ namespace paddle {
  * Sparse Row
 */
 class SparseRowCpuMatrix : public CpuMatrix {
-public:
+ public:
   struct IndexDict {
     // In the following, global id means the row id in the original matrix.
     // Local id means the row id in the local storage which only contains
@@ -53,7 +53,7 @@ public:
   virtual ~SparseRowCpuMatrix() {}
-public:
+ public:
   /**
   * Get the row buf
   *
@@ -163,7 +163,7 @@ public:
     return indexDictHandle_->localIndices;
   }
-protected:
+ protected:
   template
   void apply(Func f) {
     f(buf_->data(), localIndices_->size() * width_);
@@ -204,7 +204,7 @@ class SyncThreadPool;
 /// For prefetching parameters from remote Parameter server
 class SparsePrefetchRowCpuMatrix : public SparseRowCpuMatrix {
-public:
+ public:
   SparsePrefetchRowCpuMatrix(CpuMemHandlePtr dataHandle,
                              size_t height,
                              size_t width,
@@ -229,13 +229,13 @@ public:
   */
   void setupIndices();
-protected:
+ protected:
   void addRows(const unsigned int* ids, size_t len);
   SyncThreadPool* pool_;
 };
 class SparseAutoGrowRowCpuMatrix : public SparseRowCpuMatrix {
-public:
+ public:
   SparseAutoGrowRowCpuMatrix(size_t height,
                              size_t width,
                              IndexDictPtr indexDictHandle = nullptr,
@@ -258,7 +258,7 @@ public:
 };
 class CacheRowCpuMatrix : public SparseAutoGrowRowCpuMatrix {
-public:
+ public:
   CacheRowCpuMatrix(size_t height,
                     size_t width,
                     IndexDictPtr indexDictHandle = nullptr,
@@ -287,7 +287,7 @@ public:
   virtual void mul(CpuSparseMatrix* a, CpuMatrix* b, real scaleAB, real scaleT);
-public:
+ public:
   CpuVectorPtr sourceDataVec_;
   real* sourceData_;
 };
@@ -299,7 +299,7 @@ public:
  * ids are hashed by worker thread id.
 */
 class SparseRowIdsCpuMatrix : public CpuMatrix {
-public:
+ public:
   SparseRowIdsCpuMatrix(CpuMemHandlePtr dataHandle,
                         size_t height,
                         size_t width,
@@ -310,7 +310,7 @@ public:
   std::vector& getIds(size_t threadId) { return idsArray_[threadId]; }
-private:
+ private:
   std::vector> idsArray_;
 };
@@ -320,13 +320,13 @@ private:
 namespace paddle {
 class SparseRowCpuMatrix : public CpuMatrix {
-public:
+ public:
   void reserveStore() {}
   void clearIndices() {}
 };
 class SparsePrefetchRowCpuMatrix : public SparseRowCpuMatrix {
-public:
+ public:
   void setupIndices() {}
   void addRows(MatrixPtr input) {}
   void addRows(IVectorPtr ids) {}
diff --git a/paddle/math/Storage.cpp b/paddle/legacy/math/Storage.cpp
similarity index 97%
rename from paddle/math/Storage.cpp
rename to paddle/legacy/math/Storage.cpp
index 5982bf2e56..65d53aeaa9 100644
--- a/paddle/math/Storage.cpp
+++ b/paddle/legacy/math/Storage.cpp
@@ -14,8 +14,8 @@ limitations under the License.
 */
 #include "Storage.h"
 #include "Allocator.h"
-#include "paddle/utils/StringUtil.h"
-#include "paddle/utils/Util.h"
+#include "paddle/legacy/utils/StringUtil.h"
+#include "paddle/legacy/utils/Util.h"
 #ifndef PADDLE_MOBILE_INFERENCE
 DEFINE_int32(pool_limit_size,
diff --git a/paddle/math/Storage.h b/paddle/legacy/math/Storage.h
similarity index 95%
rename from paddle/math/Storage.h
rename to paddle/legacy/math/Storage.h
index ba8f4689a1..bd22dde2c8 100644
--- a/paddle/math/Storage.h
+++ b/paddle/legacy/math/Storage.h
@@ -17,7 +17,7 @@ limitations under the License. */
 #include
 #include
 #include "PoolAllocator.h"
-#include "paddle/utils/Locks.h"
+#include "paddle/legacy/utils/Locks.h"
 namespace paddle {
@@ -25,7 +25,7 @@ namespace paddle {
  * @brief Storage manager for multiple devices.
 */
 class StorageEngine {
-public:
+ public:
   /**
   * @return Storage singleton
   */
@@ -41,7 +41,7 @@ public:
   */
   PoolAllocator* getCpuAllocator();
-protected:
+ protected:
   StorageEngine();
   ~StorageEngine();
   RWLock lock_;
diff --git a/paddle/math/TensorApply.h b/paddle/legacy/math/TensorApply.h
similarity index 99%
rename from paddle/math/TensorApply.h
rename to paddle/legacy/math/TensorApply.h
index 7d79cae5a1..8b642047bf 100644
--- a/paddle/math/TensorApply.h
+++ b/paddle/legacy/math/TensorApply.h
@@ -21,7 +21,7 @@ namespace paddle {
 */
 template
 class TensorApply {
-public:
+ public:
   explicit INLINE TensorApply(const Derived& p)
       : data_(p.data_),
         stride_(p.stride_),
@@ -52,7 +52,7 @@
 */
 template
 class TensorApply {
-public:
+ public:
   explicit INLINE TensorApply(const Derived& p)
       : data_(p.data_),
         stride_(p.stride_),
@@ -77,7 +77,7 @@
 template
 class TensorApply, T> {
-public:
+ public:
   explicit TensorApply(const TensorExpression& expr)
       : expr_(expr.derived()) {}
@@ -97,7 +97,7 @@
 */
 template
 class TensorApply, T> {
-public:
+ public:
   explicit INLINE TensorApply(const TensorUnaryOp& expr)
       : op_(expr.op_), expr_(expr.expr_) {}
@@ -118,7 +118,7 @@
 */
 template
 class TensorApply, T> {
-public:
+ public:
   explicit INLINE TensorApply(
       const TensorBinaryOp& expr)
       : op_(expr.op_), lhs_(expr.lhs_), rhs_(expr.rhs_) {
@@ -153,7 +153,7 @@
 */
 template
 class TensorApply, T> {
-public:
+ public:
   explicit INLINE TensorApply(
       const TensorTernaryOp& expr)
       : expr1_(expr.expr1_), expr2_(expr.expr2_), expr3_(expr.expr3_) {
@@ -192,7 +192,7 @@
 */
 template
 class TensorApply, T> {
-public:
+ public:
   explicit INLINE TensorApply(const TensorConstant& expr)
       : op_(expr.op_), expr_(expr.expr_) {}
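StorageEngine, renamed above, is a classic process-wide singleton guarding per-device allocators behind an RWLock. As a reminder of the pattern (names and members here are illustrative, not paddle's actual implementation), a minimal thread-safe singleton in modern C++ can rely on function-local static initialization:

#include <mutex>
#include <vector>

class StorageEngineSketch {
 public:
  // C++11 guarantees this initialization happens exactly once, thread-safely.
  static StorageEngineSketch* singleton() {
    static StorageEngineSketch instance;
    return &instance;
  }
  StorageEngineSketch(const StorageEngineSketch&) = delete;
  StorageEngineSketch& operator=(const StorageEngineSketch&) = delete;

 private:
  StorageEngineSketch() = default;
  std::mutex lock_;                   // paddle uses an RWLock here
  std::vector<void*> gpuAllocators_;  // placeholder for per-device allocators
};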
diff --git a/paddle/math/TensorAssign.h b/paddle/legacy/math/TensorAssign.h
similarity index 98%
rename from paddle/math/TensorAssign.h
rename to paddle/legacy/math/TensorAssign.h
index 113d98c16b..efbfce6c4f 100644
--- a/paddle/math/TensorAssign.h
+++ b/paddle/legacy/math/TensorAssign.h
@@ -15,7 +15,7 @@ limitations under the License. */
 #pragma once
 #include
-#include "paddle/utils/Logging.h"
+#include "paddle/legacy/utils/Logging.h"
 namespace paddle {
@@ -25,7 +25,7 @@ namespace paddle {
 */
 template
 class TensorAssignOp {
-public:
+ public:
   explicit TensorAssignOp(const LhsType& lhs, const RhsType& rhs)
       : lhs_(lhs), rhs_(rhs) {
 #ifndef __CUDA_ARCH__
@@ -49,7 +49,7 @@ public:
   }
   INLINE bool useGpu() const { return lhs_.useGpu(); }
-private:
+ private:
   TensorApply lhs_;
   TensorApply rhs_;
 };
diff --git a/paddle/math/TensorEvaluate.h b/paddle/legacy/math/TensorEvaluate.h
similarity index 98%
rename from paddle/math/TensorEvaluate.h
rename to paddle/legacy/math/TensorEvaluate.h
index 2a722016e7..3029dd35fb 100644
--- a/paddle/math/TensorEvaluate.h
+++ b/paddle/legacy/math/TensorEvaluate.h
@@ -16,7 +16,7 @@ limitations under the License. */
 #include
 #include "hl_base.h"
-#include "paddle/utils/Logging.h"
+#include "paddle/legacy/utils/Logging.h"
 namespace paddle {
diff --git a/paddle/math/TensorExpression.h b/paddle/legacy/math/TensorExpression.h
similarity index 99%
rename from paddle/math/TensorExpression.h
rename to paddle/legacy/math/TensorExpression.h
index 83229ae65d..1c6cf07831 100644
--- a/paddle/math/TensorExpression.h
+++ b/paddle/legacy/math/TensorExpression.h
@@ -16,8 +16,8 @@ limitations under the License. */
 #include
 #include
 #include "hl_tensor_ops.h"
-#include "paddle/utils/Common.h"
-#include "paddle/utils/Logging.h"
+#include "paddle/legacy/utils/Common.h"
+#include "paddle/legacy/utils/Logging.h"
 namespace paddle {
@@ -40,7 +40,7 @@ class TensorAssignOp;
 */
 template
 class TensorExpression {
-public:
+ public:
   /**
   * Element wise unary expression.
   */
@@ -355,7 +355,7 @@ public:
     return TensorAssignOp(derived(), expr);
   }
-protected:
+ protected:
   const Derived& derived() const { return *static_cast(this); }
 };
@@ -365,7 +365,7 @@ protected:
 template
 class TensorUnaryOp
     : public TensorExpression, T> {
-public:
+ public:
   explicit TensorUnaryOp(const OP op, const ExprType& expr)
       : op_(op), expr_(expr) {}
@@ -379,7 +379,7 @@
 template
 class TensorBinaryOp
     : public TensorExpression, T> {
-public:
+ public:
   explicit TensorBinaryOp(const OP op, const LhsType& lhs, const RhsType& rhs)
       : op_(op), lhs_(lhs), rhs_(rhs) {}
@@ -395,7 +395,7 @@
 template
 class TensorTernaryOp
    : public TensorExpression<
          TensorTernaryOp, T> {
-public:
+ public:
   explicit TensorTernaryOp(const ExprType1& expr1,
                            const ExprType2& expr2,
                            const ExprType3& expr3)
@@ -412,7 +412,7 @@ public:
 template
 class TensorConstant
     : public TensorExpression, T> {
-public:
+ public:
   explicit TensorConstant(const OP op, const ExprType& expr)
       : op_(op), expr_(expr) {}
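The Tensor*Op classes above are a textbook expression-template design: operators build a tree of lightweight expression objects, and nothing is computed until the tree is assigned to a destination. A self-contained toy version of the same idea, entirely independent of paddle's types, shows the mechanics:

#include <cstddef>
#include <vector>

// Expression node for elementwise addition; holds references, computes lazily.
template <typename L, typename R>
struct AddExpr {
  const L& lhs;
  const R& rhs;
  float operator[](size_t i) const { return lhs[i] + rhs[i]; }
  size_t size() const { return lhs.size(); }
};

struct Vec {
  std::vector<float> data;
  float operator[](size_t i) const { return data[i]; }
  size_t size() const { return data.size(); }
  // Assignment walks the expression tree once, with no temporaries.
  template <typename E>
  Vec& operator=(const E& e) {
    data.resize(e.size());
    for (size_t i = 0; i < e.size(); ++i) data[i] = e[i];
    return *this;
  }
};

template <typename L, typename R>
AddExpr<L, R> operator+(const L& l, const R& r) { return {l, r}; }

int main() {
  Vec a{{1, 2}}, b{{3, 4}}, c;
  c = a + b;  // evaluated element by element only at assignment time
}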
diff --git a/paddle/math/TrainingAlgorithmOp.cu b/paddle/legacy/math/TrainingAlgorithmOp.cu
similarity index 99%
rename from paddle/math/TrainingAlgorithmOp.cu
rename to paddle/legacy/math/TrainingAlgorithmOp.cu
index b844768d3b..9e1eaa0f45 100644
--- a/paddle/math/TrainingAlgorithmOp.cu
+++ b/paddle/legacy/math/TrainingAlgorithmOp.cu
@@ -14,7 +14,7 @@ limitations under the License. */
 #include "BaseMatrix.h"
 #include "TrainingAlgorithmOp.h"
-#include "paddle/utils/Logging.h"
+#include "paddle/legacy/utils/Logging.h"
 #if __cplusplus > 199711L
diff --git a/paddle/math/TrainingAlgorithmOp.h b/paddle/legacy/math/TrainingAlgorithmOp.h
similarity index 99%
rename from paddle/math/TrainingAlgorithmOp.h
rename to paddle/legacy/math/TrainingAlgorithmOp.h
index fe40fc2d36..921c2742cf 100644
--- a/paddle/math/TrainingAlgorithmOp.h
+++ b/paddle/legacy/math/TrainingAlgorithmOp.h
@@ -15,7 +15,7 @@ limitations under the License. */
 #pragma once
 #include "BaseMatrix.h"
-#include "paddle/utils/Logging.h"
+#include "paddle/legacy/utils/Logging.h"
 namespace paddle {
diff --git a/paddle/math/Vector.cpp b/paddle/legacy/math/Vector.cpp
similarity index 99%
rename from paddle/math/Vector.cpp
rename to paddle/legacy/math/Vector.cpp
index 2a47ed7ef8..87f48bb162 100644
--- a/paddle/math/Vector.cpp
+++ b/paddle/legacy/math/Vector.cpp
@@ -13,17 +13,17 @@ See the License for the specific language governing permissions and
 limitations under the License. */
 #include "Vector.h"
-#include "paddle/utils/Util.h"
+#include "paddle/legacy/utils/Util.h"
 #include
 #include "Matrix.h"
 #include "hl_gpu.h"
 #include "hl_matrix.h"
 #include "hl_table_apply.h"
-#include "paddle/utils/Flags.h"
-#include "paddle/utils/Logging.h"
-#include "paddle/utils/Thread.h"
-#include "paddle/utils/ThreadLocal.h"
+#include "paddle/legacy/utils/Flags.h"
+#include "paddle/legacy/utils/Logging.h"
+#include "paddle/legacy/utils/Thread.h"
+#include "paddle/legacy/utils/ThreadLocal.h"
 namespace paddle {
diff --git a/paddle/math/Vector.h b/paddle/legacy/math/Vector.h
similarity index 99%
rename from paddle/math/Vector.h
rename to paddle/legacy/math/Vector.h
index 3efbc769df..63cb4651c5 100644
--- a/paddle/math/Vector.h
+++ b/paddle/legacy/math/Vector.h
@@ -21,8 +21,8 @@ limitations under the License.
 */
 #include "BaseMatrix.h"
 #include "MemoryHandle.h"
-#include "paddle/utils/Common.h"
-#include "paddle/utils/Thread.h"
+#include "paddle/legacy/utils/Common.h"
+#include "paddle/legacy/utils/Thread.h"
 namespace paddle {
@@ -40,13 +40,13 @@ class Matrix;
 template
 class BaseVector : public BaseMatrixT {
-public:
+ public:
   BaseVector(size_t size, T* data, bool useGpu)
       : BaseMatrixT(1, size, data, false, useGpu), size_(this->width_) {}
   ~BaseVector() {}
-protected:
+ protected:
   size_t& size_;
 };
@@ -57,7 +57,7 @@
 */
 template
 class VectorT : public BaseVector {
-protected:
+ protected:
   VectorT(size_t size, MemoryHandlePtr memoryHandle, size_t offset, bool useGpu)
       : BaseVector(size,
                    reinterpret_cast(memoryHandle->getBuf()) + offset,
@@ -71,7 +71,7 @@ protected:
   VectorT(size_t size, T* data, bool useGpu)
       : BaseVector(size, data, useGpu) {}
-public:
+ public:
   virtual ~VectorT() {}
   static std::shared_ptr> create(size_t size, bool useGpu);
@@ -281,7 +281,7 @@ public:
     }
   }
-protected:
+ protected:
   friend class GpuVectorT;
   friend class CpuVectorT;
   virtual void copyTo(CpuVectorT* dest) const = 0;
@@ -297,7 +297,7 @@
 std::ostream& operator<<(std::ostream& os, const VectorT& vec) {
 template
 class GpuVectorT : public VectorT {
-public:
+ public:
   explicit GpuVectorT(size_t size);
   GpuVectorT(size_t size, GpuMemHandlePtr memHandle, size_t offset)
       : VectorT(size, memHandle, offset, true) {}
@@ -343,14 +343,14 @@ public:
     TensorGpuApply(*this, expr);
   }
-protected:
+ protected:
   virtual void copyTo(CpuVectorT* dest) const;
   virtual void copyTo(GpuVectorT* dest) const;
 };
 template
 class CpuVectorT : public VectorT {
-public:
+ public:
   explicit CpuVectorT(size_t size);
   CpuVectorT(size_t size, MemoryHandlePtr memoryHandle, size_t offset)
       : VectorT(size, memoryHandle, offset, false) {}
@@ -415,7 +415,7 @@ public:
 template
 class ParallelCpuVectorT : public CpuVectorT {
-public:
+ public:
   ParallelCpuVectorT(size_t size, SyncThreadPool* pool)
       : CpuVectorT(size), pool_(pool) {}
@@ -434,7 +434,7 @@ public:
   virtual void exec(SyncThreadPool::JobFunc jobFunc);
-private:
+ private:
   typedef std::function& vec)> ExecFunc;
   void parallelExec(ExecFunc func);
   SyncThreadPool* pool_;
@@ -445,7 +445,7 @@
 */
 template
 class CpuGpuVectorT {
-public:
+ public:
   /**
   * @brief An enum type, SyncedFlag, used to mark whether the
   * data memory currently lives in CPU or GPU.
@@ -670,7 +670,7 @@ public:
     setSync(flag);
   }
-protected:
+ protected:
   void resizeOrCreate(size_t size, bool useGpu);
   /**
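CpuGpuVectorT above keeps one logical vector with two physical copies and a SyncedFlag recording which side is current. This is the usual lazy host/device mirror; a stripped-down sketch (flag and method names invented for illustration, with the "device" faked by a second host vector so it stays runnable without CUDA) conveys the state machine:

#include <cstddef>
#include <vector>

class SyncedBuffer {
 public:
  enum SyncedFlag { SYNCED, DATA_AT_CPU, DATA_AT_GPU };

  explicit SyncedBuffer(size_t n) : cpu_(n), gpu_(n), flag_(SYNCED) {}

  float* mutableCpu() {       // writing on the CPU invalidates the GPU copy
    syncToCpu();
    flag_ = DATA_AT_CPU;
    return cpu_.data();
  }
  const float* gpuData() {    // reading on the GPU forces a copy if stale
    if (flag_ == DATA_AT_CPU) { gpu_ = cpu_; flag_ = SYNCED; }
    return gpu_.data();
  }

 private:
  void syncToCpu() {
    if (flag_ == DATA_AT_GPU) { cpu_ = gpu_; flag_ = SYNCED; }
  }
  std::vector<float> cpu_, gpu_;
  SyncedFlag flag_;
};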
diff --git a/paddle/math/tests/CMakeLists.txt b/paddle/legacy/math/tests/CMakeLists.txt
similarity index 100%
rename from paddle/math/tests/CMakeLists.txt
rename to paddle/legacy/math/tests/CMakeLists.txt
diff --git a/paddle/math/tests/OriginalOptimizerApi.h b/paddle/legacy/math/tests/OriginalOptimizerApi.h
similarity index 98%
rename from paddle/math/tests/OriginalOptimizerApi.h
rename to paddle/legacy/math/tests/OriginalOptimizerApi.h
index e30d784b23..f386e19958 100644
--- a/paddle/math/tests/OriginalOptimizerApi.h
+++ b/paddle/legacy/math/tests/OriginalOptimizerApi.h
@@ -14,8 +14,8 @@ limitations under the License. */
 #pragma once
-#include "paddle/math/Vector.h"
-#include "paddle/utils/GlobalConstants.h"
+#include "paddle/legacy/math/Vector.h"
+#include "paddle/legacy/utils/GlobalConstants.h"
 using namespace paddle;  // NOLINT
diff --git a/paddle/math/tests/PerfUtils.h b/paddle/legacy/math/tests/PerfUtils.h
similarity index 98%
rename from paddle/math/tests/PerfUtils.h
rename to paddle/legacy/math/tests/PerfUtils.h
index bee2351e2f..eaf4869e4c 100644
--- a/paddle/math/tests/PerfUtils.h
+++ b/paddle/legacy/math/tests/PerfUtils.h
@@ -21,7 +21,7 @@ limitations under the License. */
 #else
-#include "paddle/utils/Stat.h"
+#include "paddle/legacy/utils/Stat.h"
 using namespace paddle;  // NOLINT
 #define EXPRESSION_PERFORMANCE(expression) \
diff --git a/paddle/math/tests/TensorCheck.h b/paddle/legacy/math/tests/TensorCheck.h
similarity index 96%
rename from paddle/math/tests/TensorCheck.h
rename to paddle/legacy/math/tests/TensorCheck.h
index f4332ede36..41c8ece282 100644
--- a/paddle/math/tests/TensorCheck.h
+++ b/paddle/legacy/math/tests/TensorCheck.h
@@ -20,7 +20,7 @@ limitations under the License. */
 */
 #include
-#include "paddle/math/Matrix.h"
+#include "paddle/legacy/math/Matrix.h"
 namespace autotest {
@@ -32,7 +32,7 @@ using paddle::CpuVectorT;
 using paddle::GpuVectorT;
 class AssertEqual {
-public:
+ public:
   AssertEqual(real err = 0) : err_(err) {}
   inline bool operator()(real a, real b) {
@@ -51,7 +51,7 @@ public:
     return true;
   }
-private:
+ private:
   real err_;
 };
@@ -60,71 +60,71 @@
 class CopyToCpu;
 template <>
 class CopyToCpu {
-public:
+ public:
   explicit CopyToCpu(const CpuMatrix& arg) : arg_(arg) {}
   const CpuMatrix& copiedArg() const { return arg_; }
-private:
+ private:
   const CpuMatrix& arg_;
 };
 template <>
 class CopyToCpu {
-public:
+ public:
   explicit CopyToCpu(const GpuMatrix& arg)
       : arg_(arg.getHeight(), arg.getWidth()) {
     arg_.copyFrom(arg);
   }
   CpuMatrix& copiedArg() { return arg_; }
-private:
+ private:
   CpuMatrix arg_;
 };
 template <>
 class CopyToCpu {
-public:
+ public:
   explicit CopyToCpu(const Matrix& arg)
       : arg_(arg.getHeight(), arg.getWidth()) {
     arg_.copyFrom(arg);
   }
   CpuMatrix& copiedArg() { return arg_; }
-private:
+ private:
   CpuMatrix arg_;
 };
 template
 class CopyToCpu> {
-public:
+ public:
   explicit CopyToCpu(const CpuVectorT& arg) : arg_(arg) {}
   const CpuVectorT& copiedArg() const { return arg_; }
-private:
+ private:
   const CpuVectorT& arg_;
 };
 template
 class CopyToCpu> {
-public:
+ public:
   explicit CopyToCpu(const GpuVectorT& arg) : arg_(arg.getSize()) {
     arg_.copyFrom(arg);
  }
   CpuVectorT& copiedArg() { return arg_; }
-private:
+ private:
   CpuVectorT arg_;
 };
 template
 class CopyToCpu> {
-public:
+ public:
   explicit CopyToCpu(const VectorT& arg) : arg_(arg.getSize()) {
     arg_.copyFrom(arg);
   }
   CpuVectorT& copiedArg() { return arg_; }
-private:
+ private:
   CpuVectorT arg_;
 };
diff --git a/paddle/math/tests/TestUtils.h b/paddle/legacy/math/tests/TestUtils.h
similarity index 97%
rename from paddle/math/tests/TestUtils.h
rename to paddle/legacy/math/tests/TestUtils.h
index d2b9706432..60e76359da 100644
--- a/paddle/math/tests/TestUtils.h
+++ b/paddle/legacy/math/tests/TestUtils.h
@@ -41,8 +41,8 @@ limitations under the License.
 */
 #include
 #include "TensorCheck.h"
-#include "paddle/math/Matrix.h"
-#include "paddle/math/SparseMatrix.h"
+#include "paddle/legacy/math/Matrix.h"
+#include "paddle/legacy/math/SparseMatrix.h"
 namespace autotest {
@@ -56,31 +56,31 @@ using paddle::GpuSparseMatrix;
 template
 class ReplaceType {
-public:
+ public:
   typedef T1 type;
 };
 template <>
 class ReplaceType {
-public:
+ public:
   typedef CpuMatrix type;
 };
 template <>
 class ReplaceType {
-public:
+ public:
   typedef GpuMatrix type;
 };
 template <>
 class ReplaceType {
-public:
+ public:
   typedef CpuMatrix type;
 };
 template <>
 class ReplaceType {
-public:
+ public:
   typedef GpuMatrix type;
 };
@@ -180,25 +180,25 @@ R call(C& obj, R (FC::*f)(FArgs...), Args&&... args) {
 template
 class ReturnType {
-public:
+ public:
   typedef T type;
 };
 template <>
 class ReturnType {
-public:
+ public:
   typedef GpuMatrix type;
 };
 template <>
 class ReturnType {
-public:
+ public:
   typedef GpuIVector type;
 };
 template <>
 class ReturnType {
-public:
+ public:
   typedef GpuSparseMatrix type;
 };
@@ -234,7 +234,7 @@
 GpuSparseMatrix autoArgs(CpuSparseMatrix& v) {
 }
 class AutoCompare {
-public:
+ public:
   /**
   * err is the allowed calculation error.
   * The smaller the value of err,
@@ -285,7 +285,7 @@ public:
     TensorCheck(compare, cpu, gpu);
   }
-protected:
+ protected:
   CpuMatrix cpu;
   GpuMatrix gpu;
   AssertEqual compare;
diff --git a/paddle/math/tests/test_Allocator.cpp b/paddle/legacy/math/tests/test_Allocator.cpp
similarity index 93%
rename from paddle/math/tests/test_Allocator.cpp
rename to paddle/legacy/math/tests/test_Allocator.cpp
index 84bc1c1d9e..122be9082a 100644
--- a/paddle/math/tests/test_Allocator.cpp
+++ b/paddle/legacy/math/tests/test_Allocator.cpp
@@ -13,12 +13,12 @@ See the License for the specific language governing permissions and
 limitations under the License. */
 #include
-#include "paddle/utils/Logging.h"
-#include "paddle/utils/Util.h"
+#include "paddle/legacy/utils/Logging.h"
+#include "paddle/legacy/utils/Util.h"
 #define private public
-#include "paddle/math/Allocator.h"
-#include "paddle/math/MemoryHandle.h"
-#include "paddle/math/PoolAllocator.h"
+#include "paddle/legacy/math/Allocator.h"
+#include "paddle/legacy/math/MemoryHandle.h"
+#include "paddle/legacy/math/PoolAllocator.h"
 using namespace paddle;  // NOLINT
diff --git a/paddle/math/tests/test_BaseMatrix.cpp b/paddle/legacy/math/tests/test_BaseMatrix.cpp
similarity index 99%
rename from paddle/math/tests/test_BaseMatrix.cpp
rename to paddle/legacy/math/tests/test_BaseMatrix.cpp
index 6f7beb60c8..488765c6ac 100644
--- a/paddle/math/tests/test_BaseMatrix.cpp
+++ b/paddle/legacy/math/tests/test_BaseMatrix.cpp
@@ -21,7 +21,7 @@ limitations under the License. */
 #include
 #include "TestUtils.h"
-#include "paddle/math/BaseMatrix.h"
+#include "paddle/legacy/math/BaseMatrix.h"
 using paddle::BaseMatrix;
 using paddle::Matrix;
diff --git a/paddle/math/tests/test_CpuGpuVector.cpp b/paddle/legacy/math/tests/test_CpuGpuVector.cpp
similarity index 97%
rename from paddle/math/tests/test_CpuGpuVector.cpp
rename to paddle/legacy/math/tests/test_CpuGpuVector.cpp
index 395541a76a..010fef534d 100644
--- a/paddle/math/tests/test_CpuGpuVector.cpp
+++ b/paddle/legacy/math/tests/test_CpuGpuVector.cpp
@@ -15,8 +15,8 @@ limitations under the License.
 */
 #ifdef PADDLE_WITH_CUDA
 #include
-#include "paddle/math/Vector.h"
-#include "paddle/utils/Util.h"
+#include "paddle/legacy/math/Vector.h"
+#include "paddle/legacy/utils/Util.h"
 #include "test_matrixUtil.h"
 using namespace paddle;  // NOLINT
diff --git a/paddle/math/tests/test_ExecViaCpu.cpp b/paddle/legacy/math/tests/test_ExecViaCpu.cpp
similarity index 95%
rename from paddle/math/tests/test_ExecViaCpu.cpp
rename to paddle/legacy/math/tests/test_ExecViaCpu.cpp
index 513c7b440e..b2ce0bc7ed 100644
--- a/paddle/math/tests/test_ExecViaCpu.cpp
+++ b/paddle/legacy/math/tests/test_ExecViaCpu.cpp
@@ -13,10 +13,10 @@ See the License for the specific language governing permissions and
 limitations under the License. */
 #include
-#include
-#include
+#include
+#include
 #include
-#include "paddle/math/SparseMatrix.h"
+#include "paddle/legacy/math/SparseMatrix.h"
 using namespace paddle;  // NOLINT
@@ -39,7 +39,7 @@ real f(Matrix& mat1,
 }
 class Functor {
-public:
+ public:
   real operator()(Matrix& mat1,
                   const Matrix& mat2,
                   IVector& vec1,
@@ -49,7 +49,7 @@ public:
     return a_;
   }
-private:
+ private:
   real a_;
 };
diff --git a/paddle/math/tests/test_FPException.cpp b/paddle/legacy/math/tests/test_FPException.cpp
similarity index 97%
rename from paddle/math/tests/test_FPException.cpp
rename to paddle/legacy/math/tests/test_FPException.cpp
index d87fdcda9e..aa6aea71c8 100644
--- a/paddle/math/tests/test_FPException.cpp
+++ b/paddle/legacy/math/tests/test_FPException.cpp
@@ -30,8 +30,8 @@ limitations under the License. */
 */
 #include
-#include "paddle/math/Matrix.h"
-#include "paddle/utils/Common.h"
+#include "paddle/legacy/math/Matrix.h"
+#include "paddle/legacy/utils/Common.h"
 using namespace paddle;  // NOLINT
diff --git a/paddle/math/tests/test_GpuProfiler.cpp b/paddle/legacy/math/tests/test_GpuProfiler.cpp
similarity index 97%
rename from paddle/math/tests/test_GpuProfiler.cpp
rename to paddle/legacy/math/tests/test_GpuProfiler.cpp
index 828159660b..ee27109f21 100644
--- a/paddle/math/tests/test_GpuProfiler.cpp
+++ b/paddle/legacy/math/tests/test_GpuProfiler.cpp
@@ -15,11 +15,11 @@ limitations under the License. */
 #ifdef PADDLE_WITH_CUDA
 #include
-#include "paddle/math/Matrix.h"
-#include "paddle/math/SparseMatrix.h"
+#include "paddle/legacy/math/Matrix.h"
+#include "paddle/legacy/math/SparseMatrix.h"
+#include "paddle/legacy/utils/Stat.h"
+#include "paddle/legacy/utils/Util.h"
 #include "paddle/testing/TestUtil.h"
-#include "paddle/utils/Stat.h"
-#include "paddle/utils/Util.h"
 using namespace paddle;  // NOLINT
 using namespace std;     // NOLINT
diff --git a/paddle/math/tests/test_Matrix.cpp b/paddle/legacy/math/tests/test_Matrix.cpp
similarity index 100%
rename from paddle/math/tests/test_Matrix.cpp
rename to paddle/legacy/math/tests/test_Matrix.cpp
diff --git a/paddle/math/tests/test_RowBuffer.cpp b/paddle/legacy/math/tests/test_RowBuffer.cpp
similarity index 98%
rename from paddle/math/tests/test_RowBuffer.cpp
rename to paddle/legacy/math/tests/test_RowBuffer.cpp
index e38de853e0..2ef8cd303d 100644
--- a/paddle/math/tests/test_RowBuffer.cpp
+++ b/paddle/legacy/math/tests/test_RowBuffer.cpp
@@ -13,7 +13,7 @@ See the License for the specific language governing permissions and
 limitations under the License.
 */
 #include
-#include "paddle/math/RowBuffer.h"
+#include "paddle/legacy/math/RowBuffer.h"
 TEST(RowBuffer, testAutoGrow) {
   paddle::RowBuffer buf(128);
diff --git a/paddle/math/tests/test_SIMDFunctions.cpp b/paddle/legacy/math/tests/test_SIMDFunctions.cpp
similarity index 98%
rename from paddle/math/tests/test_SIMDFunctions.cpp
rename to paddle/legacy/math/tests/test_SIMDFunctions.cpp
index b692679436..c6490f70e3 100644
--- a/paddle/math/tests/test_SIMDFunctions.cpp
+++ b/paddle/legacy/math/tests/test_SIMDFunctions.cpp
@@ -12,8 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License. */
-#include "paddle/math/SIMDFunctions.h"
-#include "paddle/utils/Util.h"
+#include "paddle/legacy/math/SIMDFunctions.h"
+#include "paddle/legacy/utils/Util.h"
 #include
diff --git a/paddle/math/tests/test_SparseMatrix.cpp b/paddle/legacy/math/tests/test_SparseMatrix.cpp
similarity index 99%
rename from paddle/math/tests/test_SparseMatrix.cpp
rename to paddle/legacy/math/tests/test_SparseMatrix.cpp
index dbcbeb8d50..30896a945e 100644
--- a/paddle/math/tests/test_SparseMatrix.cpp
+++ b/paddle/legacy/math/tests/test_SparseMatrix.cpp
@@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License. */
-#include
+#include
 #include
 #include "test_matrixUtil.h"
diff --git a/paddle/math/tests/test_Tensor.cu b/paddle/legacy/math/tests/test_Tensor.cu
similarity index 99%
rename from paddle/math/tests/test_Tensor.cu
rename to paddle/legacy/math/tests/test_Tensor.cu
index acb2da86d0..3ce056d661 100644
--- a/paddle/math/tests/test_Tensor.cu
+++ b/paddle/legacy/math/tests/test_Tensor.cu
@@ -14,7 +14,7 @@ limitations under the License. */
 #include
 #include "TensorCheck.h"
-#include "paddle/math/Matrix.h"
+#include "paddle/legacy/math/Matrix.h"
 using paddle::Matrix;
 using paddle::CpuMatrix;
diff --git a/paddle/math/tests/test_TrainingAlgorithm.cpp b/paddle/legacy/math/tests/test_TrainingAlgorithm.cpp
similarity index 99%
rename from paddle/math/tests/test_TrainingAlgorithm.cpp
rename to paddle/legacy/math/tests/test_TrainingAlgorithm.cpp
index fb146176ca..214ae8971a 100644
--- a/paddle/math/tests/test_TrainingAlgorithm.cpp
+++ b/paddle/legacy/math/tests/test_TrainingAlgorithm.cpp
@@ -16,8 +16,8 @@ limitations under the License.
 */
 #include "OriginalOptimizerApi.h"
 #include "PerfUtils.h"
 #include "TensorCheck.h"
-#include "paddle/math/TrainingAlgorithmOp.h"
-#include "paddle/utils/Util.h"
+#include "paddle/legacy/math/TrainingAlgorithmOp.h"
+#include "paddle/legacy/utils/Util.h"
 using namespace paddle;  // NOLINT
@@ -28,14 +28,14 @@
 DEFINE_double(max_diff, 1e-13, "max diff allowed");
 #endif
 class SetMaxDiff {
-public:
+ public:
   explicit SetMaxDiff(double max_diff) {
     max_diff_ = FLAGS_max_diff;
     FLAGS_max_diff = max_diff;
   }
   ~SetMaxDiff() { FLAGS_max_diff = max_diff_; }
-private:
+ private:
   double max_diff_;
 };
diff --git a/paddle/math/tests/test_batchTranspose.cpp b/paddle/legacy/math/tests/test_batchTranspose.cpp
similarity index 100%
rename from paddle/math/tests/test_batchTranspose.cpp
rename to paddle/legacy/math/tests/test_batchTranspose.cpp
diff --git a/paddle/math/tests/test_lazyAssign.cu b/paddle/legacy/math/tests/test_lazyAssign.cu
similarity index 97%
rename from paddle/math/tests/test_lazyAssign.cu
rename to paddle/legacy/math/tests/test_lazyAssign.cu
index cbd74bbfe3..cf8c3d7719 100644
--- a/paddle/math/tests/test_lazyAssign.cu
+++ b/paddle/legacy/math/tests/test_lazyAssign.cu
@@ -15,8 +15,8 @@ limitations under the License. */
 #include
 #include "PerfUtils.h"
 #include "TensorCheck.h"
-#include "paddle/math/Matrix.h"
-#include "paddle/math/TensorAssign.h"
+#include "paddle/legacy/math/Matrix.h"
+#include "paddle/legacy/math/TensorAssign.h"
 using paddle::BaseMatrix;
 using paddle::CpuMatrix;
diff --git a/paddle/math/tests/test_matrixCompare.cpp b/paddle/legacy/math/tests/test_matrixCompare.cpp
similarity index 99%
rename from paddle/math/tests/test_matrixCompare.cpp
rename to paddle/legacy/math/tests/test_matrixCompare.cpp
index e45ddd433f..a43adde46f 100644
--- a/paddle/math/tests/test_matrixCompare.cpp
+++ b/paddle/legacy/math/tests/test_matrixCompare.cpp
@@ -18,13 +18,13 @@ limitations under the License. */
 #include
 #include "TensorCheck.h"
-#include "paddle/math/MathUtils.h"
-#include "paddle/math/Matrix.h"
-#include "paddle/math/SparseMatrix.h"
+#include "paddle/legacy/math/MathUtils.h"
+#include "paddle/legacy/math/Matrix.h"
+#include "paddle/legacy/math/SparseMatrix.h"
+#include "paddle/legacy/utils/DynamicLoader.h"
+#include "paddle/legacy/utils/Stat.h"
+#include "paddle/legacy/utils/Util.h"
 #include "paddle/testing/TestUtil.h"
-#include "paddle/utils/DynamicLoader.h"
-#include "paddle/utils/Stat.h"
-#include "paddle/utils/Util.h"
 using namespace paddle;  // NOLINT
 using namespace std;     // NOLINT
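SetMaxDiff in test_TrainingAlgorithm above is a small RAII guard: it swaps a global flag on construction and restores it on destruction, so a test can tighten the tolerance locally without leaking state into other tests. The same shape works for any global, as this generic sketch (not part of paddle) shows:

#include <cstdio>

double FLAGS_max_diff = 1e-13;  // stand-in for the gflags-defined global

// Generic scoped override in the spirit of SetMaxDiff above.
template <typename T>
class ScopedOverride {
 public:
  ScopedOverride(T& target, T value) : target_(target), saved_(target) {
    target_ = value;
  }
  ~ScopedOverride() { target_ = saved_; }  // restore on scope exit
 private:
  T& target_;
  T saved_;
};

int main() {
  {
    ScopedOverride<double> guard(FLAGS_max_diff, 1e-5);
    std::printf("inside: %g\n", FLAGS_max_diff);   // 1e-05
  }
  std::printf("outside: %g\n", FLAGS_max_diff);    // 1e-13
}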
diff --git a/paddle/math/tests/test_matrixUtil.h b/paddle/legacy/math/tests/test_matrixUtil.h
similarity index 98%
rename from paddle/math/tests/test_matrixUtil.h
rename to paddle/legacy/math/tests/test_matrixUtil.h
index 86297547dc..58c93f746e 100644
--- a/paddle/math/tests/test_matrixUtil.h
+++ b/paddle/legacy/math/tests/test_matrixUtil.h
@@ -14,8 +14,8 @@ limitations under the License. */
 #pragma once
 #include
-#include
-#include "paddle/math/SparseMatrix.h"
+#include
+#include "paddle/legacy/math/SparseMatrix.h"
 namespace paddle {
diff --git a/paddle/math/tests/test_perturbation.cpp b/paddle/legacy/math/tests/test_perturbation.cpp
similarity index 99%
rename from paddle/math/tests/test_perturbation.cpp
rename to paddle/legacy/math/tests/test_perturbation.cpp
index ef99dab60a..969400666f 100644
--- a/paddle/math/tests/test_perturbation.cpp
+++ b/paddle/legacy/math/tests/test_perturbation.cpp
@@ -32,7 +32,7 @@
 const int TGT_SIZE = 21;
 const int CHANNELS = 3;
 class PerturbationTest : public testing::Test {
-protected:
+ protected:
   virtual void SetUp() { generateTestImages(gpuImages_); }
   virtual void TearDown() {}
diff --git a/paddle/math/tests/test_sparseMatrixCompare.cpp b/paddle/legacy/math/tests/test_sparseMatrixCompare.cpp
similarity index 98%
rename from paddle/math/tests/test_sparseMatrixCompare.cpp
rename to paddle/legacy/math/tests/test_sparseMatrixCompare.cpp
index 12647d21a2..492aa0a689 100644
--- a/paddle/math/tests/test_sparseMatrixCompare.cpp
+++ b/paddle/legacy/math/tests/test_sparseMatrixCompare.cpp
@@ -18,8 +18,8 @@ limitations under the License. */
 /// only cpu version.
 #include
-#include "paddle/math/Matrix.h"
-#include "paddle/utils/Util.h"
+#include "paddle/legacy/math/Matrix.h"
+#include "paddle/legacy/utils/Util.h"
 #include "test_matrixUtil.h"
 using namespace paddle;  // NOLINT
diff --git a/paddle/legacy/optimizer/CMakeLists.txt b/paddle/legacy/optimizer/CMakeLists.txt
new file mode 100644
index 0000000000..7c80faa48c
--- /dev/null
+++ b/paddle/legacy/optimizer/CMakeLists.txt
@@ -0,0 +1,16 @@
+set(OPTIMIZER_SRCS
+    adadelta_optimizer.cc
+    adagrad_optimizer.cc
+    adam_optimizer.cc
+    optimizer.cc
+    parameter_optimizer.cc
+    sgd_optimizer.cc
+    )
+
+add_library(paddle_optimizer ${OPTIMIZER_SRCS})
+target_link_libraries(paddle_optimizer paddle_proto glog)
+
+if (WITH_TESTING)
+  add_unittest(serialization_test serialization_test.cc)
+  add_unittest(parameter_optimizer_test parameter_optimizer_test.cc)
+endif()
diff --git a/paddle/optimizer/adadelta_optimizer.cc b/paddle/legacy/optimizer/adadelta_optimizer.cc
similarity index 100%
rename from paddle/optimizer/adadelta_optimizer.cc
rename to paddle/legacy/optimizer/adadelta_optimizer.cc
diff --git a/paddle/optimizer/adadelta_optimizer.h b/paddle/legacy/optimizer/adadelta_optimizer.h
similarity index 98%
rename from paddle/optimizer/adadelta_optimizer.h
rename to paddle/legacy/optimizer/adadelta_optimizer.h
index 74df9d54be..5beb62295a 100644
--- a/paddle/optimizer/adadelta_optimizer.h
+++ b/paddle/legacy/optimizer/adadelta_optimizer.h
@@ -20,7 +20,7 @@ namespace paddle {
 namespace optimizer {
 class AdadeltaOptimizer : public ParameterOptimizer {
-public:
+ public:
   AdadeltaOptimizer(
       Tensor *parameter, LrPolicy *lr, double rho, double epsilon, double decay)
       : ParameterOptimizer(parameter, lr),
@@ -40,7 +40,7 @@ public:
   std::string SerializeState();
   void DeserializeState(const std::string &state);
-private:
+ private:
   Tensor *accum_gradient_;
   Tensor *accum_delta_;
   Tensor *update_delta_;
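AdadeltaOptimizer above carries exactly the state its update rule needs: accumulated squared gradients, accumulated squared deltas, and a pending delta. For reference, the standard Adadelta update from Zeiler's paper (which these member tensors correspond to) is:

\[
E[g^2]_t = \rho\, E[g^2]_{t-1} + (1-\rho)\, g_t^2, \qquad
\Delta x_t = -\frac{\sqrt{E[\Delta x^2]_{t-1} + \epsilon}}{\sqrt{E[g^2]_t + \epsilon}}\; g_t, \qquad
E[\Delta x^2]_t = \rho\, E[\Delta x^2]_{t-1} + (1-\rho)\, \Delta x_t^2
\]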
diff --git a/paddle/optimizer/adagrad_optimizer.cc b/paddle/legacy/optimizer/adagrad_optimizer.cc
similarity index 100%
rename from paddle/optimizer/adagrad_optimizer.cc
rename to paddle/legacy/optimizer/adagrad_optimizer.cc
diff --git a/paddle/optimizer/adagrad_optimizer.h b/paddle/legacy/optimizer/adagrad_optimizer.h
similarity index 98%
rename from paddle/optimizer/adagrad_optimizer.h
rename to paddle/legacy/optimizer/adagrad_optimizer.h
index 1d58402d78..b6fc067399 100644
--- a/paddle/optimizer/adagrad_optimizer.h
+++ b/paddle/legacy/optimizer/adagrad_optimizer.h
@@ -20,7 +20,7 @@ namespace paddle {
 namespace optimizer {
 class AdagradOptimizer : public ParameterOptimizer {
-public:
+ public:
   AdagradOptimizer(Tensor *parameter,
                    LrPolicy *lr,
                    double epsilon,
@@ -36,7 +36,7 @@ public:
   std::string SerializeState();
   void DeserializeState(const std::string &state);
-private:
+ private:
   Tensor *accum_gradient_;
   double epsilon_;
   double decay_;
diff --git a/paddle/optimizer/adam_optimizer.cc b/paddle/legacy/optimizer/adam_optimizer.cc
similarity index 100%
rename from paddle/optimizer/adam_optimizer.cc
rename to paddle/legacy/optimizer/adam_optimizer.cc
diff --git a/paddle/optimizer/adam_optimizer.h b/paddle/legacy/optimizer/adam_optimizer.h
similarity index 98%
rename from paddle/optimizer/adam_optimizer.h
rename to paddle/legacy/optimizer/adam_optimizer.h
index 7977226c86..fce1096006 100644
--- a/paddle/optimizer/adam_optimizer.h
+++ b/paddle/legacy/optimizer/adam_optimizer.h
@@ -20,7 +20,7 @@ namespace paddle {
 namespace optimizer {
 class AdamOptimizer : public ParameterOptimizer {
-public:
+ public:
   AdamOptimizer(Tensor *parameter,
                 LrPolicy *lr,
                 double beta_1,
@@ -42,7 +42,7 @@ public:
   std::string SerializeState();
   void DeserializeState(const std::string &state);
-private:
+ private:
   Tensor *momentums_;
   Tensor *velocitys_;
   double beta_1_;
diff --git a/paddle/optimizer/lr_policy.h b/paddle/legacy/optimizer/lr_policy.h
similarity index 98%
rename from paddle/optimizer/lr_policy.h
rename to paddle/legacy/optimizer/lr_policy.h
index 14422d1f42..d639c9f22c 100644
--- a/paddle/optimizer/lr_policy.h
+++ b/paddle/legacy/optimizer/lr_policy.h
@@ -20,7 +20,7 @@ namespace paddle {
 namespace optimizer {
 class LrPolicy {
-public:
+ public:
   virtual ~LrPolicy() {}
   virtual double LearningRate(const uint64_t num_sample_passed) = 0;
   virtual std::string SerializeState() = 0;
@@ -29,7 +29,7 @@ public:
 // constant learning rate policy
 class ConstLr final : public LrPolicy {
-public:
+ public:
   ConstLr(double lr) : learning_rate_(lr){};
   double LearningRate(const uint64_t num_sample_passed) {
     return learning_rate_;
@@ -45,12 +45,12 @@ public:
     learning_rate_ = state.learning_rate();
   }
-private:
+ private:
   double learning_rate_;
 };
 class LinearLr final : public LrPolicy {
-public:
+ public:
   LinearLr(double lr, double lr_decay_a, double lr_decay_b)
       : learning_rate_(lr), lr_decay_a_(lr_decay_a), lr_decay_b_(lr_decay_b) {}
   double LearningRate(const uint64_t num_sample_passed) {
@@ -72,7 +72,7 @@ public:
     lr_decay_b_ = state.lr_decay_b();
   }
-private:
+ private:
   double learning_rate_;
   double lr_decay_a_;
   double lr_decay_b_;
diff --git a/paddle/optimizer/optimizer.cc b/paddle/legacy/optimizer/optimizer.cc
similarity index 100%
rename from paddle/optimizer/optimizer.cc
rename to paddle/legacy/optimizer/optimizer.cc
diff --git a/paddle/optimizer/optimizer.h b/paddle/legacy/optimizer/optimizer.h
similarity index 100%
rename from paddle/optimizer/optimizer.h
rename to paddle/legacy/optimizer/optimizer.h
diff --git a/paddle/optimizer/parameter_optimizer.cc b/paddle/legacy/optimizer/parameter_optimizer.cc
similarity index 100%
rename from paddle/optimizer/parameter_optimizer.cc
rename to paddle/legacy/optimizer/parameter_optimizer.cc
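lr_policy.h above defines the small strategy hierarchy the legacy optimizers consume: an LrPolicy interface plus ConstLr and LinearLr. The decay body of LinearLr::LearningRate is elided by the hunk context, so the formula in this sketch is an assumption chosen for illustration (a linear decay floored at lr_decay_b_), not a quote of paddle's code:

#include <algorithm>
#include <cstdint>

// Interface mirrors the LrPolicy shown above (serialization omitted).
class LrPolicy {
 public:
  virtual ~LrPolicy() {}
  virtual double LearningRate(uint64_t num_sample_passed) = 0;
};

class ConstLr final : public LrPolicy {
 public:
  explicit ConstLr(double lr) : learning_rate_(lr) {}
  double LearningRate(uint64_t) override { return learning_rate_; }
 private:
  double learning_rate_;
};

class LinearLr final : public LrPolicy {
 public:
  LinearLr(double lr, double a, double b)
      : learning_rate_(lr), lr_decay_a_(a), lr_decay_b_(b) {}
  // Assumed form: decay linearly with samples seen, never below lr_decay_b_.
  double LearningRate(uint64_t n) override {
    return std::max(learning_rate_ - lr_decay_a_ * n, lr_decay_b_);
  }
 private:
  double learning_rate_, lr_decay_a_, lr_decay_b_;
};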
diff --git a/paddle/optimizer/parameter_optimizer.h b/paddle/legacy/optimizer/parameter_optimizer.h
similarity index 98%
rename from paddle/optimizer/parameter_optimizer.h
rename to paddle/legacy/optimizer/parameter_optimizer.h
index c7cf8db3ee..d5abca82d5 100644
--- a/paddle/optimizer/parameter_optimizer.h
+++ b/paddle/legacy/optimizer/parameter_optimizer.h
@@ -26,7 +26,7 @@ namespace paddle {
 namespace optimizer {
 class ParameterOptimizer {
-public:
+ public:
   /**
   * @brief update hook for algorithm need to traverse parameter more than
   * once.
@@ -45,7 +45,7 @@ public:
   virtual std::string SerializeState() = 0;
   virtual void DeserializeState(const std::string &state) = 0;
-protected:
+ protected:
   Tensor *parameter_;
   // learning rate policy
   LrPolicy *lr_policy_;
diff --git a/paddle/optimizer/parameter_optimizer_test.cc b/paddle/legacy/optimizer/parameter_optimizer_test.cc
similarity index 99%
rename from paddle/optimizer/parameter_optimizer_test.cc
rename to paddle/legacy/optimizer/parameter_optimizer_test.cc
index d663e2fd00..1d9572999e 100644
--- a/paddle/optimizer/parameter_optimizer_test.cc
+++ b/paddle/legacy/optimizer/parameter_optimizer_test.cc
@@ -38,7 +38,7 @@
 paddle::optimizer::Tensor* FixedTensor(size_t size) {
 }
 class OptimizerTest : public testing::Test {
-public:
+ public:
   virtual ~OptimizerTest() {}
   // init paddle::optimizer::Tensor shape
   const size_t kSize = 5;
@@ -115,7 +115,7 @@ public:
     }
   }
-private:
+ private:
   std::vector opts_;
   paddle::OptimizerConfig config_;
 };
diff --git a/paddle/optimizer/serialization.h b/paddle/legacy/optimizer/serialization.h
similarity index 97%
rename from paddle/optimizer/serialization.h
rename to paddle/legacy/optimizer/serialization.h
index bf12eed15f..2067a8d8cf 100644
--- a/paddle/optimizer/serialization.h
+++ b/paddle/legacy/optimizer/serialization.h
@@ -19,7 +19,7 @@
 #include
 #include
 #include "OptimizerConfig.pb.h"
-#include "paddle/utils/Logging.h"
+#include "paddle/legacy/utils/Logging.h"
 #include "tensor.h"
 namespace paddle {
diff --git a/paddle/optimizer/serialization_test.cc b/paddle/legacy/optimizer/serialization_test.cc
similarity index 100%
rename from paddle/optimizer/serialization_test.cc
rename to paddle/legacy/optimizer/serialization_test.cc
diff --git a/paddle/optimizer/sgd_optimizer.cc b/paddle/legacy/optimizer/sgd_optimizer.cc
similarity index 100%
rename from paddle/optimizer/sgd_optimizer.cc
rename to paddle/legacy/optimizer/sgd_optimizer.cc
diff --git a/paddle/optimizer/sgd_optimizer.h b/paddle/legacy/optimizer/sgd_optimizer.h
similarity index 98%
rename from paddle/optimizer/sgd_optimizer.h
rename to paddle/legacy/optimizer/sgd_optimizer.h
index f504d98adb..a8957cde54 100644
--- a/paddle/optimizer/sgd_optimizer.h
+++ b/paddle/legacy/optimizer/sgd_optimizer.h
@@ -20,7 +20,7 @@ namespace paddle {
 namespace optimizer {
 class SGDOptimizer : public ParameterOptimizer {
-public:
+ public:
   SGDOptimizer(Tensor* parameter, LrPolicy* lr, double m, double d, bool n)
       : ParameterOptimizer(parameter, lr),
         momentums_(nullptr),
@@ -39,7 +39,7 @@ public:
   std::string SerializeState();
   void DeserializeState(const std::string& state);
-private:
+ private:
   Tensor* momentums_;
   double momentum_;
   double decay_;
diff --git a/paddle/optimizer/tensor.h b/paddle/legacy/optimizer/tensor.h
similarity index 94%
rename from paddle/optimizer/tensor.h
rename to paddle/legacy/optimizer/tensor.h
index fd32398a23..2e58577d4d 100644
--- a/paddle/optimizer/tensor.h
+++ b/paddle/legacy/optimizer/tensor.h
@@ -18,15 +18,15 @@
 #include
 #include
-#include "paddle/utils/Common.h"
-#include "paddle/utils/Logging.h"
+#include "paddle/legacy/utils/Common.h"
+#include "paddle/legacy/utils/Logging.h"
 namespace paddle {
 namespace optimizer {
 template <class T>
 class TensorT {
-public:
+ public:
   TensorT(size_t size) : height_(1), width_(size) {
     // new T[size]() initializes all element to zero value.
     data_ptr_ = std::shared_ptr<T>(new T[size](), std::default_delete<T[]>());
   }
@@ -54,7 +54,7 @@ public:
   // TODO: replace with tensorshape
   size_t size() const { return this->width_ * this->height_; }
-protected:
+ protected:
   size_t height_;
   size_t width_;
   std::shared_ptr<T> data_ptr_;
diff --git a/paddle/parameter/Argument.cpp b/paddle/legacy/parameter/Argument.cpp
similarity index 99%
rename from paddle/parameter/Argument.cpp
rename to paddle/legacy/parameter/Argument.cpp
index 94522f718a..3f1d599e90 100644
--- a/paddle/parameter/Argument.cpp
+++ b/paddle/legacy/parameter/Argument.cpp
@@ -13,7 +13,7 @@ See the License for the specific language governing permissions and
 limitations under the License. */
 #include "Argument.h"
-#include "paddle/math/SparseMatrix.h"
+#include "paddle/legacy/math/SparseMatrix.h"
 #include
diff --git a/paddle/parameter/Argument.h b/paddle/legacy/parameter/Argument.h
similarity index 98%
rename from paddle/parameter/Argument.h
rename to paddle/legacy/parameter/Argument.h
index e580d38216..ea8634896c 100644
--- a/paddle/parameter/Argument.h
+++ b/paddle/legacy/parameter/Argument.h
@@ -13,11 +13,11 @@ limitations under the License. */
 #include "hl_gpu.h"
-#include "paddle/math/Matrix.h"
-#include "paddle/math/Vector.h"
-#include "paddle/parameter/Parameter.h"
-#include "paddle/utils/Locks.h"
-#include "paddle/utils/Util.h"
+#include "paddle/legacy/math/Matrix.h"
+#include "paddle/legacy/math/Vector.h"
+#include "paddle/legacy/parameter/Parameter.h"
+#include "paddle/legacy/utils/Locks.h"
+#include "paddle/legacy/utils/Util.h"
 namespace paddle {
diff --git a/paddle/parameter/AverageOptimizer.cpp b/paddle/legacy/parameter/AverageOptimizer.cpp
similarity index 100%
rename from paddle/parameter/AverageOptimizer.cpp
rename to paddle/legacy/parameter/AverageOptimizer.cpp
diff --git a/paddle/parameter/AverageOptimizer.h b/paddle/legacy/parameter/AverageOptimizer.h
similarity index 99%
rename from paddle/parameter/AverageOptimizer.h
rename to paddle/legacy/parameter/AverageOptimizer.h
index 4ad3c18d56..f0fe2fd28e 100644
--- a/paddle/parameter/AverageOptimizer.h
+++ b/paddle/legacy/parameter/AverageOptimizer.h
@@ -21,7 +21,7 @@ namespace paddle {
 // After Optimization, parameter values are further averaged within
 // time range.
 class AverageOptimizer : public ParameterOptimizer {
-public:
+ public:
   // if *useParameterApply* set, use PARAMETER_APPLY to store averaged parameter
   // else use PARAMETER_VALUE, and value backup in PARAMETER_GRADIENT
   AverageOptimizer(const OptimizationConfig& optConfig,
@@ -65,7 +65,7 @@ public:
   virtual void setNoDecay() { optimizer_->setNoDecay(); }
-protected:
+ protected:
   std::unique_ptr optimizer_;
   bool useApply_;
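AverageOptimizer's header comment above states the idea plainly: after the inner optimizer's update, parameter values are further averaged over a time range, and the averaged copy (stored in PARAMETER_APPLY or PARAMETER_VALUE) is what gets applied. An illustrative running-average sketch in that spirit, not paddle's actual implementation:

#include <cstddef>
#include <vector>

// Keeps a time-averaged copy of a parameter vector alongside the live one;
// the averaged copy is what an evaluator would typically read.
class ParameterAverager {
 public:
  explicit ParameterAverager(size_t n) : avg_(n, 0.0f), count_(0) {}

  void update(const std::vector<float>& param) {
    ++count_;
    for (size_t i = 0; i < param.size(); ++i) {
      avg_[i] += (param[i] - avg_[i]) / count_;  // incremental mean
    }
  }
  const std::vector<float>& averaged() const { return avg_; }

 private:
  std::vector<float> avg_;
  size_t count_;
};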
@@ -98,7 +98,7 @@ protected:
 // Average Optimizer with Sparse support.
 class AverageSparseOptimizer : public AverageOptimizer {
-public:
+ public:
   AverageSparseOptimizer(const OptimizationConfig& optConfig,
                          ParameterOptimizer* optimizer,
                          bool useParameterApply)
@@ -130,7 +130,7 @@ public:
     t0Vec_.assign(t0Vec_.size(), 0);
   }
-protected:
+ protected:
   /**
   * counting batches, clear after catch up with
   * t(timer_) is current time,
diff --git a/paddle/parameter/CMakeLists.txt b/paddle/legacy/parameter/CMakeLists.txt
similarity index 70%
rename from paddle/parameter/CMakeLists.txt
rename to paddle/legacy/parameter/CMakeLists.txt
index d2ae1c16c6..19ae07e077 100644
--- a/paddle/parameter/CMakeLists.txt
+++ b/paddle/legacy/parameter/CMakeLists.txt
@@ -5,8 +5,6 @@ file(GLOB PARAMETERS_SOURCES . *.cpp)
 add_library(paddle_parameter STATIC ${PARAMETERS_SOURCES})
-add_style_check_target(paddle_parameter ${PARAMETERS_SOURCES})
-add_style_check_target(paddle_parameter ${PARAMETERS_HEADERS})
 add_dependencies(paddle_parameter paddle_proto ${external_project_dependencies})
 if(WITH_TESTING)
   add_subdirectory(tests)
diff --git a/paddle/parameter/FirstOrderOptimizer.cpp b/paddle/legacy/parameter/FirstOrderOptimizer.cpp
similarity index 98%
rename from paddle/parameter/FirstOrderOptimizer.cpp
rename to paddle/legacy/parameter/FirstOrderOptimizer.cpp
index 182e833405..4f82a115f7 100644
--- a/paddle/parameter/FirstOrderOptimizer.cpp
+++ b/paddle/legacy/parameter/FirstOrderOptimizer.cpp
@@ -13,9 +13,9 @@ See the License for the specific language governing permissions and
 limitations under the License. */
 #include "FirstOrderOptimizer.h"
-#include "paddle/math/TrainingAlgorithmOp.h"
-#include "paddle/utils/Flags.h"
-#include "paddle/utils/Util.h"
+#include "paddle/legacy/math/TrainingAlgorithmOp.h"
+#include "paddle/legacy/utils/Flags.h"
+#include "paddle/legacy/utils/Util.h"
 #include
diff --git a/paddle/parameter/FirstOrderOptimizer.h b/paddle/legacy/parameter/FirstOrderOptimizer.h
similarity index 98%
rename from paddle/parameter/FirstOrderOptimizer.h
rename to paddle/legacy/parameter/FirstOrderOptimizer.h
index 047989fcad..86b9a591af 100644
--- a/paddle/parameter/FirstOrderOptimizer.h
+++ b/paddle/legacy/parameter/FirstOrderOptimizer.h
@@ -22,7 +22,7 @@ namespace paddle {
 // Plain SGD optimization.
 class SgdOptimizer : public ParameterOptimizer {
-public:
+ public:
   explicit SgdOptimizer(const OptimizationConfig& optConfig)
       : ParameterOptimizer(optConfig) {
     addParameterType(PARAMETER_MOMENTUM);
@@ -77,7 +77,7 @@ class SparseMomentumParameterOptimizer : public ParameterOptimizer {
     \gamma_t: learning rate at the t'th step
 */
-public:
+ public:
   explicit SparseMomentumParameterOptimizer(
       const OptimizationConfig& optConfig);
   virtual void init(size_t numRows, const ParameterConfig* config);
@@ -89,7 +89,7 @@ public:
                                             const ParameterConfig& config) const;
   virtual void finishBatch();
-private:
+ private:
   real alpha_;
   real beta_;
   real tau_;
@@ -98,7 +98,7 @@ private:
   real momentum_;
   real decayRate_;
-protected:
+ protected:
   int64_t timer_;
   mutable std::vector t0Vec_;
   bool isParameterSparse_;
@@ -109,7 +109,7 @@
 * http://www.magicbroom.info/Papers/DuchiHaSi10.pdf
 */
 class AdagradParameterOptimizer : public ParameterOptimizer {
-public:
+ public:
   explicit AdagradParameterOptimizer(const OptimizationConfig& optConfig)
       : ParameterOptimizer(optConfig) {
     addParameterType(PARAMETER_MOMENTUM);
@@ -129,7 +129,7 @@ public:
   virtual TraverseCallback needSpecialTraversal(
       const ParameterConfig& config) const;
-protected:
+ protected:
   int64_t numUpdates_;
   static const int64_t kMaxNumAccumulates = 16384;
 };
@@ -139,7 +139,7 @@
 * http://www.matthewzeiler.com/pubs/googleTR2012/googleTR2012.pdf
 */
 class AdaDeltaParameterOptimizer : public ParameterOptimizer {
-public:
+ public:
   explicit AdaDeltaParameterOptimizer(const OptimizationConfig& optConfig)
       : ParameterOptimizer(optConfig) {
     addParameterType(PARAMETER_MOMENTUM);
@@ -158,14 +158,14 @@ public:
                       const ParameterConfig& config,
                       size_t sparseId) const;
-protected:
+ protected:
   real rou_;
   real epsilon_;
 };
 // RMSProp Parameter Optimization.
 class RMSPropParameterOptimizer : public ParameterOptimizer {
-public:
+ public:
   explicit RMSPropParameterOptimizer(const OptimizationConfig& optConfig)
       : ParameterOptimizer(optConfig) {
     addParameterType(PARAMETER_MOMENTUM);
@@ -191,7 +191,7 @@ public:
                       const ParameterConfig& config,
                       size_t sparseId) const;
-protected:
+ protected:
   real rou_;
   real epsilon_;
@@ -208,7 +208,7 @@ protected:
 // Decayed AdaGrad Optimization.
 class DecayedAdagradParameterOptimizer : public ParameterOptimizer {
-public:
+ public:
   explicit DecayedAdagradParameterOptimizer(const OptimizationConfig& optConfig)
       : ParameterOptimizer(optConfig) {
     addParameterType(PARAMETER_MOMENTUM);
@@ -233,7 +233,7 @@ public:
                       const ParameterConfig& config,
                       size_t sparseId) const;
-protected:
+ protected:
   real rou_;
   real epsilon_;
@@ -253,7 +253,7 @@ protected:
 * Reference Paper: http://arxiv.org/abs/1412.6980 Algorithm 1
 */
 class AdamParameterOptimizer : public ParameterOptimizer {
-public:
+ public:
   explicit AdamParameterOptimizer(const OptimizationConfig& optConfig)
       : ParameterOptimizer(optConfig),
         beta1_(optConfig.adam_beta1()),
@@ -275,7 +275,7 @@ public:
                       const ParameterConfig& config,
                       size_t sparseId) const;
-protected:
+ protected:
   real beta1_;
   real beta2_;
   real epsilon_;
@@ -288,7 +288,7 @@ protected:
 * Reference Paper: http://arxiv.org/abs/1412.6980 Algorithm 2
 */
 class AdamaxParameterOptimizer : public ParameterOptimizer {
-public:
+ public:
   explicit AdamaxParameterOptimizer(const OptimizationConfig& optConfig)
       : ParameterOptimizer(optConfig),
         beta1_(optConfig.adam_beta1()),
@@ -305,7 +305,7 @@ public:
                       const ParameterConfig& config,
                       size_t sparseId) const;
-protected:
+ protected:
   real beta1_;
   real beta2_;
   int64_t step_;
@@ -315,7 +315,7 @@ protected:
 // Used in pserver,
 // when PARAMETER_DELTA stores in PARAMETER_GRADIENT.
 class AddOptimizer : public ParameterOptimizer {
-public:
+ public:
   explicit AddOptimizer(const OptimizationConfig& optConfig)
       : ParameterOptimizer(optConfig) {}
@@ -333,7 +333,7 @@ public:
 // An optimizer which does nothing.
 class DummyOptimizer : public ParameterOptimizer {
-public:
+ public:
   explicit DummyOptimizer(const OptimizationConfig& optConfig)
       : ParameterOptimizer(optConfig) {}
@@ -344,7 +344,7 @@ public:
 // Do gradient clipping before sgd update
 class OptimizerWithGradientClipping : public ParameterOptimizer {
-public:
+ public:
   OptimizerWithGradientClipping(const OptimizationConfig& optConfig,
                                 ParameterOptimizer* optimizer)
       : ParameterOptimizer(optConfig), optimizer_(optimizer) {
@@ -374,7 +374,7 @@ public:
   virtual void setNoDecay() { optimizer_->setNoDecay(); }
-protected:
+ protected:
   std::unique_ptr optimizer_;
 };
diff --git a/paddle/parameter/LearningRateScheduler.cpp b/paddle/legacy/parameter/LearningRateScheduler.cpp
similarity index 97%
rename from paddle/parameter/LearningRateScheduler.cpp
rename to paddle/legacy/parameter/LearningRateScheduler.cpp
index b6b58e3dda..68c44a7ec4 100644
--- a/paddle/parameter/LearningRateScheduler.cpp
+++ b/paddle/legacy/parameter/LearningRateScheduler.cpp
@@ -13,7 +13,7 @@ See the License for the specific language governing permissions and
 limitations under the License.
 */
 #include "LearningRateScheduler.h"
-#include "paddle/utils/StringUtil.h"
+#include "paddle/legacy/utils/StringUtil.h"
 namespace paddle {
@@ -28,20 +28,20 @@
 LearningRateScheduler* LearningRateScheduler::create(
 // LRS stands for LearningRateScheduler
 class BaseLRS : public LearningRateScheduler {
-public:
+ public:
   explicit BaseLRS(const OptimizationConfig& config)
       : learningRate_(config.learning_rate()),
         a_(config.learning_rate_decay_a()),
         b_(config.learning_rate_decay_b()) {}
-protected:
+ protected:
   real learningRate_;
   real a_;
   real b_;
 };
 class ConstLRS : public BaseLRS {
-public:
+ public:
   explicit ConstLRS(const OptimizationConfig& config) : BaseLRS(config) {}
   virtual real calcLearningRate(int64_t numSamplesProcessed, int64_t pass) {
     return learningRate_;
@@ -50,7 +50,7 @@ public:
 REGISTER_LEARNING_RATE_SCHEDULER(constant, ConstLRS);
 class PolyLRS : public BaseLRS {
-public:
+ public:
   explicit PolyLRS(const OptimizationConfig& config) : BaseLRS(config) {}
   virtual real calcLearningRate(int64_t numSamplesProcessed, int64_t pass) {
     return learningRate_ * pow(1.0 + a_ * numSamplesProcessed, -b_);
@@ -59,7 +59,7 @@ public:
 REGISTER_LEARNING_RATE_SCHEDULER(poly, PolyLRS);
 class CaffePolyLRS : public BaseLRS {
-public:
+ public:
   explicit CaffePolyLRS(const OptimizationConfig& config) : BaseLRS(config) {}
   virtual real calcLearningRate(int64_t numSamplesProcessed, int64_t pass) {
     if (numSamplesProcessed > a_) {
@@ -78,7 +78,7 @@ public:
 REGISTER_LEARNING_RATE_SCHEDULER(caffe_poly, CaffePolyLRS);
 class ExpLRS : public BaseLRS {
-public:
+ public:
   explicit ExpLRS(const OptimizationConfig& config) : BaseLRS(config) {}
   virtual real calcLearningRate(int64_t numSamplesProcessed, int64_t pass) {
     double decayRatio = (double)numSamplesProcessed / b_;
@@ -88,7 +88,7 @@ public:
 REGISTER_LEARNING_RATE_SCHEDULER(exp, ExpLRS);
 class DiscreteExpLRS : public BaseLRS {
-public:
+ public:
   explicit DiscreteExpLRS(const OptimizationConfig& config) : BaseLRS(config) {}
   virtual real calcLearningRate(int64_t numSamplesProcessed, int64_t pass) {
     int numDecays = floor(numSamplesProcessed / b_);
@@ -98,7 +98,7 @@ public:
 REGISTER_LEARNING_RATE_SCHEDULER(discexp, DiscreteExpLRS);
 class LinearLRS : public BaseLRS {
-public:
+ public:
   explicit LinearLRS(const OptimizationConfig& config) : BaseLRS(config) {}
   virtual real calcLearningRate(int64_t numSamplesProcessed, int64_t pass) {
     return std::max(learningRate_ - a_ * numSamplesProcessed, b_);
@@ -113,7 +113,7 @@
 REGISTER_LEARNING_RATE_SCHEDULER(linear, LinearLRS);
 /*
   then learning_rate = learning_rate_base * rate_i
 */
 class ManualLRS : public BaseLRS {
-public:
+ public:
   explicit ManualLRS(const OptimizationConfig& config)
       : BaseLRS(config), currentSegment_(0), lastNum_(0) {
     std::vector pieces;
@@ -151,7 +151,7 @@ public:
     return learningRate_ * rates_.back();
   }
-protected:
+ protected:
   std::vector rates_;
   std::vector segments_;
   size_t currentSegment_;
@@ -161,7 +161,7 @@ protected:
 REGISTER_LEARNING_RATE_SCHEDULER(manual, ManualLRS);
 class PassManualLRS : public ManualLRS {
-public:
+ public:
   explicit PassManualLRS(const OptimizationConfig& config)
       : ManualLRS(config) {}
   virtual real calcLearningRate(int64_t numSamplesProcessed, int64_t pass) {
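The scheduler zoo above is easier to compare as formulas. With base rate \(\eta_0\), decay parameters \(a\) and \(b\), and \(n\) samples processed, the calcLearningRate bodies shown in the hunks correspond to (the exp and discexp return statements are partly elided by the hunk context, so those two are inferred from the shown decay-ratio computations):

\[
\begin{aligned}
\text{constant:} &\quad \eta(n) = \eta_0 \\
\text{poly:} &\quad \eta(n) = \eta_0 \,(1 + a n)^{-b} \\
\text{exp:} &\quad \eta(n) = \eta_0 \, a^{\,n/b} \\
\text{discexp:} &\quad \eta(n) = \eta_0 \, a^{\lfloor n/b \rfloor} \\
\text{linear:} &\quad \eta(n) = \max(\eta_0 - a n,\; b)
\end{aligned}
\]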
b/paddle/legacy/parameter/LearningRateScheduler.h @@ -15,7 +15,7 @@ limitations under the License. */ #pragma once #include "TrainerConfig.pb.h" -#include "paddle/utils/ClassRegistrar.h" +#include "paddle/legacy/utils/ClassRegistrar.h" namespace paddle { // NOLINTNEXTLINES_4 @@ -26,7 +26,7 @@ namespace paddle { }) class LearningRateScheduler { -public: + public: static LearningRateScheduler* create(const OptimizationConfig& config); virtual ~LearningRateScheduler() {} virtual real calcLearningRate(int64_t numSamplesProcessed, int64_t pass) = 0; diff --git a/paddle/parameter/OptimizerFunctions.cpp b/paddle/legacy/parameter/OptimizerFunctions.cpp similarity index 100% rename from paddle/parameter/OptimizerFunctions.cpp rename to paddle/legacy/parameter/OptimizerFunctions.cpp diff --git a/paddle/parameter/OptimizerFunctions.h b/paddle/legacy/parameter/OptimizerFunctions.h similarity index 100% rename from paddle/parameter/OptimizerFunctions.h rename to paddle/legacy/parameter/OptimizerFunctions.h diff --git a/paddle/parameter/OptimizerWithRegularizer.cpp b/paddle/legacy/parameter/OptimizerWithRegularizer.cpp similarity index 100% rename from paddle/parameter/OptimizerWithRegularizer.cpp rename to paddle/legacy/parameter/OptimizerWithRegularizer.cpp diff --git a/paddle/parameter/OptimizerWithRegularizer.h b/paddle/legacy/parameter/OptimizerWithRegularizer.h similarity index 98% rename from paddle/parameter/OptimizerWithRegularizer.h rename to paddle/legacy/parameter/OptimizerWithRegularizer.h index 7219d96d92..bd29b39663 100644 --- a/paddle/parameter/OptimizerWithRegularizer.h +++ b/paddle/legacy/parameter/OptimizerWithRegularizer.h @@ -20,7 +20,7 @@ namespace paddle { // add regularizer for objective function to do optimization class OptimizerWithRegularizer : public ParameterOptimizer { -public: + public: static ParameterOptimizer* create(const OptimizationConfig& optConfig, const ParameterConfig& paraConfig, bool isParameterSparse, @@ -67,7 +67,7 @@ public: regularizer_->update(vecs, config, optimizer_->getLearningRate(), 0, 1); } -protected: + protected: std::unique_ptr optimizer_; Regularizer* regularizer_; @@ -84,7 +84,7 @@ protected: // Regularized Loss function for every num of batches class OptimizerWithRegularizerEveryNumBatches : public OptimizerWithRegularizer { -public: + public: OptimizerWithRegularizerEveryNumBatches(const OptimizationConfig& optConfig, ParameterOptimizer* optimizer, Regularizer* regularizer) @@ -112,7 +112,7 @@ public: virtual TraverseCallback startCatchUpWith() const; virtual void finishCatchUpWith() { baseTimer_ = timer_; } -protected: + protected: bool isRegularizationBatch(const ParameterConfig& config) const { return ((timer_ + 1) % config.num_batches_regularization() == 0); } @@ -125,7 +125,7 @@ protected: // Regularized Loss function with Sparse support class OptimizerWithRegularizerSparse : public OptimizerWithRegularizer { -public: + public: OptimizerWithRegularizerSparse(const OptimizationConfig& optConfig, ParameterOptimizer* optimizer, Regularizer* regularizer) @@ -145,7 +145,7 @@ public: t0Vec_.assign(t0Vec_.size(), 0); } -protected: + protected: /** * t0Vec_ are last occur time of i rows * if one block is update by multi threads, diff --git a/paddle/parameter/Parameter.cpp b/paddle/legacy/parameter/Parameter.cpp similarity index 98% rename from paddle/parameter/Parameter.cpp rename to paddle/legacy/parameter/Parameter.cpp index 0e6ea90f3d..666d808f0c 100644 --- a/paddle/parameter/Parameter.cpp +++ b/paddle/legacy/parameter/Parameter.cpp @@ -22,10 
+22,10 @@ limitations under the License. */ #include "ParameterUpdateFunctions.h" #include "ThreadLocalBuffer.h" #include "hl_gpu.h" -#include "paddle/math/CpuSparseMatrix.h" -#include "paddle/math/MathUtils.h" -#include "paddle/math/SparseRowMatrix.h" -#include "paddle/utils/Logging.h" +#include "paddle/legacy/math/CpuSparseMatrix.h" +#include "paddle/legacy/math/MathUtils.h" +#include "paddle/legacy/math/SparseRowMatrix.h" +#include "paddle/legacy/utils/Logging.h" DEFINE_int32(enable_grad_share, (100 * 1024 * 1024), diff --git a/paddle/parameter/Parameter.h b/paddle/legacy/parameter/Parameter.h similarity index 96% rename from paddle/parameter/Parameter.h rename to paddle/legacy/parameter/Parameter.h index 24ac10f3fe..43b567dad0 100644 --- a/paddle/parameter/Parameter.h +++ b/paddle/legacy/parameter/Parameter.h @@ -24,13 +24,13 @@ limitations under the License. */ #include "TrainerConfig.pb.h" #include "ParameterUpdaterHook.h" -#include "paddle/math/Matrix.h" -#include "paddle/math/Vector.h" -#include "paddle/utils/Common.h" -#include "paddle/utils/GlobalConstants.h" -#include "paddle/utils/Locks.h" -#include "paddle/utils/ThreadLocal.h" -#include "paddle/utils/Util.h" +#include "paddle/legacy/math/Matrix.h" +#include "paddle/legacy/math/Vector.h" +#include "paddle/legacy/utils/Common.h" +#include "paddle/legacy/utils/GlobalConstants.h" +#include "paddle/legacy/utils/Locks.h" +#include "paddle/legacy/utils/ThreadLocal.h" +#include "paddle/legacy/utils/Util.h" namespace paddle { @@ -58,7 +58,7 @@ class Parameter; typedef std::shared_ptr ParameterPtr; class Parameter { -public: + public: Parameter(const ParameterConfig& config, bool useGpu, bool doInit = true); const std::string& getName() const { return config_.name(); } @@ -311,7 +311,7 @@ public: } } -protected: + protected: /** * @brief create matrix to matType. * @@ -326,7 +326,7 @@ protected: void clearUpdate() { updateCounter_ = 0; } -protected: + protected: ParameterConfig config_; bool useGpu_; @@ -363,7 +363,7 @@ protected: std::vector> updaterHooks_; -public: + public: void setSharedCount(int cnt) { sharedCount_ = cnt; } int getSharedCount() { return sharedCount_; } diff --git a/paddle/parameter/ParameterOptimizer.cpp b/paddle/legacy/parameter/ParameterOptimizer.cpp similarity index 98% rename from paddle/parameter/ParameterOptimizer.cpp rename to paddle/legacy/parameter/ParameterOptimizer.cpp index 638daa58f1..b9dffa5afb 100644 --- a/paddle/parameter/ParameterOptimizer.cpp +++ b/paddle/legacy/parameter/ParameterOptimizer.cpp @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/utils/Logging.h" +#include "paddle/legacy/utils/Logging.h" #include diff --git a/paddle/parameter/ParameterOptimizer.h b/paddle/legacy/parameter/ParameterOptimizer.h similarity index 99% rename from paddle/parameter/ParameterOptimizer.h rename to paddle/legacy/parameter/ParameterOptimizer.h index a8d0ca72f2..019afa1358 100644 --- a/paddle/parameter/ParameterOptimizer.h +++ b/paddle/legacy/parameter/ParameterOptimizer.h @@ -30,12 +30,12 @@ namespace paddle { * may be called many times, should be no state change between calls. 
*/ class ParameterOptimizer { -public: + public: typedef std::function TraverseCallback; -public: + public: explicit ParameterOptimizer(const OptimizationConfig& optConfig) : applyDecay_(true), optConfig_(optConfig), @@ -175,7 +175,7 @@ public: static ParameterOptimizer* create(const OptimizationConfig& optConfig, bool inPserver = false); -protected: + protected: typedef std::vector TraverseCallbackVec; static TraverseCallback composeCallbacks( diff --git a/paddle/parameter/ParameterUpdateFunctions.cpp b/paddle/legacy/parameter/ParameterUpdateFunctions.cpp similarity index 99% rename from paddle/parameter/ParameterUpdateFunctions.cpp rename to paddle/legacy/parameter/ParameterUpdateFunctions.cpp index db1153c2d6..72c9841acf 100644 --- a/paddle/parameter/ParameterUpdateFunctions.cpp +++ b/paddle/legacy/parameter/ParameterUpdateFunctions.cpp @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/utils/Logging.h" +#include "paddle/legacy/utils/Logging.h" #ifdef __AVX__ #include #include diff --git a/paddle/parameter/ParameterUpdateFunctions.h b/paddle/legacy/parameter/ParameterUpdateFunctions.h similarity index 95% rename from paddle/parameter/ParameterUpdateFunctions.h rename to paddle/legacy/parameter/ParameterUpdateFunctions.h index 7434baa2d3..a7cc1c4c47 100644 --- a/paddle/parameter/ParameterUpdateFunctions.h +++ b/paddle/legacy/parameter/ParameterUpdateFunctions.h @@ -14,8 +14,8 @@ limitations under the License. */ #pragma once -#include "paddle/math/Vector.h" -#include "paddle/utils/Common.h" +#include "paddle/legacy/math/Vector.h" +#include "paddle/legacy/utils/Common.h" namespace paddle { diff --git a/paddle/parameter/ParameterUpdaterBase.cpp b/paddle/legacy/parameter/ParameterUpdaterBase.cpp similarity index 96% rename from paddle/parameter/ParameterUpdaterBase.cpp rename to paddle/legacy/parameter/ParameterUpdaterBase.cpp index 7815856b45..7d9d3fad63 100644 --- a/paddle/parameter/ParameterUpdaterBase.cpp +++ b/paddle/legacy/parameter/ParameterUpdaterBase.cpp @@ -15,7 +15,7 @@ limitations under the License. */ #include "ParameterUpdaterBase.h" #include #include "hl_gpu.h" -#include "paddle/utils/Logging.h" +#include "paddle/legacy/utils/Logging.h" namespace paddle { diff --git a/paddle/parameter/ParameterUpdaterBase.h b/paddle/legacy/parameter/ParameterUpdaterBase.h similarity index 99% rename from paddle/parameter/ParameterUpdaterBase.h rename to paddle/legacy/parameter/ParameterUpdaterBase.h index 717e1c6721..493512886c 100644 --- a/paddle/parameter/ParameterUpdaterBase.h +++ b/paddle/legacy/parameter/ParameterUpdaterBase.h @@ -21,7 +21,7 @@ namespace paddle { class ParameterOptimizer; class ParameterUpdater { -public: + public: ParameterUpdater() : parameterTypes_{PARAMETER_VALUE, PARAMETER_GRADIENT} {} virtual ~ParameterUpdater() {} @@ -89,7 +89,7 @@ public: virtual void setForwardbackwardTime(uint64_t delta) {} #endif -protected: + protected: virtual void updateImpl(Parameter* para) = 0; std::vector parameterTypes_; @@ -101,7 +101,7 @@ protected: // part of all Parameters. It's useful when we need different // update strategy for different Parameter. 
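Stepping back briefly to the LearningRateScheduler rename above: each scheduler is a small pure function of the processed-sample counter. A hedged standalone restatement in C++, where lr0 is learning_rate and a, b are learning_rate_decay_a/b; note the exp and discexp return expressions are truncated in the hunk above, so their pow(a, .) forms below are an assumption to verify against the file:

#include <algorithm>
#include <cmath>
#include <cstdint>

double polyLR(double lr0, double a, double b, int64_t n) {
  return lr0 * std::pow(1.0 + a * n, -b);                // as in PolyLRS
}
double expLR(double lr0, double a, double b, int64_t n) {
  return lr0 * std::pow(a, static_cast<double>(n) / b);  // ExpLRS (assumed a^(n/b))
}
double discExpLR(double lr0, double a, double b, int64_t n) {
  return lr0 * std::pow(a, std::floor(static_cast<double>(n) / b));  // DiscreteExpLRS (assumed)
}
double linearLR(double lr0, double a, double b, int64_t n) {
  return std::max(lr0 - a * n, b);                       // LinearLRS, floored at b
}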
class ParameterUpdaterComposite : public ParameterUpdater { -public: + public: ParameterUpdaterComposite() {} virtual ~ParameterUpdaterComposite() {} @@ -173,7 +173,7 @@ public: [&](int tid, size_t numThreads) { updaters_[tid]->restore(); }); } -protected: + protected: virtual void updateImpl(Parameter* para) {} std::vector> updaters_; std::unique_ptr syncThreadPool_; diff --git a/paddle/parameter/ParameterUpdaterHook.cpp b/paddle/legacy/parameter/ParameterUpdaterHook.cpp similarity index 95% rename from paddle/parameter/ParameterUpdaterHook.cpp rename to paddle/legacy/parameter/ParameterUpdaterHook.cpp index e6aec3c348..bfb9769fb6 100644 --- a/paddle/parameter/ParameterUpdaterHook.cpp +++ b/paddle/legacy/parameter/ParameterUpdaterHook.cpp @@ -22,10 +22,10 @@ limitations under the License. */ #include #include -#include "paddle/math/Vector.h" -#include "paddle/parameter/Parameter.h" -#include "paddle/utils/Flags.h" -#include "paddle/utils/Util.h" +#include "paddle/legacy/math/Vector.h" +#include "paddle/legacy/parameter/Parameter.h" +#include "paddle/legacy/utils/Flags.h" +#include "paddle/legacy/utils/Util.h" namespace paddle { @@ -37,7 +37,7 @@ namespace paddle { */ class StaticPruningHook : public IParameterUpdaterHook { -public: + public: explicit StaticPruningHook(const ParameterUpdaterHookConfig &hookConfig) : initCount_(0) { sparsityRatio_ = hookConfig.sparsity_ratio(); @@ -96,7 +96,7 @@ public: paraVec->dotMul(*maskVec_); } -private: + private: SameThreadChecker updateThreadChecker_; std::atomic initCount_; VectorPtr maskVec_; @@ -116,12 +116,12 @@ IParameterUpdaterHook::~IParameterUpdaterHook() {} * May be extracted to Util.h to unify the hasher. */ class StringIntPairHasher { -public: + public: size_t operator()(const std::pair &k) const { return intHasher_(strHasher_(k.first) + k.second); } -private: + private: std::hash strHasher_; std::hash intHasher_; }; diff --git a/paddle/parameter/ParameterUpdaterHook.h b/paddle/legacy/parameter/ParameterUpdaterHook.h similarity index 98% rename from paddle/parameter/ParameterUpdaterHook.h rename to paddle/legacy/parameter/ParameterUpdaterHook.h index d30530ec39..cb96e4cf00 100644 --- a/paddle/parameter/ParameterUpdaterHook.h +++ b/paddle/legacy/parameter/ParameterUpdaterHook.h @@ -29,7 +29,7 @@ class Parameter; * parameter optimization. */ class IParameterUpdaterHook { -public: + public: virtual ~IParameterUpdaterHook(); /** @@ -53,7 +53,7 @@ public: */ virtual void init(Parameter* para) = 0; -protected: + protected: /** * Ctor. */ diff --git a/paddle/parameter/Regularizer.cpp b/paddle/legacy/parameter/Regularizer.cpp similarity index 95% rename from paddle/parameter/Regularizer.cpp rename to paddle/legacy/parameter/Regularizer.cpp index d223fd2df6..c1d5f4fa68 100644 --- a/paddle/parameter/Regularizer.cpp +++ b/paddle/legacy/parameter/Regularizer.cpp @@ -13,8 +13,8 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "Regularizer.h" -#include "paddle/utils/Flags.h" -#include "paddle/utils/Util.h" +#include "paddle/legacy/utils/Flags.h" +#include "paddle/legacy/utils/Util.h" namespace paddle { diff --git a/paddle/parameter/Regularizer.h b/paddle/legacy/parameter/Regularizer.h similarity index 99% rename from paddle/parameter/Regularizer.h rename to paddle/legacy/parameter/Regularizer.h index 6bed7b0ddf..fa5384e232 100644 --- a/paddle/parameter/Regularizer.h +++ b/paddle/legacy/parameter/Regularizer.h @@ -20,7 +20,7 @@ namespace paddle { // Regularizer function for parameter, e.g. 
L1/L2 class Regularizer { -public: + public: virtual void update(const VectorPtr vecs[], const ParameterConfig& paraConfig, real learningRate, // learningrate from optimizer diff --git a/paddle/parameter/ThreadLocalBuffer.cpp b/paddle/legacy/parameter/ThreadLocalBuffer.cpp similarity index 100% rename from paddle/parameter/ThreadLocalBuffer.cpp rename to paddle/legacy/parameter/ThreadLocalBuffer.cpp diff --git a/paddle/parameter/ThreadLocalBuffer.h b/paddle/legacy/parameter/ThreadLocalBuffer.h similarity index 94% rename from paddle/parameter/ThreadLocalBuffer.h rename to paddle/legacy/parameter/ThreadLocalBuffer.h index 07c96e59d0..d360feeed6 100644 --- a/paddle/parameter/ThreadLocalBuffer.h +++ b/paddle/legacy/parameter/ThreadLocalBuffer.h @@ -13,7 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #pragma once -#include "paddle/math/Vector.h" +#include "paddle/legacy/math/Vector.h" namespace paddle { namespace parameter { diff --git a/paddle/parameter/Weight.cpp b/paddle/legacy/parameter/Weight.cpp similarity index 98% rename from paddle/parameter/Weight.cpp rename to paddle/legacy/parameter/Weight.cpp index ba4ddce69f..9d94050a5c 100644 --- a/paddle/parameter/Weight.cpp +++ b/paddle/legacy/parameter/Weight.cpp @@ -13,7 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #include "Weight.h" -#include "paddle/utils/Logging.h" +#include "paddle/legacy/utils/Logging.h" namespace paddle { diff --git a/paddle/parameter/Weight.h b/paddle/legacy/parameter/Weight.h similarity index 89% rename from paddle/parameter/Weight.h rename to paddle/legacy/parameter/Weight.h index 7314c29d0d..241c8d829c 100644 --- a/paddle/parameter/Weight.h +++ b/paddle/legacy/parameter/Weight.h @@ -16,19 +16,19 @@ limitations under the License. */ #include #include -#include "paddle/math/Matrix.h" -#include "paddle/math/SparseRowMatrix.h" -#include "paddle/parameter/Parameter.h" +#include "paddle/legacy/math/Matrix.h" +#include "paddle/legacy/math/SparseRowMatrix.h" +#include "paddle/legacy/parameter/Parameter.h" namespace paddle { class Weight { -private: + private: MatrixPtr weight_; MatrixPtr weightGrad_; ParameterPtr parameter_; -public: + public: Weight(size_t height, size_t width, ParameterPtr parameter); Weight(size_t height, size_t width, ParameterPtr parameter, size_t offset); diff --git a/paddle/parameter/tests/CMakeLists.txt b/paddle/legacy/parameter/tests/CMakeLists.txt similarity index 100% rename from paddle/parameter/tests/CMakeLists.txt rename to paddle/legacy/parameter/tests/CMakeLists.txt diff --git a/paddle/parameter/tests/test_argument.cpp b/paddle/legacy/parameter/tests/test_argument.cpp similarity index 97% rename from paddle/parameter/tests/test_argument.cpp rename to paddle/legacy/parameter/tests/test_argument.cpp index 54ceb3e087..0c632e0cd1 100644 --- a/paddle/parameter/tests/test_argument.cpp +++ b/paddle/legacy/parameter/tests/test_argument.cpp @@ -13,7 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. 
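The Regularizer::update signature renamed above receives the learning rate from the optimizer. Purely as a hedged illustration of what an L2 regularizer of that shape does per step (the real L1/L2 implementations are not shown in this diff, and the names below are hypothetical):

#include <cstddef>

// Hypothetical L2 weight decay with the same inputs as Regularizer::update:
// scale the weight vector by (1 - lr * decayRate) for one update step.
void l2DecayUpdate(float* w, size_t n, float learningRate, float decayRate) {
  const float scale = 1.0f - learningRate * decayRate;
  for (size_t i = 0; i < n; ++i) w[i] *= scale;
}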
*/ #include -#include +#include using namespace paddle; // NOLINT diff --git a/paddle/parameter/tests/test_common.cpp b/paddle/legacy/parameter/tests/test_common.cpp similarity index 95% rename from paddle/parameter/tests/test_common.cpp rename to paddle/legacy/parameter/tests/test_common.cpp index 6e10becabb..8de9d6da98 100644 --- a/paddle/parameter/tests/test_common.cpp +++ b/paddle/legacy/parameter/tests/test_common.cpp @@ -12,19 +12,19 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include +#include #include #include -#include -#include -#include -#include +#include +#include +#include +#include using namespace paddle; // NOLINT class CommonTest : public ::testing::Test { -protected: + protected: CommonTest() : testStat_("test") {} virtual ~CommonTest() {} virtual void SetUp() { @@ -51,7 +51,7 @@ protected: virtual void TreaDown() { LOG(INFO) << "All Test Finished."; } -protected: + protected: std::vector> valueUint_; std::vector sizeVec_; real learningRate_; diff --git a/paddle/pserver/BaseClient.cpp b/paddle/legacy/pserver/BaseClient.cpp similarity index 98% rename from paddle/pserver/BaseClient.cpp rename to paddle/legacy/pserver/BaseClient.cpp index a6204ef47e..13bb8a1cc5 100644 --- a/paddle/pserver/BaseClient.cpp +++ b/paddle/legacy/pserver/BaseClient.cpp @@ -16,7 +16,7 @@ limitations under the License. */ #include #include #include -#include "paddle/utils/Stat.h" +#include "paddle/legacy/utils/Stat.h" DECLARE_string(pservers); diff --git a/paddle/pserver/BaseClient.h b/paddle/legacy/pserver/BaseClient.h similarity index 97% rename from paddle/pserver/BaseClient.h rename to paddle/legacy/pserver/BaseClient.h index a932d34712..66e8f39cd6 100644 --- a/paddle/pserver/BaseClient.h +++ b/paddle/legacy/pserver/BaseClient.h @@ -15,10 +15,10 @@ limitations under the License. */ #pragma once #include "ParameterService.pb.h" -#include "paddle/math/Matrix.h" -#include "paddle/pserver/ProtoServer.h" -#include "paddle/utils/Common.h" -#include "paddle/utils/Queue.h" +#include "paddle/legacy/math/Matrix.h" +#include "paddle/legacy/pserver/ProtoServer.h" +#include "paddle/legacy/utils/Common.h" +#include "paddle/legacy/utils/Queue.h" namespace paddle { @@ -32,7 +32,7 @@ namespace paddle { * connections. */ class BaseClient { -protected: + protected: typedef std::unique_ptr ThreadPtr; typedef std::vector> InputIovs; typedef std::vector SendRequest; @@ -49,7 +49,7 @@ protected: SendDataRequestVec parallelDataRequests; }; -public: + public: explicit BaseClient(bool separate = false, int numPorts = FLAGS_ports_num); virtual ~BaseClient(); @@ -141,7 +141,7 @@ public: return dataType; } -protected: + protected: /// for a > 0, b > 0: /// return the smallest x s.t. 
b*x >= a static int divup(int a, int b) { return (a + b - 1) / b; } @@ -264,7 +264,7 @@ protected: */ virtual void recv(int threadId) = 0; -protected: + protected: bool stopping_; /// nodes * ports that means the number of real pservers int serviceNum_; diff --git a/paddle/pserver/CMakeLists.txt b/paddle/legacy/pserver/CMakeLists.txt similarity index 85% rename from paddle/pserver/CMakeLists.txt rename to paddle/legacy/pserver/CMakeLists.txt index f75475a88f..0ae9c6ef6a 100644 --- a/paddle/pserver/CMakeLists.txt +++ b/paddle/legacy/pserver/CMakeLists.txt @@ -14,9 +14,6 @@ set(NETWORK_HEADERS add_library(paddle_network STATIC ${NETWORK_SOURCES}) -add_style_check_target(paddle_network ${NETWORK_SOURCES}) -add_style_check_target(paddle_network ${NETWORK_HEADERS}) - add_dependencies(paddle_network paddle_proto ${external_project_dependencies}) ################### paddle_pserver ###################### @@ -37,9 +34,6 @@ set(PSERVER_HEADERS add_library(paddle_pserver STATIC ${PSERVER_SOURCES}) -add_style_check_target(paddle_pserver ${PSERVER_SOURCES}) -add_style_check_target(paddle_pserver ${PSERVER_HEADERS}) - add_dependencies(paddle_pserver paddle_proto ${external_project_dependencies}) set(PSERVER_MAIN_SOURCES diff --git a/paddle/pserver/LightNetwork.cpp b/paddle/legacy/pserver/LightNetwork.cpp similarity index 99% rename from paddle/pserver/LightNetwork.cpp rename to paddle/legacy/pserver/LightNetwork.cpp index 4c0da2217e..469c95853e 100644 --- a/paddle/pserver/LightNetwork.cpp +++ b/paddle/legacy/pserver/LightNetwork.cpp @@ -27,8 +27,8 @@ limitations under the License. */ #include "LightNetwork.h" #include "RDMANetwork.h" -#include "paddle/utils/StringUtil.h" -#include "paddle/utils/Util.h" +#include "paddle/legacy/utils/StringUtil.h" +#include "paddle/legacy/utils/Util.h" /// quick ack can reduce the latency of small messages DEFINE_bool(small_messages, diff --git a/paddle/pserver/LightNetwork.h b/paddle/legacy/pserver/LightNetwork.h similarity index 96% rename from paddle/pserver/LightNetwork.h rename to paddle/legacy/pserver/LightNetwork.h index 2aaa26a5c7..380f86832f 100644 --- a/paddle/pserver/LightNetwork.h +++ b/paddle/legacy/pserver/LightNetwork.h @@ -21,7 +21,7 @@ limitations under the License. */ #include #include -#include "paddle/utils/Thread.h" +#include "paddle/legacy/utils/Thread.h" struct sxi_socket; @@ -41,7 +41,7 @@ class SocketServer : public Thread { // rdmaCpu controls the cpu affinity of RDMA server daemon, // which could benefit performance. rdmaCpu = -1 means TCP // is used instead of RDMA transport.
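The divup helper from BaseClient.h above is plain ceiling division, used when sizing per-pserver blocks. A self-contained restatement with a quick check:

#include <cassert>

static int divup(int a, int b) { return (a + b - 1) / b; }  // smallest x with b*x >= a

int main() {
  assert(divup(10, 3) == 4);  // 10 elements over 3 pservers -> blocks of 4
  assert(divup(9, 3) == 3);   // exact division stays exact
  return 0;
}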
-public: + public: SocketServer(const std::string& addr, int port, int rdmaCpu); ~SocketServer(); @@ -50,7 +50,7 @@ public: typedef std::function& outputIovs)> ResponseCallback; -protected: + protected: // // The derived class needs to implement this function // to handle the request received by SocketWorker @@ -70,13 +70,13 @@ protected: friend class SocketWorker; -private: + private: void rdmaServer(); void tcpServer(); void detach() {} // detach accept thread is forbidden -protected: + protected: enum ChannelType tcpRdma_; // for rdma int rdmaCpu_; @@ -96,7 +96,7 @@ protected: * @note all parameter processing will run in the context of this worker */ class SocketWorker : public Thread { -public: + public: SocketWorker(std::unique_ptr&& channel, SocketServer* server) : channel_(std::move(channel)), server_(server) {} @@ -104,7 +104,7 @@ public: virtual void run(); -protected: + protected: std::unique_ptr channel_; SocketServer* server_; enum ChannelType tcpRdma_; @@ -118,12 +118,12 @@ protected: * single cpu core for better load balance performance */ class RdmaClientDaemons { -private: + private: RdmaClientDaemons(); static std::unique_ptr daemons_; -public: + public: static RdmaClientDaemons* get() { std::call_once(RdmaClientDaemons::initDataFlag_, &RdmaClientDaemons::getInstance); @@ -141,10 +141,10 @@ public: ~RdmaClientDaemons(); -public: + public: friend class SocketClient; -private: + private: static std::once_flag initDataFlag_; static void getInstance() { if (!daemons_.get()) daemons_.reset(new RdmaClientDaemons()); @@ -162,19 +162,19 @@ private: * read data */ class SocketClient { -public: + public: SocketClient(const std::string& serverAddr, int serverPort, enum ChannelType channelType); SocketChannel* getChannel() { return channel_.get(); } -protected: + protected: std::unique_ptr channel_; struct sxi_socket* socketDaemon_; enum ChannelType tcpRdma_; -private: + private: void RdmaClient(const std::string& serverAddr, int serverPort); void TcpClient(const std::string& serverAddr, int serverPort); }; diff --git a/paddle/pserver/ParameterClient2.cpp b/paddle/legacy/pserver/ParameterClient2.cpp similarity index 99% rename from paddle/pserver/ParameterClient2.cpp rename to paddle/legacy/pserver/ParameterClient2.cpp index 43e4902b0f..4c544ddc28 100644 --- a/paddle/pserver/ParameterClient2.cpp +++ b/paddle/legacy/pserver/ParameterClient2.cpp @@ -15,10 +15,10 @@ limitations under the License. */ #include #include "ParameterClient2.h" -#include "paddle/math/SparseRowMatrix.h" -#include "paddle/utils/Flags.h" -#include "paddle/utils/Stat.h" -#include "paddle/utils/StringUtil.h" +#include "paddle/legacy/math/SparseRowMatrix.h" +#include "paddle/legacy/utils/Flags.h" +#include "paddle/legacy/utils/Stat.h" +#include "paddle/legacy/utils/StringUtil.h" DEFINE_string(pservers, "127.0.0.1", "Comma separated addresses of pservers"); DEFINE_int32(parallel_thread_num, 1, "Thread number for parameter send"); diff --git a/paddle/pserver/ParameterClient2.h b/paddle/legacy/pserver/ParameterClient2.h similarity index 97% rename from paddle/pserver/ParameterClient2.h rename to paddle/legacy/pserver/ParameterClient2.h index d63273ccbc..9320e19c4d 100644 --- a/paddle/pserver/ParameterClient2.h +++ b/paddle/legacy/pserver/ParameterClient2.h @@ -19,15 +19,15 @@ limitations under the License. 
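RdmaClientDaemons above uses the std::call_once idiom for lazy, thread-safe construction of its single instance. A minimal generic sketch of the same pattern (illustrative class, not the RDMA code):

#include <memory>
#include <mutex>

class Daemons {
 public:
  // Lazily construct the single instance, safe under concurrent callers.
  static Daemons* get() {
    std::call_once(initFlag_, [] { instance_.reset(new Daemons()); });
    return instance_.get();
  }

 private:
  Daemons() = default;
  static std::once_flag initFlag_;
  static std::unique_ptr<Daemons> instance_;
};

std::once_flag Daemons::initFlag_;
std::unique_ptr<Daemons> Daemons::instance_;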
*/ #include #include -#include "paddle/math/Matrix.h" -#include "paddle/math/Vector.h" -#include "paddle/parameter/Parameter.h" -#include "paddle/pserver/BaseClient.h" -#include "paddle/utils/Common.h" -#include "paddle/utils/Flags.h" -#include "paddle/utils/Locks.h" -#include "paddle/utils/Queue.h" -#include "paddle/utils/Util.h" +#include "paddle/legacy/math/Matrix.h" +#include "paddle/legacy/math/Vector.h" +#include "paddle/legacy/parameter/Parameter.h" +#include "paddle/legacy/pserver/BaseClient.h" +#include "paddle/legacy/utils/Common.h" +#include "paddle/legacy/utils/Flags.h" +#include "paddle/legacy/utils/Locks.h" +#include "paddle/legacy/utils/Queue.h" +#include "paddle/legacy/utils/Util.h" #include "ParameterService.pb.h" @@ -50,11 +50,11 @@ struct PServerVector { * @brief A class to help to prepare server-side operations. */ class PreparedOperations { -protected: + protected: class ResultsAdder; struct LocalOperationResult; -public: + public: /** * Offers an easy way to prepare operations that will be performed on * server-side. @@ -93,7 +93,7 @@ public: return ResultsAdder(&localResults_.back()); } -protected: + protected: void addOperationHelper(Operation* op) {} /** @@ -151,7 +151,7 @@ protected: * @brief ResultsAdder offers easy ways to quickly store operation results. */ class ResultsAdder { - public: + public: explicit ResultsAdder(LocalOperationResult* localResult) : localResult_(localResult) {} template @@ -172,11 +172,11 @@ protected: addResult(args...); } - protected: + protected: LocalOperationResult* localResult_; }; -protected: + protected: DoOperationRequest request_; std::vector inputIovs_; struct LocalOperationResult { @@ -214,7 +214,7 @@ struct ParameterSegments { * waiting until all parameters are received to CPU host end. */ class ParameterClient2 : public BaseClient { -public: + public: /** Constructor. * @param separate True if sending and receiving activities are separated * into 2 threads, otherwise false. @@ -232,7 +232,7 @@ public: static int calcParameterBlockSize(const std::vector& parameters, size_t serviceNum); -public: + public: bool init(const std::vector& parameters); /// service functions @@ -514,7 +514,7 @@ public: void setForwardbackwardTime(uint64_t delta) { forwardbackwordTime_ = delta; } #endif -protected: + protected: template void multiCall(const char* funcName, const ProtoIn& request, @@ -529,7 +529,7 @@ protected: } } -private: + private: void destroy(); /** @@ -573,7 +573,7 @@ private: /// start necessary threads for threadPool void initThreads(); -protected: + protected: /// start port number of pserver /// it deduces all ports for dense and sparse with some rules int port_; diff --git a/paddle/pserver/ParameterServer2.cpp b/paddle/legacy/pserver/ParameterServer2.cpp similarity index 98% rename from paddle/pserver/ParameterServer2.cpp rename to paddle/legacy/pserver/ParameterServer2.cpp index f8814714c2..8533a322d9 100644 --- a/paddle/pserver/ParameterServer2.cpp +++ b/paddle/legacy/pserver/ParameterServer2.cpp @@ -17,19 +17,19 @@ limitations under the License.
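ParameterClient2 above shards each dense parameter into fixed-size blocks spread over serviceNum parameter servers (see calcParameterBlockSize). A hedged sketch of one such block-to-server layout; the round-robin policy and names here are illustrative, not the client's actual partitioning code:

#include <algorithm>
#include <cstdint>
#include <vector>

struct Segment {
  int64_t begin, end;  // half-open element range
  int serverId;
};

// Split [0, size) into blocks of blockSize and assign them round-robin.
std::vector<Segment> shard(int64_t size, int64_t blockSize, int serviceNum) {
  std::vector<Segment> segs;
  for (int64_t off = 0, blk = 0; off < size; off += blockSize, ++blk) {
    segs.push_back({off, std::min(off + blockSize, size),
                    static_cast<int>(blk % serviceNum)});
  }
  return segs;
}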
*/ #include #include -#include "paddle/math/SIMDFunctions.h" -#include "paddle/parameter/AverageOptimizer.h" -#include "paddle/parameter/FirstOrderOptimizer.h" -#include "paddle/parameter/OptimizerFunctions.h" -#include "paddle/parameter/OptimizerWithRegularizer.h" -#include "paddle/parameter/ParameterOptimizer.h" -#include "paddle/parameter/ParameterUpdateFunctions.h" -#include "paddle/parameter/Regularizer.h" -#include "paddle/parameter/ThreadLocalBuffer.h" -#include "paddle/utils/Flags.h" -#include "paddle/utils/GlobalConstants.h" -#include "paddle/utils/Stat.h" -#include "paddle/utils/StringUtil.h" +#include "paddle/legacy/math/SIMDFunctions.h" +#include "paddle/legacy/parameter/AverageOptimizer.h" +#include "paddle/legacy/parameter/FirstOrderOptimizer.h" +#include "paddle/legacy/parameter/OptimizerFunctions.h" +#include "paddle/legacy/parameter/OptimizerWithRegularizer.h" +#include "paddle/legacy/parameter/ParameterOptimizer.h" +#include "paddle/legacy/parameter/ParameterUpdateFunctions.h" +#include "paddle/legacy/parameter/Regularizer.h" +#include "paddle/legacy/parameter/ThreadLocalBuffer.h" +#include "paddle/legacy/utils/Flags.h" +#include "paddle/legacy/utils/GlobalConstants.h" +#include "paddle/legacy/utils/Stat.h" +#include "paddle/legacy/utils/StringUtil.h" DEFINE_int32(pserver_num_threads, 1, "number of threads for sync op exec"); DEFINE_double(async_lagged_ratio_min, diff --git a/paddle/pserver/ParameterServer2.h b/paddle/legacy/pserver/ParameterServer2.h similarity index 98% rename from paddle/pserver/ParameterServer2.h rename to paddle/legacy/pserver/ParameterServer2.h index 3ed06b6b04..069e730ea4 100644 --- a/paddle/pserver/ParameterServer2.h +++ b/paddle/legacy/pserver/ParameterServer2.h @@ -25,14 +25,14 @@ limitations under the License. */ #include #include -#include "paddle/math/Matrix.h" -#include "paddle/math/Vector.h" -#include "paddle/parameter/Parameter.h" -#include "paddle/parameter/ParameterOptimizer.h" -#include "paddle/utils/Common.h" -#include "paddle/utils/Locks.h" -#include "paddle/utils/Stat.h" -#include "paddle/utils/ThreadLocal.h" +#include "paddle/legacy/math/Matrix.h" +#include "paddle/legacy/math/Vector.h" +#include "paddle/legacy/parameter/Parameter.h" +#include "paddle/legacy/parameter/ParameterOptimizer.h" +#include "paddle/legacy/utils/Common.h" +#include "paddle/legacy/utils/Locks.h" +#include "paddle/legacy/utils/Stat.h" +#include "paddle/legacy/utils/ThreadLocal.h" #include "ParameterService.pb.h" @@ -71,7 +71,7 @@ namespace paddle { * to prevent from being polluted. */ class ParameterServer2 : public ProtoServer { -protected: + protected: /// parameter_ mutex. 
RWLock parameterMutex_; @@ -169,7 +169,7 @@ protected: template class ReadWriteBuffer : public std::vector> { - public: + public: static_assert(sizeof(T) % AlignBytes == 0 || AlignBytes % sizeof(T) == 0, "Type T must be able to be aligned."); @@ -229,7 +229,7 @@ protected: return r; } - private: + private: size_t curOffset_; }; @@ -298,17 +298,17 @@ protected: /// barrier performance tuning sync-sgd required std::atomic batchId_; -public: + public: struct Buffer { real* base; size_t size; }; -protected: + protected: /// async gradient commit control bool asyncGrdientCommitCheckAndStat(const SendParameterRequest& request); -public: + public: /// disable default parameter for overloading /// @rdmaCpu:the id of cpu core hosting RDMA server(0-N) /// -1 means using TCP transport instead of RDMA @@ -437,7 +437,7 @@ public: void saveValueVector(const SaveValueRequest& request, ProtoResponseCallback callback); -public: + public: /** * @brief initialize parameter server */ @@ -512,7 +512,7 @@ public: SendParameterResponse* response, std::vector* outputBuffers); -protected: + protected: void mergeSegments(BlockSegments* segments); /// set the unused segments to zero @@ -641,7 +641,7 @@ protected: const VectorPtr vecs[], const ParameterOptimizer::TraverseCallback& callback); -public: + public: typedef void (ParameterServer2::*OperatorFunction)(const Operation& operation, OperationResult* result); diff --git a/paddle/pserver/ParameterServer2Main.cpp b/paddle/legacy/pserver/ParameterServer2Main.cpp similarity index 100% rename from paddle/pserver/ParameterServer2Main.cpp rename to paddle/legacy/pserver/ParameterServer2Main.cpp diff --git a/paddle/pserver/ParameterServerController.cpp b/paddle/legacy/pserver/ParameterServerController.cpp similarity index 100% rename from paddle/pserver/ParameterServerController.cpp rename to paddle/legacy/pserver/ParameterServerController.cpp diff --git a/paddle/pserver/ParameterServerController.h b/paddle/legacy/pserver/ParameterServerController.h similarity index 97% rename from paddle/pserver/ParameterServerController.h rename to paddle/legacy/pserver/ParameterServerController.h index 3a9bc74edf..b90d0cbcea 100644 --- a/paddle/pserver/ParameterServerController.h +++ b/paddle/legacy/pserver/ParameterServerController.h @@ -17,7 +17,7 @@ limitations under the License. */ #include "ParameterServer2.h" #include "ParameterServerConfig.pb.h" #include "RDMANetwork.h" -#include "paddle/utils/StringUtil.h" +#include "paddle/legacy/utils/StringUtil.h" namespace paddle { @@ -28,7 +28,7 @@ namespace paddle { * by gflags or proto. */ class ParameterServerController final { -public: + public: DISABLE_COPY(ParameterServerController); /** @@ -67,7 +67,7 @@ public: */ void wait(); -private: + private: std::vector> parameterServers_; }; diff --git a/paddle/pserver/ProtoServer.cpp b/paddle/legacy/pserver/ProtoServer.cpp similarity index 100% rename from paddle/pserver/ProtoServer.cpp rename to paddle/legacy/pserver/ProtoServer.cpp diff --git a/paddle/pserver/ProtoServer.h b/paddle/legacy/pserver/ProtoServer.h similarity index 99% rename from paddle/pserver/ProtoServer.h rename to paddle/legacy/pserver/ProtoServer.h index 3f78799dbf..2943867de5 100644 --- a/paddle/pserver/ProtoServer.h +++ b/paddle/legacy/pserver/ProtoServer.h @@ -34,7 +34,7 @@ namespace paddle { * for single NIC hardware with --port=N(N>1) for small cluster job. */ class ProtoServer : public SocketServer { -public: + public: /// rdmaCpu controls the cpu affinity of RDMA server daemon, /// which could benefit performance.
rdmaCpu = -1 means TCP /// is used instead of RDMA transport. @@ -87,7 +87,7 @@ public: std::unique_ptr msgReader, ProtoResponseCallbackEx callback)> func); -protected: + protected: /** * @brief handle rpc request * @param[in] msgReader Message reader for reading data from connection @@ -111,7 +111,7 @@ protected: void registerServiceFunctionImp(const std::string& funcName, ServiceFunction func); -protected: + protected: /// Tuning bare network overhead: the beginning of receiving request ThreadLocal handleRequestBegin_; @@ -120,7 +120,7 @@ protected: }; class ProtoClient : public SocketClient { -public: + public: ProtoClient(const std::string& serverAddr, int serverPort, enum ChannelType channelType = F_TCP) diff --git a/paddle/pserver/RDMANetwork.h b/paddle/legacy/pserver/RDMANetwork.h similarity index 98% rename from paddle/pserver/RDMANetwork.h rename to paddle/legacy/pserver/RDMANetwork.h index 83db6b9df7..c87056f72c 100644 --- a/paddle/pserver/RDMANetwork.h +++ b/paddle/legacy/pserver/RDMANetwork.h @@ -19,7 +19,7 @@ limitations under the License. */ #else #define PROMPT_ERR() LOG(FATAL) << "Paddle is not compiled with rdma" #endif -#include "paddle/utils/Logging.h" +#include "paddle/legacy/utils/Logging.h" #include struct sxi_sock; diff --git a/paddle/pserver/SocketChannel.cpp b/paddle/legacy/pserver/SocketChannel.cpp similarity index 99% rename from paddle/pserver/SocketChannel.cpp rename to paddle/legacy/pserver/SocketChannel.cpp index 72e6943408..79c763c62b 100644 --- a/paddle/pserver/SocketChannel.cpp +++ b/paddle/legacy/pserver/SocketChannel.cpp @@ -22,7 +22,7 @@ limitations under the License. */ #include #include "RDMANetwork.h" -#include "paddle/utils/Util.h" +#include "paddle/legacy/utils/Util.h" namespace paddle { diff --git a/paddle/pserver/SocketChannel.h b/paddle/legacy/pserver/SocketChannel.h similarity index 97% rename from paddle/pserver/SocketChannel.h rename to paddle/legacy/pserver/SocketChannel.h index c0f30d0db7..a7b3cd42f0 100644 --- a/paddle/pserver/SocketChannel.h +++ b/paddle/legacy/pserver/SocketChannel.h @@ -14,7 +14,7 @@ limitations under the License. */ #pragma once -#include "paddle/utils/Util.h" +#include "paddle/legacy/utils/Util.h" #include @@ -33,7 +33,7 @@ enum ChannelType { /// reading a set of blocks of data from SocketChannel. 
class MsgReader { -public: + public: MsgReader(SocketChannel* channel, size_t numIovs); ~MsgReader() { /// ensure all data blocks have been processed @@ -75,7 +75,7 @@ public: void readBlocks(const std::vector& bufs); void readNextBlock(void* buf); -protected: + protected: SocketChannel* channel_; std::vector blockLengths_; size_t currentBlockIndex_; @@ -84,7 +84,7 @@ protected: /// APIs for reading and writing byte stream data or naive iov data /// from the APIs, both RDMA and TCP exhibit a byte-stream style class SocketChannel { -public: + public: SocketChannel(int socket, const std::string& peerName) : tcpSocket_(socket), peerName_(peerName) { tcpRdma_ = F_TCP; @@ -137,7 +137,7 @@ public: /// return null to indicate socket is closed std::unique_ptr readMessage(); -protected: + protected: struct MessageHeader { int64_t totalLength; /// include the header int64_t numIovs; diff --git a/paddle/pserver/SparseParameterDistribution.cpp b/paddle/legacy/pserver/SparseParameterDistribution.cpp similarity index 97% rename from paddle/pserver/SparseParameterDistribution.cpp rename to paddle/legacy/pserver/SparseParameterDistribution.cpp index bb247f389c..3f17b228f0 100644 --- a/paddle/pserver/SparseParameterDistribution.cpp +++ b/paddle/legacy/pserver/SparseParameterDistribution.cpp @@ -14,9 +14,9 @@ limitations under the License. */ #include -#include "paddle/utils/Logging.h" +#include "paddle/legacy/utils/Logging.h" -#include "paddle/utils/Flags.h" +#include "paddle/legacy/utils/Flags.h" #include "SparseParameterDistribution.h" diff --git a/paddle/pserver/SparseParameterDistribution.h b/paddle/legacy/pserver/SparseParameterDistribution.h similarity index 96% rename from paddle/pserver/SparseParameterDistribution.h rename to paddle/legacy/pserver/SparseParameterDistribution.h index 13f199548d..ee78029958 100644 --- a/paddle/pserver/SparseParameterDistribution.h +++ b/paddle/legacy/pserver/SparseParameterDistribution.h @@ -16,7 +16,7 @@ limitations under the License. */ #include #include -#include "paddle/utils/Logging.h" +#include "paddle/legacy/utils/Logging.h" namespace paddle { @@ -31,7 +31,7 @@ namespace paddle { * if unbalanced distribution is exhibited by default. */ class SparseParameterDistribution { -public: + public: /// serviceNum means the number of ParameterServers explicit SparseParameterDistribution(size_t serviceNum); ~SparseParameterDistribution() {} @@ -39,7 +39,7 @@ public: void probeDistribution(int serverId, size_t data); void checkAndResetDistribution(); -private: + private: std::vector data_; std::atomic totBytes_; diff --git a/paddle/pserver/test/.gitignore b/paddle/legacy/pserver/test/.gitignore similarity index 100% rename from paddle/pserver/test/.gitignore rename to paddle/legacy/pserver/test/.gitignore diff --git a/paddle/pserver/test/CMakeLists.txt b/paddle/legacy/pserver/test/CMakeLists.txt similarity index 100% rename from paddle/pserver/test/CMakeLists.txt rename to paddle/legacy/pserver/test/CMakeLists.txt diff --git a/paddle/pserver/test/SocketTest.cpp b/paddle/legacy/pserver/test/SocketTest.cpp similarity index 96% rename from paddle/pserver/test/SocketTest.cpp rename to paddle/legacy/pserver/test/SocketTest.cpp index 6019dccaad..3a781fcbf6 100644 --- a/paddle/pserver/test/SocketTest.cpp +++ b/paddle/legacy/pserver/test/SocketTest.cpp @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
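SocketChannel above frames each message with a MessageHeader (total length plus iov count) followed by per-block lengths and data. A hedged sketch of writing one such frame into a byte buffer; this mirrors the framing idea only and is not the wire format guaranteed by the real code:

#include <cstdint>
#include <cstring>
#include <vector>

struct Header {
  int64_t totalLength;  // includes this header, like MessageHeader above
  int64_t numIovs;
};

// Serialize one data block behind a header-plus-block-length prefix.
std::vector<char> frameOneBlock(const void* data, int64_t len) {
  Header h{static_cast<int64_t>(sizeof(Header) + sizeof(int64_t)) + len, 1};
  std::vector<char> buf(static_cast<size_t>(h.totalLength));
  std::memcpy(buf.data(), &h, sizeof(h));
  std::memcpy(buf.data() + sizeof(h), &len, sizeof(len));  // block length
  std::memcpy(buf.data() + sizeof(h) + sizeof(len), data, static_cast<size_t>(len));
  return buf;
}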
*/ -#include "paddle/utils/Util.h" +#include "paddle/legacy/utils/Util.h" #include #include @@ -22,20 +22,20 @@ limitations under the License. */ #include -#include "paddle/math/Vector.h" -#include "paddle/utils/Logging.h" +#include "paddle/legacy/math/Vector.h" +#include "paddle/legacy/utils/Logging.h" struct MessageHeader { int64_t dataLength; }; class Thread { -public: + public: void start(); virtual void run() = 0; virtual ~Thread() {} -protected: + protected: std::unique_ptr thread_; }; @@ -44,13 +44,13 @@ void Thread::start() { } class SocketChannel { -public: + public: explicit SocketChannel(int socket) : socket_(socket) {} int getSocketFd() const { return socket_; } uint64_t readAll(void* buf, size_t size); uint64_t writeAll(const void* buf, size_t size); -protected: + protected: int socket_; }; @@ -79,7 +79,7 @@ uint64_t SocketChannel::writeAll(const void* buf, size_t size) { } class SocketWorker : public Thread { -public: + public: explicit SocketWorker(int socket) : channel_(socket) {} virtual void run(); @@ -88,19 +88,19 @@ public: // write n bytes -protected: + protected: SocketChannel channel_; std::string buffer_; }; class SocketServer : public Thread { -public: + public: explicit SocketServer(int port) : port_(port), socket_(0), maxPendingConnections_(100) {} virtual void run(); -protected: + protected: int port_; int socket_; int maxPendingConnections_; @@ -161,11 +161,11 @@ void SocketWorker::run() { } class SocketClient { -public: + public: SocketClient(const std::string& serverAddr, int serverPort); SocketChannel* getChannel() const { return channel_.get(); } -protected: + protected: std::unique_ptr channel_; }; diff --git a/paddle/pserver/test/test_ParameterServer2.cpp b/paddle/legacy/pserver/test/test_ParameterServer2.cpp similarity index 98% rename from paddle/pserver/test/test_ParameterServer2.cpp rename to paddle/legacy/pserver/test/test_ParameterServer2.cpp index e742cd0871..542e80e046 100644 --- a/paddle/pserver/test/test_ParameterServer2.cpp +++ b/paddle/legacy/pserver/test/test_ParameterServer2.cpp @@ -13,10 +13,10 @@ See the License for the specific language governing permissions and limitations under the License. */ #include -#include -#include -#include -#include +#include +#include +#include +#include using namespace paddle; // NOLINT using namespace std; // NOLINT @@ -26,7 +26,7 @@ DEFINE_string(server_addr, "127.0.0.1", "assign server address"); DEFINE_int32(server_cpu, 0, "assign server cpu"); class ParameterServer2Tester : public ParameterServer2 { -public: + public: ParameterServer2Tester(std::string serverAddr, int port, int rdmaCpu = -1, @@ -88,7 +88,7 @@ public: void waitPassFinishTest(); void synchronizeTest(); -protected: + protected: ParameterClient2 client_; vector clientConfigs_; vector parameters_; diff --git a/paddle/pserver/test/test_ProtoServer.cpp b/paddle/legacy/pserver/test/test_ProtoServer.cpp similarity index 96% rename from paddle/pserver/test/test_ProtoServer.cpp rename to paddle/legacy/pserver/test/test_ProtoServer.cpp index d68a8d2180..f7ab2e8af4 100644 --- a/paddle/pserver/test/test_ProtoServer.cpp +++ b/paddle/legacy/pserver/test/test_ProtoServer.cpp @@ -15,10 +15,10 @@ limitations under the License. 
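The test's SocketChannel above declares readAll/writeAll; short reads are routine on stream sockets, so such helpers loop until the full size is transferred. A hedged POSIX sketch of the read side (not the file's exact body, which this rename diff does not display in full):

#include <unistd.h>

#include <cstddef>

// Read exactly `size` bytes from fd, retrying on short reads; returns the
// byte count actually read (less than `size` means EOF or error, check errno).
size_t readAll(int fd, void* buf, size_t size) {
  size_t total = 0;
  while (total < size) {
    ssize_t n = ::read(fd, static_cast<char*>(buf) + total, size - total);
    if (n <= 0) break;  // 0: peer closed; negative: error
    total += static_cast<size_t>(n);
  }
  return total;
}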
*/ #include #include #include "ParameterService.pb.h" -#include "paddle/math/Vector.h" -#include "paddle/pserver/ProtoServer.h" -#include "paddle/utils/Stat.h" -#include "paddle/utils/Util.h" +#include "paddle/legacy/math/Vector.h" +#include "paddle/legacy/pserver/ProtoServer.h" +#include "paddle/legacy/utils/Stat.h" +#include "paddle/legacy/utils/Util.h" DEFINE_string(server_addr, "127.0.0.1", "Server address"); DEFINE_int64(dim, 50000000, "Data size"); @@ -28,7 +28,7 @@ DEFINE_bool(benchmark, false, "Do benchmark. Skip some tests"); using namespace paddle; // NOLINT class MyServer : public ProtoServer { -public: + public: explicit MyServer(int port, int rdmaCpu = -1) : ProtoServer(FLAGS_server_addr, port, rdmaCpu), status_(PSERVER_STATUS_NOT_SET) { @@ -62,7 +62,7 @@ public: callback(response); } -protected: + protected: PServerStatus status_; std::string buffer_; }; diff --git a/paddle/pserver/test/test_ProtoServer.sh b/paddle/legacy/pserver/test/test_ProtoServer.sh similarity index 94% rename from paddle/pserver/test/test_ProtoServer.sh rename to paddle/legacy/pserver/test/test_ProtoServer.sh index 970c90b494..1439350847 100755 --- a/paddle/pserver/test/test_ProtoServer.sh +++ b/paddle/legacy/pserver/test/test_ProtoServer.sh @@ -19,7 +19,7 @@ do if [ $port_used_num -eq 0 ] then echo $port; - pserver/test/test_ProtoServer --port=$port + legacy/pserver/test/test_ProtoServer --port=$port if [ $? -eq 0 ] then exit 0 diff --git a/paddle/trainer/CMakeLists.txt b/paddle/legacy/trainer/CMakeLists.txt similarity index 91% rename from paddle/trainer/CMakeLists.txt rename to paddle/legacy/trainer/CMakeLists.txt index 72911695bd..6192de4388 100644 --- a/paddle/trainer/CMakeLists.txt +++ b/paddle/legacy/trainer/CMakeLists.txt @@ -36,17 +36,12 @@ endif() add_library(paddle_trainer_lib STATIC ${TRAINER_SOURCES}) -add_style_check_target(paddle_trainer_lib - ${TRAINER_SOURCES}) -add_style_check_target(paddle_trainer_lib - ${TRAINER_HEADERS}) add_dependencies(paddle_trainer_lib paddle_proto ${external_project_dependencies}) macro(add_paddle_exe TARGET_NAME) add_executable(${TARGET_NAME} ${ARGN}) - add_style_check_target(${TARGET_NAME} ${ARGN}) link_paddle_exe(${TARGET_NAME}) endmacro() diff --git a/paddle/trainer/MergeModel.cpp b/paddle/legacy/trainer/MergeModel.cpp similarity index 95% rename from paddle/trainer/MergeModel.cpp rename to paddle/legacy/trainer/MergeModel.cpp index 56c38015fb..8a3601f192 100644 --- a/paddle/trainer/MergeModel.cpp +++ b/paddle/legacy/trainer/MergeModel.cpp @@ -16,8 +16,8 @@ limitations under the License. */ #include "ParamUtil.h" #include "Trainer.h" -#include "paddle/pserver/ParameterServer2.h" -#include "paddle/utils/PythonUtil.h" +#include "paddle/legacy/pserver/ParameterServer2.h" +#include "paddle/legacy/utils/PythonUtil.h" DEFINE_string(model_dir, "", "Directory for separated model files"); DEFINE_string(config_file, "", "Config file for the model"); diff --git a/paddle/trainer/NewRemoteParameterUpdater.cpp b/paddle/legacy/trainer/NewRemoteParameterUpdater.cpp similarity index 99% rename from paddle/trainer/NewRemoteParameterUpdater.cpp rename to paddle/legacy/trainer/NewRemoteParameterUpdater.cpp index 410ac6d95c..cdd832acd1 100644 --- a/paddle/trainer/NewRemoteParameterUpdater.cpp +++ b/paddle/legacy/trainer/NewRemoteParameterUpdater.cpp @@ -14,7 +14,7 @@ limitations under the License. 
*/ #include "NewRemoteParameterUpdater.h" #include "Trainer.h" -#include "paddle/utils/Stat.h" +#include "paddle/legacy/utils/Stat.h" DECLARE_int32(trainer_id); DECLARE_string(save_dir); diff --git a/paddle/trainer/NewRemoteParameterUpdater.h b/paddle/legacy/trainer/NewRemoteParameterUpdater.h similarity index 95% rename from paddle/trainer/NewRemoteParameterUpdater.h rename to paddle/legacy/trainer/NewRemoteParameterUpdater.h index 6223ba427c..707e9ceb9b 100644 --- a/paddle/trainer/NewRemoteParameterUpdater.h +++ b/paddle/legacy/trainer/NewRemoteParameterUpdater.h @@ -19,9 +19,9 @@ limitations under the License. */ #include "OptimizerConfig.pb.h" #include "ParameterUpdater.h" #include "libpaddle_pserver_cclient.h" -#include "paddle/pserver/ParameterClient2.h" -#include "paddle/utils/Queue.h" -#include "paddle/utils/Util.h" +#include "paddle/legacy/pserver/ParameterClient2.h" +#include "paddle/legacy/utils/Queue.h" +#include "paddle/legacy/utils/Util.h" namespace paddle { @@ -29,7 +29,7 @@ namespace paddle { * New remote parameter updater for dense parameters that use cclient of go. */ class NewRemoteParameterUpdater : public ParameterUpdater { -public: + public: NewRemoteParameterUpdater(const OptimizationConfig& config, const std::string pserverSpec); NewRemoteParameterUpdater(const OptimizationConfig& config, @@ -61,13 +61,13 @@ public: virtual void startPass(); virtual bool finishPass(); -protected: + protected: /** * work need to do after finishBatch */ virtual void updateImpl(Parameter* para); -private: + private: int parameterSize() { return (int)parameters_.size(); } /** @@ -104,7 +104,7 @@ private: } } -protected: + protected: const OptimizationConfig& trainerConfig_; /// internal parameter client object for exchanging data with pserver paddle_pserver_client parameterClient_; diff --git a/paddle/trainer/ParamUtil.cpp b/paddle/legacy/trainer/ParamUtil.cpp similarity index 93% rename from paddle/trainer/ParamUtil.cpp rename to paddle/legacy/trainer/ParamUtil.cpp index ffbca42e10..b5aba32dee 100644 --- a/paddle/trainer/ParamUtil.cpp +++ b/paddle/legacy/trainer/ParamUtil.cpp @@ -23,16 +23,16 @@ limitations under the License. */ #include #include -#include +#include -#include "paddle/utils/GlobalConstants.h" -#include "paddle/utils/PythonUtil.h" -#include "paddle/utils/Stat.h" -#include "paddle/utils/Util.h" +#include "paddle/legacy/utils/GlobalConstants.h" +#include "paddle/legacy/utils/PythonUtil.h" +#include "paddle/legacy/utils/Stat.h" +#include "paddle/legacy/utils/Util.h" #include "TesterConfig.h" -#include "paddle/gserver/gradientmachines/NeuralNetwork.h" -#include "paddle/gserver/layers/ValidationLayer.h" +#include "paddle/legacy/gserver/gradientmachines/NeuralNetwork.h" +#include "paddle/legacy/gserver/layers/ValidationLayer.h" namespace paddle { diff --git a/paddle/trainer/ParamUtil.h b/paddle/legacy/trainer/ParamUtil.h similarity index 95% rename from paddle/trainer/ParamUtil.h rename to paddle/legacy/trainer/ParamUtil.h index 2e05595848..0778696776 100644 --- a/paddle/trainer/ParamUtil.h +++ b/paddle/legacy/trainer/ParamUtil.h @@ -14,13 +14,13 @@ limitations under the License. 
*/ #pragma once -#include "paddle/utils/Util.h" +#include "paddle/legacy/utils/Util.h" #include #include "hl_gpu.h" -#include "paddle/gserver/dataproviders/DataProvider.h" -#include "paddle/gserver/gradientmachines/GradientMachine.h" +#include "paddle/legacy/gserver/dataproviders/DataProvider.h" +#include "paddle/legacy/gserver/gradientmachines/GradientMachine.h" #include #include @@ -56,7 +56,7 @@ struct ParameterUtilConfig { * Utility class for loading and saving parameters */ class ParameterUtil { -public: + public: /** * Ctor. * @@ -115,7 +115,7 @@ public: } } -private: + private: std::shared_ptr config_; std::unique_ptr intConfig_; GradientMachinePtr gserver_; diff --git a/paddle/trainer/ParameterUpdater.cpp b/paddle/legacy/trainer/ParameterUpdater.cpp similarity index 98% rename from paddle/trainer/ParameterUpdater.cpp rename to paddle/legacy/trainer/ParameterUpdater.cpp index 4e9e890c85..549fb0332d 100644 --- a/paddle/trainer/ParameterUpdater.cpp +++ b/paddle/legacy/trainer/ParameterUpdater.cpp @@ -14,9 +14,9 @@ limitations under the License. */ #include "ParameterUpdater.h" -#include "paddle/utils/Logging.h" +#include "paddle/legacy/utils/Logging.h" -#include "paddle/utils/Thread.h" +#include "paddle/legacy/utils/Thread.h" namespace paddle { diff --git a/paddle/trainer/ParameterUpdater.h b/paddle/legacy/trainer/ParameterUpdater.h similarity index 93% rename from paddle/trainer/ParameterUpdater.h rename to paddle/legacy/trainer/ParameterUpdater.h index 9e9e948b88..acddc3702d 100644 --- a/paddle/trainer/ParameterUpdater.h +++ b/paddle/legacy/trainer/ParameterUpdater.h @@ -14,18 +14,18 @@ limitations under the License. */ #pragma once -#include "paddle/utils/Thread.h" -#include "paddle/utils/Util.h" +#include "paddle/legacy/utils/Thread.h" +#include "paddle/legacy/utils/Util.h" -#include "paddle/parameter/AverageOptimizer.h" -#include "paddle/parameter/FirstOrderOptimizer.h" -#include "paddle/parameter/OptimizerFunctions.h" -#include "paddle/parameter/OptimizerWithRegularizer.h" -#include "paddle/parameter/Parameter.h" -#include "paddle/parameter/ParameterUpdaterBase.h" +#include "paddle/legacy/parameter/AverageOptimizer.h" +#include "paddle/legacy/parameter/FirstOrderOptimizer.h" +#include "paddle/legacy/parameter/OptimizerFunctions.h" +#include "paddle/legacy/parameter/OptimizerWithRegularizer.h" +#include "paddle/legacy/parameter/Parameter.h" +#include "paddle/legacy/parameter/ParameterUpdaterBase.h" #include "TrainerConfig.pb.h" -#include "paddle/gserver/layers/Layer.h" +#include "paddle/legacy/gserver/layers/Layer.h" #include #include @@ -36,7 +36,7 @@ namespace paddle { * @brief Parameter Updater for SGD, and local (not cluster) run. */ class SgdLocalUpdater : public ParameterUpdater { -public: + public: /** * @brief Ctor. Initialize optimizer locally by optConfig. * @param optConfig optimization config. @@ -131,7 +131,7 @@ public: } } -protected: + protected: /** * @brief update method. Update value from gradient. * @param para parameter that will be updated. @@ -159,7 +159,7 @@ protected: * @deprecated */ class SgdCpuUpdater : public SgdLocalUpdater, public Deprecated { -public: + public: explicit SgdCpuUpdater(const OptimizationConfig& optConfig) : SgdLocalUpdater(optConfig), Deprecated( @@ -178,7 +178,7 @@ public: optimizer_->finishBatch(); } -protected: + protected: /** * @brief do nothing. * @param para @@ -192,7 +192,7 @@ protected: * It will do model averaging on cpu to reduce gpu memory consumption.
*/ class SgdUpdaterWithCpuAverager : public SgdLocalUpdater { -public: + public: /** * @brief Ctor. * @@ -233,12 +233,12 @@ public: */ virtual void restore(); -protected: + protected: virtual void updateImpl(Parameter* para); void updateFunc(Parameter* para); -protected: + protected: std::unique_ptr averager_; /** diff --git a/paddle/trainer/RemoteParameterUpdater.cpp b/paddle/legacy/trainer/RemoteParameterUpdater.cpp similarity index 99% rename from paddle/trainer/RemoteParameterUpdater.cpp rename to paddle/legacy/trainer/RemoteParameterUpdater.cpp index 7314266cb2..5de1cc7827 100644 --- a/paddle/trainer/RemoteParameterUpdater.cpp +++ b/paddle/legacy/trainer/RemoteParameterUpdater.cpp @@ -14,8 +14,8 @@ limitations under the License. */ #include "RemoteParameterUpdater.h" #include "Trainer.h" -#include "paddle/utils/GlobalConstants.h" -#include "paddle/utils/Stat.h" +#include "paddle/legacy/utils/GlobalConstants.h" +#include "paddle/legacy/utils/Stat.h" DECLARE_int32(trainer_id); DECLARE_string(save_dir); diff --git a/paddle/trainer/RemoteParameterUpdater.h b/paddle/legacy/trainer/RemoteParameterUpdater.h similarity index 98% rename from paddle/trainer/RemoteParameterUpdater.h rename to paddle/legacy/trainer/RemoteParameterUpdater.h index 5e82c94475..6846853298 100644 --- a/paddle/trainer/RemoteParameterUpdater.h +++ b/paddle/legacy/trainer/RemoteParameterUpdater.h @@ -17,9 +17,9 @@ limitations under the License. */ #include #include #include "ParameterUpdater.h" -#include "paddle/pserver/ParameterClient2.h" -#include "paddle/utils/Queue.h" -#include "paddle/utils/Util.h" +#include "paddle/legacy/pserver/ParameterClient2.h" +#include "paddle/legacy/utils/Queue.h" +#include "paddle/legacy/utils/Util.h" namespace paddle { @@ -53,7 +53,7 @@ namespace paddle { * backward and communication is not supported. */ class RemoteParameterUpdater : public ParameterUpdater { -public: + public: RemoteParameterUpdater( const OptimizationConfig& config, int expectedPassCount, @@ -101,7 +101,7 @@ public: virtual void apply(); virtual void restore(); -protected: + protected: /** * control all pservers with all trainers for sync-sgd */ @@ -128,7 +128,7 @@ protected: */ void copyParametersFromDevice(ParameterType parameterType); -protected: + protected: /// Optimization config used to guide initialization and finishBatch OptimizationConfig config_; /// internal parameter client object for exchanging data with pserver @@ -178,7 +178,7 @@ protected: * It contains separate send and recv thread for pipeline usage. */ class ConcurrentRemoteParameterUpdater : public RemoteParameterUpdater { -public: + public: ConcurrentRemoteParameterUpdater( OptimizationConfig config, int expectedPassCount, @@ -194,7 +194,7 @@ public: */ virtual void finishBatch(real cost); -protected: + protected: virtual void updateImpl(Parameter* para); /// internal thread called in send thread void send(Parameter* para); // para == NULL indicate end of a minibatch @@ -221,7 +221,7 @@ protected: return (numBatches_ + 1) % config_.num_batches_per_send_parameter() == 0; } -private: + private: /// send thread used for overlapping std::unique_ptr sendThread_; /// recv thread used for overlapping @@ -263,7 +263,7 @@ private: * to encapsulate sparse specified message for all pservers. 
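As context for the sparse updater declared next: sparse rows are grouped into blocks, and blocks are distributed over the pservers (the balance of which SparseParameterDistribution above monitors). A hedged sketch of the simplest such mapping; the round-robin policy and rowsPerBlock parameter are illustrative, not the actual distribution rule:

#include <cstdint>

// Map a sparse row to a pserver: rows form blocks of `rowsPerBlock`,
// blocks are spread round-robin over serviceNum servers.
int serverForRow(int64_t rowId, int64_t rowsPerBlock, int serviceNum) {
  const int64_t blockId = rowId / rowsPerBlock;
  return static_cast<int>(blockId % serviceNum);
}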
 */
 class SparseRemoteParameterUpdater : public ParameterUpdater {
-public:
+ public:
   SparseRemoteParameterUpdater(const OptimizationConfig& config,
                                int expectedPassCount,
                                bool testing);
@@ -303,7 +303,7 @@ public:
   }
 #endif
-protected:
+ protected:
   /// update implimentation, not implemented
   virtual void updateImpl(Parameter* para) {}
@@ -313,7 +313,7 @@ protected:
   /// start controller thread
   void startController();
-protected:
+ protected:
   /// optimization config
   OptimizationConfig config_;
   /// internal parameter client
@@ -335,7 +335,7 @@ protected:
  * it directly call internal dense and sparse udpater individually.
  */
 class SparseRemoteParameterUpdaterComposite : public ParameterUpdaterComposite {
-public:
+ public:
   enum {
     UPDATER_SPARSE_REMOTE = 0,  // execute in sync thread pool(tid:0)
     UPDATER_NORMAL = 1,         // execute in Owner thread(tid:1)
@@ -364,7 +364,7 @@ public:
 };
 class ParameterUpdaterCreators {
-public:
+ public:
   /**
    * @brief add a creator to create custom ParameterUpdater while training.
    *        The creator is a function with type (alogrithm, optConfig, isLocal,
@@ -407,7 +407,7 @@ public:
     return nullptr;
   }
-private:
+ private:
   static std::vector> constructors_;
diff --git a/paddle/trainer/Tester.cpp b/paddle/legacy/trainer/Tester.cpp
similarity index 97%
rename from paddle/trainer/Tester.cpp
rename to paddle/legacy/trainer/Tester.cpp
index 16e676d602..d977ca9657 100644
--- a/paddle/trainer/Tester.cpp
+++ b/paddle/legacy/trainer/Tester.cpp
@@ -24,15 +24,15 @@ limitations under the License. */
 #include
-#include "paddle/utils/GlobalConstants.h"
-#include "paddle/utils/PythonUtil.h"
-#include "paddle/utils/Stat.h"
-#include "paddle/utils/Util.h"
+#include "paddle/legacy/utils/GlobalConstants.h"
+#include "paddle/legacy/utils/PythonUtil.h"
+#include "paddle/legacy/utils/Stat.h"
+#include "paddle/legacy/utils/Util.h"
 #include "TesterConfig.h"
-#include "paddle/gserver/gradientmachines/GradientMachineMode.h"
-#include "paddle/gserver/gradientmachines/NeuralNetwork.h"
-#include "paddle/gserver/layers/ValidationLayer.h"
+#include "paddle/legacy/gserver/gradientmachines/GradientMachineMode.h"
+#include "paddle/legacy/gserver/gradientmachines/NeuralNetwork.h"
+#include "paddle/legacy/gserver/layers/ValidationLayer.h"
 namespace paddle {
diff --git a/paddle/trainer/Tester.h b/paddle/legacy/trainer/Tester.h
similarity index 95%
rename from paddle/trainer/Tester.h
rename to paddle/legacy/trainer/Tester.h
index e892744db2..a298602d1d 100644
--- a/paddle/trainer/Tester.h
+++ b/paddle/legacy/trainer/Tester.h
@@ -14,13 +14,13 @@ limitations under the License. */
 #pragma once
-#include "paddle/utils/Util.h"
+#include "paddle/legacy/utils/Util.h"
 #include
 #include "hl_gpu.h"
-#include "paddle/gserver/dataproviders/DataProvider.h"
-#include "paddle/gserver/gradientmachines/GradientMachine.h"
+#include "paddle/legacy/gserver/dataproviders/DataProvider.h"
+#include "paddle/legacy/gserver/gradientmachines/GradientMachine.h"
 #include "TrainerConfig.pb.h"
@@ -38,7 +38,7 @@ namespace paddle {
  * It is a private class for Trainer.
  */
 class Tester {
-public:
+ public:
   /**
    * Ctor
    * @param config Trainer Config.
@@ -87,7 +87,7 @@ public:
    */
   void test();
-protected:
+ protected:
   std::shared_ptr testParameterClient_;
   std::shared_ptr config_;
   std::unique_ptr intconfig_;
@@ -107,7 +107,7 @@ protected:
     real cost;
   } testContext_;
-private:
+ private:
   /**
    * Test one batch by batchId. It is only used for testOnePass.
   *
diff --git a/paddle/trainer/TesterConfig.h b/paddle/legacy/trainer/TesterConfig.h
similarity index 94%
rename from paddle/trainer/TesterConfig.h
rename to paddle/legacy/trainer/TesterConfig.h
index 68d4c931ff..6c78f7cda3 100644
--- a/paddle/trainer/TesterConfig.h
+++ b/paddle/legacy/trainer/TesterConfig.h
@@ -14,12 +14,12 @@ limitations under the License. */
 #pragma once
-#include "paddle/utils/Util.h"
+#include "paddle/legacy/utils/Util.h"
 #include
 #include "hl_gpu.h"
-#include "paddle/gserver/gradientmachines/GradientMachine.h"
+#include "paddle/legacy/gserver/gradientmachines/GradientMachine.h"
 #include "TrainerConfig.pb.h"
diff --git a/paddle/trainer/ThreadParameterUpdater.cpp b/paddle/legacy/trainer/ThreadParameterUpdater.cpp
similarity index 98%
rename from paddle/trainer/ThreadParameterUpdater.cpp
rename to paddle/legacy/trainer/ThreadParameterUpdater.cpp
index 3c85c3aaac..0601bdf24e 100644
--- a/paddle/trainer/ThreadParameterUpdater.cpp
+++ b/paddle/legacy/trainer/ThreadParameterUpdater.cpp
@@ -14,11 +14,11 @@ limitations under the License. */
 #include "ThreadParameterUpdater.h"
-#include "paddle/utils/Logging.h"
+#include "paddle/legacy/utils/Logging.h"
-#include "paddle/math/SparseRowMatrix.h"
-#include "paddle/parameter/ThreadLocalBuffer.h"
-#include "paddle/utils/Thread.h"
+#include "paddle/legacy/math/SparseRowMatrix.h"
+#include "paddle/legacy/parameter/ThreadLocalBuffer.h"
+#include "paddle/legacy/utils/Thread.h"
 DECLARE_int32(trainer_count);
diff --git a/paddle/trainer/ThreadParameterUpdater.h b/paddle/legacy/trainer/ThreadParameterUpdater.h
similarity index 88%
rename from paddle/trainer/ThreadParameterUpdater.h
rename to paddle/legacy/trainer/ThreadParameterUpdater.h
index bc08a9e9f0..172287d4eb 100644
--- a/paddle/trainer/ThreadParameterUpdater.h
+++ b/paddle/legacy/trainer/ThreadParameterUpdater.h
@@ -14,13 +14,13 @@ limitations under the License. */
 #pragma once
-#include "paddle/parameter/AverageOptimizer.h"
-#include "paddle/parameter/FirstOrderOptimizer.h"
-#include "paddle/parameter/OptimizerFunctions.h"
-#include "paddle/parameter/OptimizerWithRegularizer.h"
-#include "paddle/parameter/Parameter.h"
-#include "paddle/parameter/Regularizer.h"
-#include "paddle/utils/Util.h"
+#include "paddle/legacy/parameter/AverageOptimizer.h"
+#include "paddle/legacy/parameter/FirstOrderOptimizer.h"
+#include "paddle/legacy/parameter/OptimizerFunctions.h"
+#include "paddle/legacy/parameter/OptimizerWithRegularizer.h"
+#include "paddle/legacy/parameter/Parameter.h"
+#include "paddle/legacy/parameter/Regularizer.h"
+#include "paddle/legacy/utils/Util.h"
 #include
 #include
@@ -39,7 +39,7 @@ namespace paddle {
    class.
  */
 class SgdThreadUpdater : public ParameterUpdater {
-public:
+ public:
   explicit SgdThreadUpdater(const OptimizationConfig& optConfig);
   virtual ~SgdThreadUpdater() {}
@@ -57,7 +57,7 @@ public:
   virtual void apply();
   virtual void restore();
-protected:
+ protected:
   // This is the function that will be eventualy called by the GradientMachine.
   // used only for GPU update.
   virtual void updateImpl(Parameter* para);
diff --git a/paddle/trainer/Trainer.cpp b/paddle/legacy/trainer/Trainer.cpp
similarity index 98%
rename from paddle/trainer/Trainer.cpp
rename to paddle/legacy/trainer/Trainer.cpp
index 3e4a2b5fa8..2db754793c 100644
--- a/paddle/trainer/Trainer.cpp
+++ b/paddle/legacy/trainer/Trainer.cpp
@@ -23,19 +23,19 @@ limitations under the License. */
*/ #include -#include "paddle/utils/Common.h" -#include "paddle/utils/GlobalConstants.h" -#include "paddle/utils/PythonUtil.h" -#include "paddle/utils/Stat.h" -#include "paddle/utils/Util.h" +#include "paddle/legacy/utils/Common.h" +#include "paddle/legacy/utils/GlobalConstants.h" +#include "paddle/legacy/utils/PythonUtil.h" +#include "paddle/legacy/utils/Stat.h" +#include "paddle/legacy/utils/Util.h" #include "RemoteParameterUpdater.h" #include "TesterConfig.h" #include "ThreadParameterUpdater.h" #include "TrainerConfigHelper.h" -#include "paddle/gserver/gradientmachines/GradientMachineMode.h" -#include "paddle/gserver/gradientmachines/NeuralNetwork.h" -#include "paddle/gserver/layers/ValidationLayer.h" +#include "paddle/legacy/gserver/gradientmachines/GradientMachineMode.h" +#include "paddle/legacy/gserver/gradientmachines/NeuralNetwork.h" +#include "paddle/legacy/gserver/layers/ValidationLayer.h" DEFINE_string(config, "", "Trainer config file"); diff --git a/paddle/trainer/Trainer.h b/paddle/legacy/trainer/Trainer.h similarity index 95% rename from paddle/trainer/Trainer.h rename to paddle/legacy/trainer/Trainer.h index fac589d1d7..b467f9af0c 100644 --- a/paddle/trainer/Trainer.h +++ b/paddle/legacy/trainer/Trainer.h @@ -14,13 +14,13 @@ limitations under the License. */ #pragma once -#include "paddle/utils/Util.h" +#include "paddle/legacy/utils/Util.h" #include #include "hl_gpu.h" -#include "paddle/gserver/dataproviders/DataProvider.h" -#include "paddle/gserver/gradientmachines/GradientMachine.h" +#include "paddle/legacy/gserver/dataproviders/DataProvider.h" +#include "paddle/legacy/gserver/gradientmachines/GradientMachine.h" #include #include @@ -41,7 +41,7 @@ namespace paddle { * train/test a NeuralNetwork. */ class Trainer { -public: + public: /** * Ctor. * @return @@ -138,7 +138,7 @@ public: */ ParameterUtil* getParameterUtilPtr(); -protected: + protected: /** * Train one pass of data. * @@ -159,10 +159,10 @@ protected: void createTester(); -private: + private: std::unique_ptr createTesterConfig(); -protected: + protected: std::shared_ptr config_; std::shared_ptr stats_; diff --git a/paddle/trainer/TrainerBenchmark.cpp b/paddle/legacy/trainer/TrainerBenchmark.cpp similarity index 96% rename from paddle/trainer/TrainerBenchmark.cpp rename to paddle/legacy/trainer/TrainerBenchmark.cpp index 173653c816..7f5bd23354 100644 --- a/paddle/trainer/TrainerBenchmark.cpp +++ b/paddle/legacy/trainer/TrainerBenchmark.cpp @@ -15,8 +15,8 @@ limitations under the License. */ #undef PADDLE_DISABLE_TIMER #include "Trainer.h" -#include "paddle/utils/Stat.h" -#include "paddle/utils/Util.h" +#include "paddle/legacy/utils/Stat.h" +#include "paddle/legacy/utils/Util.h" DECLARE_int32(test_period); diff --git a/paddle/trainer/TrainerConfigHelper.cpp b/paddle/legacy/trainer/TrainerConfigHelper.cpp similarity index 98% rename from paddle/trainer/TrainerConfigHelper.cpp rename to paddle/legacy/trainer/TrainerConfigHelper.cpp index 2b68d89e48..4d31ba8d71 100644 --- a/paddle/trainer/TrainerConfigHelper.cpp +++ b/paddle/legacy/trainer/TrainerConfigHelper.cpp @@ -15,8 +15,8 @@ limitations under the License. 
*/ #include "TrainerConfigHelper.h" #include "ParamUtil.h" #include "TrainerConfig.pb.h" -#include "paddle/utils/Flags.h" -#include "paddle/utils/PythonUtil.h" +#include "paddle/legacy/utils/Flags.h" +#include "paddle/legacy/utils/PythonUtil.h" DECLARE_string(config); DECLARE_string(init_model_path); diff --git a/paddle/trainer/TrainerConfigHelper.h b/paddle/legacy/trainer/TrainerConfigHelper.h similarity index 98% rename from paddle/trainer/TrainerConfigHelper.h rename to paddle/legacy/trainer/TrainerConfigHelper.h index f1366cc041..0e428bea2c 100644 --- a/paddle/trainer/TrainerConfigHelper.h +++ b/paddle/legacy/trainer/TrainerConfigHelper.h @@ -14,8 +14,8 @@ limitations under the License. */ #pragma once -#include -#include +#include +#include #include namespace paddle { @@ -37,7 +37,7 @@ class DataConfig; * Define a macro to unify 'final' keyword */ class TrainerConfigHelper /*final*/ { -public: + public: DISABLE_COPY(TrainerConfigHelper); /** @@ -193,7 +193,7 @@ public: */ static std::shared_ptr createFromFlagConfig(); -private: + private: static std::string getConfigNameFromPassId(int passId, const std::string& modelPath); diff --git a/paddle/trainer/TrainerInternal.cpp b/paddle/legacy/trainer/TrainerInternal.cpp similarity index 97% rename from paddle/trainer/TrainerInternal.cpp rename to paddle/legacy/trainer/TrainerInternal.cpp index 4c5d4a0913..ee3dea6340 100644 --- a/paddle/trainer/TrainerInternal.cpp +++ b/paddle/legacy/trainer/TrainerInternal.cpp @@ -24,12 +24,12 @@ limitations under the License. */ #include -#include "paddle/gserver/gradientmachines/NeuralNetwork.h" -#include "paddle/gserver/layers/ValidationLayer.h" -#include "paddle/utils/GlobalConstants.h" -#include "paddle/utils/PythonUtil.h" -#include "paddle/utils/Stat.h" -#include "paddle/utils/Util.h" +#include "paddle/legacy/gserver/gradientmachines/NeuralNetwork.h" +#include "paddle/legacy/gserver/layers/ValidationLayer.h" +#include "paddle/legacy/utils/GlobalConstants.h" +#include "paddle/legacy/utils/PythonUtil.h" +#include "paddle/legacy/utils/Stat.h" +#include "paddle/legacy/utils/Util.h" #include "RemoteParameterUpdater.h" #include "ThreadParameterUpdater.h" diff --git a/paddle/trainer/TrainerInternal.h b/paddle/legacy/trainer/TrainerInternal.h similarity index 96% rename from paddle/trainer/TrainerInternal.h rename to paddle/legacy/trainer/TrainerInternal.h index 7018faab24..93919a68fc 100644 --- a/paddle/trainer/TrainerInternal.h +++ b/paddle/legacy/trainer/TrainerInternal.h @@ -14,7 +14,7 @@ limitations under the License. */ #pragma once -#include "paddle/utils/Util.h" +#include "paddle/legacy/utils/Util.h" #include #include @@ -25,7 +25,7 @@ limitations under the License. 
*/ #include "TrainerConfigHelper.h" #include "TrainerInternalConfig.h" #include "hl_gpu.h" -#include "paddle/gserver/gradientmachines/GradientMachine.h" +#include "paddle/legacy/gserver/gradientmachines/GradientMachine.h" namespace paddle { @@ -34,7 +34,7 @@ namespace paddle { * the core training class for driving training logic */ class TrainerInternal { -public: + public: struct ParaStat { real maxAbsGrad; real avgAbsGrad; @@ -126,7 +126,7 @@ public: UpdateCallback updateCallback, bool doPipelineUpdate); -protected: + protected: std::shared_ptr parameterUpdater_; GradientMachinePtr gradientMachine_; std::shared_ptr config_; diff --git a/paddle/trainer/TrainerInternalConfig.cpp b/paddle/legacy/trainer/TrainerInternalConfig.cpp similarity index 100% rename from paddle/trainer/TrainerInternalConfig.cpp rename to paddle/legacy/trainer/TrainerInternalConfig.cpp diff --git a/paddle/trainer/TrainerInternalConfig.h b/paddle/legacy/trainer/TrainerInternalConfig.h similarity index 97% rename from paddle/trainer/TrainerInternalConfig.h rename to paddle/legacy/trainer/TrainerInternalConfig.h index b47692720e..b91b539323 100644 --- a/paddle/trainer/TrainerInternalConfig.h +++ b/paddle/legacy/trainer/TrainerInternalConfig.h @@ -14,12 +14,12 @@ limitations under the License. */ #pragma once -#include "paddle/utils/Util.h" +#include "paddle/legacy/utils/Util.h" #include #include "hl_gpu.h" -#include "paddle/gserver/gradientmachines/GradientMachine.h" +#include "paddle/legacy/gserver/gradientmachines/GradientMachine.h" #include "TrainerConfig.pb.h" @@ -37,7 +37,7 @@ namespace paddle { * through one mini-batch. */ class TrainerStats { -public: + public: /** * @brief reset all stats. * @@ -147,7 +147,7 @@ public: return os.str(); } -private: + private: int64_t numProcessed_; real totalCost_; real currentCost_; diff --git a/paddle/trainer/TrainerMain.cpp b/paddle/legacy/trainer/TrainerMain.cpp similarity index 94% rename from paddle/trainer/TrainerMain.cpp rename to paddle/legacy/trainer/TrainerMain.cpp index c5c1d484e5..911aeba192 100644 --- a/paddle/trainer/TrainerMain.cpp +++ b/paddle/legacy/trainer/TrainerMain.cpp @@ -13,8 +13,8 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ #include -#include "paddle/pserver/ParameterServerController.h" -#include "paddle/utils/PythonUtil.h" +#include "paddle/legacy/pserver/ParameterServerController.h" +#include "paddle/legacy/utils/PythonUtil.h" #include "ParamUtil.h" #include "Trainer.h" diff --git a/paddle/trainer/tests/.gitignore b/paddle/legacy/trainer/tests/.gitignore similarity index 100% rename from paddle/trainer/tests/.gitignore rename to paddle/legacy/trainer/tests/.gitignore diff --git a/paddle/trainer/tests/CMakeLists.txt b/paddle/legacy/trainer/tests/CMakeLists.txt similarity index 89% rename from paddle/trainer/tests/CMakeLists.txt rename to paddle/legacy/trainer/tests/CMakeLists.txt index 12c9ea8cef..08548bea4c 100644 --- a/paddle/trainer/tests/CMakeLists.txt +++ b/paddle/legacy/trainer/tests/CMakeLists.txt @@ -5,7 +5,7 @@ add_custom_target(copy_trainer_conf ALL DEPENDS sample_trainer_config.conf) set(PYTHON_PATH ${PADDLE_SOURCE_DIR}/paddle/.set_python_path.sh -d - ${PADDLE_BINARY_DIR}/python/:${PADDLE_BINARY_DIR}/paddle/trainer/tests) + ${PADDLE_BINARY_DIR}/python/:${PADDLE_BINARY_DIR}/paddle/legacy/trainer/tests) function(trainer_test TARGET) add_unittest_without_exec(${TARGET} ${TARGET}.cpp) add_test(NAME ${TARGET} @@ -33,5 +33,5 @@ endif() #################### test_config_parser ######################### add_test(NAME test_config_parser COMMAND ${PYTHON_PATH} ${PYTHON_EXECUTABLE} - ${PADDLE_SOURCE_DIR}/paddle/trainer/tests/config_parser_test.py + ${PADDLE_SOURCE_DIR}/paddle/legacy/trainer/tests/config_parser_test.py WORKING_DIRECTORY ${PADDLE_BINARY_DIR}/paddle/) diff --git a/paddle/trainer/tests/__init__.py b/paddle/legacy/trainer/tests/__init__.py similarity index 100% rename from paddle/trainer/tests/__init__.py rename to paddle/legacy/trainer/tests/__init__.py diff --git a/paddle/trainer/tests/config_parser_test.py b/paddle/legacy/trainer/tests/config_parser_test.py similarity index 77% rename from paddle/trainer/tests/config_parser_test.py rename to paddle/legacy/trainer/tests/config_parser_test.py index db66ebb5b7..0d3d82cbda 100644 --- a/paddle/trainer/tests/config_parser_test.py +++ b/paddle/legacy/trainer/tests/config_parser_test.py @@ -15,8 +15,9 @@ from paddle.trainer.config_parser import parse_config_and_serialize if __name__ == '__main__': - parse_config_and_serialize('trainer/tests/test_config.conf', '') + parse_config_and_serialize('legacy/trainer/tests/test_config.conf', '') parse_config_and_serialize( - 'trainer/tests/sample_trainer_config.conf', + 'legacy/trainer/tests/sample_trainer_config.conf', 'extension_module_name=paddle.trainer.config_parser_extension') - parse_config_and_serialize('gserver/tests/pyDataProvider/trainer.conf', '') + parse_config_and_serialize( + 'legacy/gserver/tests/pyDataProvider/trainer.conf', '') diff --git a/paddle/trainer/tests/fake_file_list.list b/paddle/legacy/trainer/tests/fake_file_list.list similarity index 100% rename from paddle/trainer/tests/fake_file_list.list rename to paddle/legacy/trainer/tests/fake_file_list.list diff --git a/paddle/trainer/tests/picojson.h b/paddle/legacy/trainer/tests/picojson.h similarity index 99% rename from paddle/trainer/tests/picojson.h rename to paddle/legacy/trainer/tests/picojson.h index eaa8b9baf6..75349537b1 100644 --- a/paddle/trainer/tests/picojson.h +++ b/paddle/legacy/trainer/tests/picojson.h @@ -125,7 +125,7 @@ enum { INDENT_WIDTH = 2 }; struct null {}; class value { -public: + public: typedef std::vector array; typedef std::map object; union _storage { @@ -139,11 +139,11 @@ public: object* object_; }; 
-protected:
+ protected:
   int type_;
   _storage u_;
-public:
+ public:
   value();
   value(int type, bool);
   explicit value(bool b);
@@ -179,7 +179,7 @@ public:
   void serialize(Iter os, bool prettify = false) const;
   std::string serialize(bool prettify = false) const;
-private:
+ private:
   template
   value(const T*);  // intentionally defined to block implicit conversion of
                     // pointer to bool
@@ -588,13 +588,13 @@ inline std::string value::_serialize(int indent) const {
 template
 class input {
-protected:
+ protected:
   Iter cur_, end_;
   int last_ch_;
   bool ungot_;
   int line_;
-public:
+ public:
   input(const Iter& first, const Iter& last)
       : cur_(first), end_(last), last_ch_(-1), ungot_(false), line_(1) {}
   int getc() {
@@ -873,7 +873,7 @@ inline bool _parse(Context& ctx, input& in) {
 }
 class deny_parse_context {
-public:
+ public:
   bool set_null() { return false; }
   bool set_bool(bool) { return false; }
 #ifdef PICOJSON_USE_INT64
@@ -898,10 +898,10 @@ public:
 };
 class default_parse_context {
-protected:
+ protected:
   value* out_;
-public:
+ public:
   default_parse_context(value* out) : out_(out) {}
   bool set_null() {
     *out_ = value();
@@ -949,18 +949,18 @@ public:
     return _parse(ctx, in);
   }
-private:
+ private:
   default_parse_context(const default_parse_context&);
   default_parse_context& operator=(const default_parse_context&);
 };
 class null_parse_context {
-public:
+ public:
   struct dummy_str {
     void push_back(int) {}
   };
-public:
+ public:
   null_parse_context() {}
   bool set_null() { return true; }
   bool set_bool(bool) { return true; }
@@ -985,7 +985,7 @@ public:
     return _parse(*this, in);
   }
-private:
+ private:
   null_parse_context(const null_parse_context&);
   null_parse_context& operator=(const null_parse_context&);
 };
diff --git a/paddle/trainer/tests/pydata_provider_wrapper_dir/test_pydata_provider_wrapper.data b/paddle/legacy/trainer/tests/pydata_provider_wrapper_dir/test_pydata_provider_wrapper.data
similarity index 100%
rename from paddle/trainer/tests/pydata_provider_wrapper_dir/test_pydata_provider_wrapper.data
rename to paddle/legacy/trainer/tests/pydata_provider_wrapper_dir/test_pydata_provider_wrapper.data
diff --git a/paddle/legacy/trainer/tests/pydata_provider_wrapper_dir/test_pydata_provider_wrapper.list b/paddle/legacy/trainer/tests/pydata_provider_wrapper_dir/test_pydata_provider_wrapper.list
new file mode 100644
index 0000000000..11c1b1b38b
--- /dev/null
+++ b/paddle/legacy/trainer/tests/pydata_provider_wrapper_dir/test_pydata_provider_wrapper.list
@@ -0,0 +1 @@
+legacy/trainer/tests/pydata_provider_wrapper_dir/test_pydata_provider_wrapper.data
diff --git a/paddle/trainer/tests/rnn_gen_test_model_dir/r1.test.beam b/paddle/legacy/trainer/tests/rnn_gen_test_model_dir/r1.test.beam
similarity index 100%
rename from paddle/trainer/tests/rnn_gen_test_model_dir/r1.test.beam
rename to paddle/legacy/trainer/tests/rnn_gen_test_model_dir/r1.test.beam
diff --git a/paddle/trainer/tests/rnn_gen_test_model_dir/r1.test.nest b/paddle/legacy/trainer/tests/rnn_gen_test_model_dir/r1.test.nest
similarity index 100%
rename from paddle/trainer/tests/rnn_gen_test_model_dir/r1.test.nest
rename to paddle/legacy/trainer/tests/rnn_gen_test_model_dir/r1.test.nest
diff --git a/paddle/trainer/tests/rnn_gen_test_model_dir/r1.test.nobeam b/paddle/legacy/trainer/tests/rnn_gen_test_model_dir/r1.test.nobeam
similarity index 100%
rename from paddle/trainer/tests/rnn_gen_test_model_dir/r1.test.nobeam
rename to paddle/legacy/trainer/tests/rnn_gen_test_model_dir/r1.test.nobeam
diff --git a/paddle/trainer/tests/rnn_gen_test_model_dir/t1/transtable b/paddle/legacy/trainer/tests/rnn_gen_test_model_dir/t1/transtable
similarity index 100%
rename from paddle/trainer/tests/rnn_gen_test_model_dir/t1/transtable
rename to paddle/legacy/trainer/tests/rnn_gen_test_model_dir/t1/transtable
diff --git a/paddle/trainer/tests/rnn_gen_test_model_dir/t1/wordvec b/paddle/legacy/trainer/tests/rnn_gen_test_model_dir/t1/wordvec
similarity index 100%
rename from paddle/trainer/tests/rnn_gen_test_model_dir/t1/wordvec
rename to paddle/legacy/trainer/tests/rnn_gen_test_model_dir/t1/wordvec
diff --git a/paddle/trainer/tests/sample_data.txt b/paddle/legacy/trainer/tests/sample_data.txt
similarity index 100%
rename from paddle/trainer/tests/sample_data.txt
rename to paddle/legacy/trainer/tests/sample_data.txt
diff --git a/paddle/legacy/trainer/tests/sample_filelist.txt b/paddle/legacy/trainer/tests/sample_filelist.txt
new file mode 100644
index 0000000000..8573f9e179
--- /dev/null
+++ b/paddle/legacy/trainer/tests/sample_filelist.txt
@@ -0,0 +1 @@
+legacy/trainer/tests/sample_data.txt
diff --git a/paddle/trainer/tests/sample_trainer_config.conf b/paddle/legacy/trainer/tests/sample_trainer_config.conf
similarity index 95%
rename from paddle/trainer/tests/sample_trainer_config.conf
rename to paddle/legacy/trainer/tests/sample_trainer_config.conf
index 2697832840..5800b36256 100644
--- a/paddle/trainer/tests/sample_trainer_config.conf
+++ b/paddle/legacy/trainer/tests/sample_trainer_config.conf
@@ -16,13 +16,13 @@ from paddle.trainer_config_helpers import *
 TrainData(SimpleData(
-            files = "trainer/tests/sample_filelist.txt",
+            files = "legacy/trainer/tests/sample_filelist.txt",
             feat_dim = 3,
             context_len = 0,
             buffer_capacity = 1000000))
 TestData(SimpleData(
-            files = "trainer/tests/sample_filelist.txt",
+            files = "legacy/trainer/tests/sample_filelist.txt",
             feat_dim = 3,
             context_len = 0,
             buffer_capacity = 1000000))
diff --git a/paddle/trainer/tests/sample_trainer_config_hsigmoid.conf b/paddle/legacy/trainer/tests/sample_trainer_config_hsigmoid.conf
similarity index 96%
rename from paddle/trainer/tests/sample_trainer_config_hsigmoid.conf
rename to paddle/legacy/trainer/tests/sample_trainer_config_hsigmoid.conf
index e4abe31d48..155c40b31f 100644
--- a/paddle/trainer/tests/sample_trainer_config_hsigmoid.conf
+++ b/paddle/legacy/trainer/tests/sample_trainer_config_hsigmoid.conf
@@ -17,7 +17,7 @@ from paddle.trainer_config_helpers import *
 TrainData(SimpleData(
-            files = "trainer/tests/sample_filelist.txt",
+            files = "legacy/trainer/tests/sample_filelist.txt",
             feat_dim = 3,
             context_len = 0,
             buffer_capacity = 1000000,
diff --git a/paddle/trainer/tests/sample_trainer_config_parallel.conf b/paddle/legacy/trainer/tests/sample_trainer_config_parallel.conf
similarity index 95%
rename from paddle/trainer/tests/sample_trainer_config_parallel.conf
rename to paddle/legacy/trainer/tests/sample_trainer_config_parallel.conf
index e2b8b3ecda..49cdde7fa2 100644
--- a/paddle/trainer/tests/sample_trainer_config_parallel.conf
+++ b/paddle/legacy/trainer/tests/sample_trainer_config_parallel.conf
@@ -16,13 +16,13 @@ from paddle.trainer_config_helpers import *
 TrainData(SimpleData(
-            files = "trainer/tests/sample_filelist.txt",
+            files = "legacy/trainer/tests/sample_filelist.txt",
             feat_dim = 3,
             context_len = 0,
             buffer_capacity = 1000000))
 TestData(SimpleData(
-            files = "trainer/tests/sample_filelist.txt",
+            files = "legacy/trainer/tests/sample_filelist.txt",
             feat_dim = 3,
             context_len = 0,
             buffer_capacity = 1000000))
diff --git a/paddle/trainer/tests/sample_trainer_nest_rnn_gen.conf b/paddle/legacy/trainer/tests/sample_trainer_nest_rnn_gen.conf
similarity index 94%
rename from paddle/trainer/tests/sample_trainer_nest_rnn_gen.conf
rename to paddle/legacy/trainer/tests/sample_trainer_nest_rnn_gen.conf
index 741a0aa71d..51ef905a5a 100644
--- a/paddle/trainer/tests/sample_trainer_nest_rnn_gen.conf
+++ b/paddle/legacy/trainer/tests/sample_trainer_nest_rnn_gen.conf
@@ -63,8 +63,8 @@ beam_gen_concat = recurrent_group(name="rnn_gen_concat",
 seqtext_printer_evaluator(input=beam_gen_concat,
                           id_input=sent_id,
-                          dict_file="./trainer/tests/test_gen_dict.txt",
-                          result_file="./trainer/tests/dump_text.test")
+                          dict_file="./legacy/trainer/tests/test_gen_dict.txt",
+                          result_file="./legacy/trainer/tests/dump_text.test")
 #outputs(beam_gen_concat)
 # In this config, as dummy_data_input doesn't work on beam_gen (we can find dummy_memory
 # is read-only memory, and isn't used by other layers of step), we show the Inputs and Outputs
diff --git a/paddle/trainer/tests/sample_trainer_rnn_gen.conf b/paddle/legacy/trainer/tests/sample_trainer_rnn_gen.conf
similarity index 94%
rename from paddle/trainer/tests/sample_trainer_rnn_gen.conf
rename to paddle/legacy/trainer/tests/sample_trainer_rnn_gen.conf
index 58d27f15ae..35c7f0fcd9 100644
--- a/paddle/trainer/tests/sample_trainer_rnn_gen.conf
+++ b/paddle/legacy/trainer/tests/sample_trainer_rnn_gen.conf
@@ -56,8 +56,8 @@ beam_gen = beam_search(name="rnn_gen",
 seqtext_printer_evaluator(input=beam_gen,
                           id_input=sent_id,
-                          dict_file="./trainer/tests/test_gen_dict.txt",
-                          result_file="./trainer/tests/dump_text.test")
+                          dict_file="./legacy/trainer/tests/test_gen_dict.txt",
+                          result_file="./legacy/trainer/tests/dump_text.test")
 #outputs(beam_gen)
 # In this config, as dummy_data_input doesn't work on beam_gen (we can find dummy_memory
 # is read-only memory, and isn't used by other layers of step), we show the Inputs and Outputs
diff --git a/paddle/trainer/tests/simple_sparse_neural_network.py b/paddle/legacy/trainer/tests/simple_sparse_neural_network.py
similarity index 95%
rename from paddle/trainer/tests/simple_sparse_neural_network.py
rename to paddle/legacy/trainer/tests/simple_sparse_neural_network.py
index 970fb466dc..9419f4d903 100644
--- a/paddle/trainer/tests/simple_sparse_neural_network.py
+++ b/paddle/legacy/trainer/tests/simple_sparse_neural_network.py
@@ -16,7 +16,7 @@ from paddle.trainer_config_helpers import *
 settings(batch_size=17, learning_method=AdaGradOptimizer(), learning_rate=1e-4)
-file_list = 'trainer/tests/fake_file_list.list'
+file_list = 'legacy/trainer/tests/fake_file_list.list'
 define_py_data_sources2(
     train_list=file_list,
diff --git a/paddle/trainer/tests/simple_sparse_neural_network_dp.py b/paddle/legacy/trainer/tests/simple_sparse_neural_network_dp.py
similarity index 100%
rename from paddle/trainer/tests/simple_sparse_neural_network_dp.py
rename to paddle/legacy/trainer/tests/simple_sparse_neural_network_dp.py
diff --git a/paddle/trainer/tests/testPyDataWrapper.py b/paddle/legacy/trainer/tests/testPyDataWrapper.py
similarity index 100%
rename from paddle/trainer/tests/testPyDataWrapper.py
rename to paddle/legacy/trainer/tests/testPyDataWrapper.py
diff --git a/paddle/trainer/tests/test_Compare.cpp b/paddle/legacy/trainer/tests/test_Compare.cpp
similarity index 96%
rename from paddle/trainer/tests/test_Compare.cpp
rename to paddle/legacy/trainer/tests/test_Compare.cpp
index f3a964acb6..e37e546be8 100644
--- a/paddle/trainer/tests/test_Compare.cpp
+++ b/paddle/legacy/trainer/tests/test_Compare.cpp
@@ -12,9 +12,9 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License. */
-#include
+#include
-#include "paddle/trainer/Trainer.h"
+#include "paddle/legacy/trainer/Trainer.h"
 #include
 #include
@@ -22,7 +22,8 @@ limitations under the License. */
 using namespace paddle;  // NOLINT
 using namespace std;     // NOLINT
-static const string& configFile = "trainer/tests/sample_trainer_config.conf";
+static const string& configFile =
+    "legacy/trainer/tests/sample_trainer_config.conf";
 DECLARE_int32(gpu_id);
 DECLARE_bool(use_gpu);
diff --git a/paddle/trainer/tests/test_PyDataProviderWrapper.cpp b/paddle/legacy/trainer/tests/test_PyDataProviderWrapper.cpp
similarity index 96%
rename from paddle/trainer/tests/test_PyDataProviderWrapper.cpp
rename to paddle/legacy/trainer/tests/test_PyDataProviderWrapper.cpp
index 92dc8aa9ec..847adcfaba 100644
--- a/paddle/trainer/tests/test_PyDataProviderWrapper.cpp
+++ b/paddle/legacy/trainer/tests/test_PyDataProviderWrapper.cpp
@@ -15,10 +15,10 @@ limitations under the License. */
 #ifndef PADDLE_NO_PYTHON
 #include
 #include
-#include
-#include
-#include
-#include
+#include
+#include
+#include
+#include
 #include
 #include
 #include
@@ -26,7 +26,7 @@ limitations under the License. */
 #include "picojson.h"
 void checkValue(std::vector& arguments, picojson::array& arr);
-const std::string kDir = "./trainer/tests/pydata_provider_wrapper_dir/";
+const std::string kDir = "./legacy/trainer/tests/pydata_provider_wrapper_dir/";
 TEST(PyDataProviderWrapper, SequenceData) {
   paddle::DataConfig conf;
diff --git a/paddle/trainer/tests/test_Trainer.cpp b/paddle/legacy/trainer/tests/test_Trainer.cpp
similarity index 88%
rename from paddle/trainer/tests/test_Trainer.cpp
rename to paddle/legacy/trainer/tests/test_Trainer.cpp
index 394038cf73..14ad0a2652 100644
--- a/paddle/trainer/tests/test_Trainer.cpp
+++ b/paddle/legacy/trainer/tests/test_Trainer.cpp
@@ -12,20 +12,21 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License. */
-#include
-#include
-#include "paddle/trainer/Trainer.h"
+#include
+#include
+#include "paddle/legacy/trainer/Trainer.h"
 #include
 using namespace paddle;  // NOLINT
 using namespace std;     // NOLINT
-static const string& configFile1 = "trainer/tests/sample_trainer_config.conf";
+static const string& configFile1 =
+    "legacy/trainer/tests/sample_trainer_config.conf";
 static const string& configFile2 =
-    "trainer/tests/sample_trainer_config_hsigmoid.conf";
+    "legacy/trainer/tests/sample_trainer_config_hsigmoid.conf";
 static const string& configFile4 =
-    "trainer/tests/sample_trainer_config_parallel.conf";
+    "legacy/trainer/tests/sample_trainer_config_parallel.conf";
 DECLARE_bool(use_gpu);
 DECLARE_string(config);
diff --git a/paddle/trainer/tests/test_TrainerOnePass.cpp b/paddle/legacy/trainer/tests/test_TrainerOnePass.cpp
similarity index 95%
rename from paddle/trainer/tests/test_TrainerOnePass.cpp
rename to paddle/legacy/trainer/tests/test_TrainerOnePass.cpp
index b2a93d4d5e..3e5c5ea723 100644
--- a/paddle/trainer/tests/test_TrainerOnePass.cpp
+++ b/paddle/legacy/trainer/tests/test_TrainerOnePass.cpp
@@ -12,23 +12,24 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License. */
-#include
-#include
-#include "paddle/trainer/Trainer.h"
-#include "paddle/trainer/TrainerInternal.h"
+#include
+#include
+#include "paddle/legacy/trainer/Trainer.h"
+#include "paddle/legacy/trainer/TrainerInternal.h"
 #include
-#include
+#include
 using namespace paddle;  // NOLINT
 using namespace std;     // NOLINT
-static const string& configFile1 = "trainer/tests/sample_trainer_config.conf";
+static const string& configFile1 =
+    "legacy/trainer/tests/sample_trainer_config.conf";
 static const string& configFile2 =
-    "trainer/tests/sample_trainer_config_parallel.conf";
+    "legacy/trainer/tests/sample_trainer_config_parallel.conf";
 static const string& configFileSimpleSparse =
-    "trainer/tests/simple_sparse_neural_network.py";
+    "legacy/trainer/tests/simple_sparse_neural_network.py";
 DECLARE_bool(use_gpu);
 DECLARE_string(config);
@@ -38,7 +39,7 @@ DECLARE_int32(num_passes);
 DECLARE_int32(saving_period);
 class TrainerForTest : public paddle::Trainer {
-public:
+ public:
   inline const std::shared_ptr& getParameterUpdaterForTest() {
     return this->trainerInternal_.getParameterUpdater();
   }
diff --git a/paddle/trainer/tests/test_config.conf b/paddle/legacy/trainer/tests/test_config.conf
similarity index 97%
rename from paddle/trainer/tests/test_config.conf
rename to paddle/legacy/trainer/tests/test_config.conf
index 2f86aaa753..bce687ad83 100644
--- a/paddle/trainer/tests/test_config.conf
+++ b/paddle/legacy/trainer/tests/test_config.conf
@@ -16,7 +16,7 @@ from paddle.trainer_config_helpers import *
 TrainData(SimpleData(
-            files = "trainer/tests/sample_filelist.txt",
+            files = "legacy/trainer/tests/sample_filelist.txt",
             feat_dim = 3,
             context_len = 0,
             buffer_capacity = 1000000,
diff --git a/paddle/trainer/tests/test_gen_dict.txt b/paddle/legacy/trainer/tests/test_gen_dict.txt
similarity index 100%
rename from paddle/trainer/tests/test_gen_dict.txt
rename to paddle/legacy/trainer/tests/test_gen_dict.txt
diff --git a/paddle/trainer/tests/test_recurrent_machine_generation.cpp b/paddle/legacy/trainer/tests/test_recurrent_machine_generation.cpp
similarity index 90%
rename from paddle/trainer/tests/test_recurrent_machine_generation.cpp
rename to paddle/legacy/trainer/tests/test_recurrent_machine_generation.cpp
index a8fbe31c2b..47b4e82cd3 100644
--- a/paddle/trainer/tests/test_recurrent_machine_generation.cpp
+++ b/paddle/legacy/trainer/tests/test_recurrent_machine_generation.cpp
@@ -14,21 +14,23 @@ limitations under the License. */
 #include
-#include
-#include
+#include
+#include
 #include
 using namespace paddle;  // NOLINT
 using namespace std;     // NOLINT
-static const string& CONFIG_FILE = "trainer/tests/sample_trainer_rnn_gen.conf";
+static const string& CONFIG_FILE =
+    "legacy/trainer/tests/sample_trainer_rnn_gen.conf";
 static const string& NEST_CONFIG_FILE =
-    "trainer/tests/sample_trainer_nest_rnn_gen.conf";
-static const string& OUTPUT_DIR = "trainer/tests/dump_text.test";
-static string modelDir = "trainer/tests/rnn_gen_test_model_dir/t1";  // NOLINT
-static string expectFile =  // NOLINT
-    "trainer/tests/rnn_gen_test_model_dir/r1.test";  // NOLINT
+    "legacy/trainer/tests/sample_trainer_nest_rnn_gen.conf";
+static const string& OUTPUT_DIR = "legacy/trainer/tests/dump_text.test";
+static string modelDir =
+    "legacy/trainer/tests/rnn_gen_test_model_dir/t1";  // NOLINT
+static string expectFile =  // NOLINT
+    "legacy/trainer/tests/rnn_gen_test_model_dir/r1.test";  // NOLINT
 DECLARE_string(config_args);
diff --git a/paddle/utils/.gitignore b/paddle/legacy/utils/.gitignore
similarity index 100%
rename from paddle/utils/.gitignore
rename to paddle/legacy/utils/.gitignore
diff --git a/paddle/utils/Any.h b/paddle/legacy/utils/Any.h
similarity index 100%
rename from paddle/utils/Any.h
rename to paddle/legacy/utils/Any.h
diff --git a/paddle/utils/CMakeLists.txt b/paddle/legacy/utils/CMakeLists.txt
similarity index 82%
rename from paddle/utils/CMakeLists.txt
rename to paddle/legacy/utils/CMakeLists.txt
index 6292e7fa52..b42b2bae96 100644
--- a/paddle/utils/CMakeLists.txt
+++ b/paddle/legacy/utils/CMakeLists.txt
@@ -14,9 +14,6 @@ add_library(paddle_utils STATIC
             ${UTIL_SOURCES}
             ${UTIL_ARCH_SOURCES}
             ${UTIL_RES})
-add_style_check_target(paddle_utils ${UTIL_HEADERS})
-add_style_check_target(paddle_utils ${UTIL_SOURCES}
-                       ${UTIL_ARCH_SOURCES})
 add_dependencies(paddle_utils paddle_proto ${external_project_dependencies})
 if(WITH_TESTING)
   add_subdirectory(tests)
diff --git a/paddle/utils/ClassRegistrar.h b/paddle/legacy/utils/ClassRegistrar.h
similarity index 99%
rename from paddle/utils/ClassRegistrar.h
rename to paddle/legacy/utils/ClassRegistrar.h
index 1ac27bafab..5f40a0b25e 100644
--- a/paddle/utils/ClassRegistrar.h
+++ b/paddle/legacy/utils/ClassRegistrar.h
@@ -41,7 +41,7 @@ namespace paddle {
  */
 template
 class ClassRegistrar {
-public:
+ public:
   typedef std::function ClassCreator;
   // Register a class using a creation function.
@@ -74,7 +74,7 @@ public:
     }
   }
-protected:
+ protected:
   std::map creatorMap_;
 };
diff --git a/paddle/utils/Common.h b/paddle/legacy/utils/Common.h
similarity index 100%
rename from paddle/utils/Common.h
rename to paddle/legacy/utils/Common.h
diff --git a/paddle/utils/CpuId.cpp b/paddle/legacy/utils/CpuId.cpp
similarity index 96%
rename from paddle/utils/CpuId.cpp
rename to paddle/legacy/utils/CpuId.cpp
index 7186feef04..66e7c6606f 100644
--- a/paddle/utils/CpuId.cpp
+++ b/paddle/legacy/utils/CpuId.cpp
@@ -9,8 +9,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License. */
*/ -#include "paddle/utils/CpuId.h" -#include "paddle/utils/Util.h" +#include "paddle/legacy/utils/CpuId.h" +#include "paddle/legacy/utils/Util.h" #ifdef _WIN32 diff --git a/paddle/utils/CpuId.h b/paddle/legacy/utils/CpuId.h similarity index 99% rename from paddle/utils/CpuId.h rename to paddle/legacy/utils/CpuId.h index 869be5be54..ed58211d13 100644 --- a/paddle/utils/CpuId.h +++ b/paddle/legacy/utils/CpuId.h @@ -35,7 +35,7 @@ enum simd_t { // clang-format on class SIMDFlags final { -public: + public: DISABLE_COPY(SIMDFlags); SIMDFlags(); @@ -46,7 +46,7 @@ public: return !((simd_flags_ & flags) ^ flags); } -private: + private: int simd_flags_ = SIMD_NONE; }; diff --git a/paddle/utils/CustomStackTrace.cpp b/paddle/legacy/utils/CustomStackTrace.cpp similarity index 100% rename from paddle/utils/CustomStackTrace.cpp rename to paddle/legacy/utils/CustomStackTrace.cpp diff --git a/paddle/utils/CustomStackTrace.h b/paddle/legacy/utils/CustomStackTrace.h similarity index 99% rename from paddle/utils/CustomStackTrace.h rename to paddle/legacy/utils/CustomStackTrace.h index 52a6df9497..b60077ea2d 100644 --- a/paddle/utils/CustomStackTrace.h +++ b/paddle/legacy/utils/CustomStackTrace.h @@ -49,7 +49,7 @@ namespace paddle { */ template class CustomStackTrace { -public: + public: /** * @brief Pop out an item from the top of the stack if item == top. * Else, just set status to popping. @@ -136,7 +136,7 @@ public: p.push(item); } -private: + private: /** * Get thread local attribute, and save them into a map (threadId => TYPE*) * @@ -174,7 +174,7 @@ private: return this->getThreadLocal(this->isPushing_, this->pushingBuffers_); } -private: + private: mutable std::mutex mtx_; std::unordered_map*> stackBuffers_; diff --git a/paddle/utils/DynamicLoader.cpp b/paddle/legacy/utils/DynamicLoader.cpp similarity index 100% rename from paddle/utils/DynamicLoader.cpp rename to paddle/legacy/utils/DynamicLoader.cpp diff --git a/paddle/utils/DynamicLoader.h b/paddle/legacy/utils/DynamicLoader.h similarity index 100% rename from paddle/utils/DynamicLoader.h rename to paddle/legacy/utils/DynamicLoader.h diff --git a/paddle/utils/Error.h b/paddle/legacy/utils/Error.h similarity index 99% rename from paddle/utils/Error.h rename to paddle/legacy/utils/Error.h index 7cde983060..1fc8482e3a 100644 --- a/paddle/utils/Error.h +++ b/paddle/legacy/utils/Error.h @@ -95,7 +95,7 @@ namespace paddle { * log(FATAL) and CHECK in Paddle, 'check' method will be removed. */ class Error { -public: + public: /** * Construct a no-error value. 
   */
@@ -138,7 +138,7 @@ public:
   */
   bool isOK() const { return msg_ == nullptr; }
-private:
+ private:
   std::shared_ptr msg_;
 };
diff --git a/paddle/utils/Excepts.h b/paddle/legacy/utils/Excepts.h
similarity index 100%
rename from paddle/utils/Excepts.h
rename to paddle/legacy/utils/Excepts.h
diff --git a/paddle/utils/Flags.cpp b/paddle/legacy/utils/Flags.cpp
similarity index 100%
rename from paddle/utils/Flags.cpp
rename to paddle/legacy/utils/Flags.cpp
diff --git a/paddle/utils/Flags.h b/paddle/legacy/utils/Flags.h
similarity index 100%
rename from paddle/utils/Flags.h
rename to paddle/legacy/utils/Flags.h
diff --git a/paddle/utils/GlobalConstants.cpp b/paddle/legacy/utils/GlobalConstants.cpp
similarity index 100%
rename from paddle/utils/GlobalConstants.cpp
rename to paddle/legacy/utils/GlobalConstants.cpp
diff --git a/paddle/utils/GlobalConstants.h b/paddle/legacy/utils/GlobalConstants.h
similarity index 99%
rename from paddle/utils/GlobalConstants.h
rename to paddle/legacy/utils/GlobalConstants.h
index 0ec1c28dfb..3f45e82268 100644
--- a/paddle/utils/GlobalConstants.h
+++ b/paddle/legacy/utils/GlobalConstants.h
@@ -78,7 +78,7 @@ enum ParameterType {
 using namespace enumeration_wrapper;  // NOLINT
 class TrainAlgorithm {
-public:
+ public:
   static const std::string SGD;
   static const std::string AsyncSGD;
   static const std::string OWLQN;
diff --git a/paddle/utils/Locks.h b/paddle/legacy/utils/Locks.h
similarity index 97%
rename from paddle/utils/Locks.h
rename to paddle/legacy/utils/Locks.h
index e87abb9139..65f983685f 100644
--- a/paddle/utils/Locks.h
+++ b/paddle/legacy/utils/Locks.h
@@ -42,7 +42,7 @@ namespace paddle {
  * Use unlock() to unlock the lock.
  */
 class RWLock {
-public:
+ public:
   RWLock() { pthread_rwlock_init(&rwlock_, NULL); }
   ~RWLock() { pthread_rwlock_destroy(&rwlock_); }
   RWLock(const RWLock&) = delete;
@@ -62,7 +62,7 @@ public:
   void lock_shared() { pthread_rwlock_rdlock(&rwlock_); }
   void unlock() { pthread_rwlock_unlock(&rwlock_); }
-protected:
+ protected:
   pthread_rwlock_t rwlock_;
 };
@@ -71,7 +71,7 @@ protected:
  * using RAII management mechanism.
  */
 class ReadLockGuard {
-public:
+ public:
   /**
    * @brief Construct Function. Lock on rwlock in read mode.
    */
@@ -86,7 +86,7 @@ public:
    */
   ~ReadLockGuard() { rwlock_->unlock(); }
-protected:
+ protected:
   RWLock* rwlock_;
 };
@@ -98,7 +98,7 @@ protected:
  */
 class SpinLockPrivate;
 class SpinLock {
-public:
+ public:
   DISABLE_COPY(SpinLock);
   SpinLock();
   ~SpinLock();
@@ -107,7 +107,7 @@ public:
   void lock();
   void unlock();
-private:
+ private:
   SpinLockPrivate* m;
 };
@@ -116,7 +116,7 @@ private:
 */
 class SemaphorePrivate;
 class Semaphore {
-public:
+ public:
   //! Disable copy & assign
   Semaphore(const Semaphore& other) = delete;
   Semaphore& operator=(const Semaphore&& other) = delete;
@@ -124,7 +124,7 @@ public:
   //! Enable move.
   Semaphore(Semaphore&& other) : m(std::move(other.m)) {}
-public:
+ public:
   /**
    * @brief Construct Function.
    * @param[in] initValue the initial value of the
@@ -156,7 +156,7 @@ public:
    */
   void post();
-private:
+ private:
   SemaphorePrivate* m;
 };
@@ -166,7 +166,7 @@ private:
 */
 class ThreadBarrierPrivate;
 class ThreadBarrier {
-public:
+ public:
   DISABLE_COPY(ThreadBarrier);
   /**
@@ -184,7 +184,7 @@ public:
    */
   void wait();
-private:
+ private:
   ThreadBarrierPrivate* m;
 };
@@ -192,7 +192,7 @@ private:
 * A wrapper for condition variable with mutex.
 */
 class LockedCondition : public std::condition_variable {
-public:
+ public:
   /**
    * @brief execute op and notify one thread which was blocked.
   * @param[in] op a thread can do something in op before notify.
@@ -235,7 +235,7 @@ public:
   */
   std::mutex* mutex() { return &mutex_; }
-protected:
+ protected:
   std::mutex mutex_;
 };
diff --git a/paddle/utils/Logging.cpp b/paddle/legacy/utils/Logging.cpp
similarity index 100%
rename from paddle/utils/Logging.cpp
rename to paddle/legacy/utils/Logging.cpp
diff --git a/paddle/utils/Logging.h b/paddle/legacy/utils/Logging.h
similarity index 100%
rename from paddle/utils/Logging.h
rename to paddle/legacy/utils/Logging.h
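Note on the Locks.h hunks above: only the include path and the access-specifier indentation change; the locking API is untouched. A minimal usage sketch, assuming ReadLockGuard takes the RWLock by reference as its constructor comment describes (the shared counter and function names are invented for illustration):

  #include "paddle/legacy/utils/Locks.h"

  static paddle::RWLock g_rwlock;
  static int g_counter = 0;  // hypothetical shared state

  void increment() {
    g_rwlock.lock();  // exclusive writer lock
    ++g_counter;
    g_rwlock.unlock();
  }

  int readCounter() {
    paddle::ReadLockGuard guard(g_rwlock);  // RAII shared reader lock
    return g_counter;  // lock released when guard leaves scope
  }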
diff --git a/paddle/utils/PythonUtil.cpp b/paddle/legacy/utils/PythonUtil.cpp
similarity index 97%
rename from paddle/utils/PythonUtil.cpp
rename to paddle/legacy/utils/PythonUtil.cpp
index 7faeff55c2..21ed049c4d 100644
--- a/paddle/utils/PythonUtil.cpp
+++ b/paddle/legacy/utils/PythonUtil.cpp
@@ -136,7 +136,13 @@ std::string callPythonFunc(const std::string& moduleName,
                            const std::string& funcName,
                            const std::vector& args) {
   PyObjectPtr obj = callPythonFuncRetPyObj(moduleName, funcName, args);
+#if PY_MAJOR_VERSION >= 3
+  Py_ssize_t str_size = 0u;
+  const char* str = PyUnicode_AsUTF8AndSize(obj.get(), &str_size);
+  return std::string(str, (size_t)str_size);
+#else
   return std::string(PyString_AsString(obj.get()), PyString_Size(obj.get()));
+#endif  // PY_MAJOR_VERSION >= 3
 }
 PyObjectPtr createPythonClass(
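Note on the callPythonFunc change above: Python 3 drops the PyString_* API, so the string payload is now pulled out of a unicode object via PyUnicode_AsUTF8AndSize. The same version-gated pattern in isolation (toStdString and pyObj are illustrative names, not part of the patch):

  #include <Python.h>
  #include <string>

  std::string toStdString(PyObject* pyObj) {
  #if PY_MAJOR_VERSION >= 3
    Py_ssize_t size = 0;
    // Buffer is owned by pyObj; copy it into the std::string before pyObj dies.
    const char* data = PyUnicode_AsUTF8AndSize(pyObj, &size);
    return std::string(data, (size_t)size);
  #else
    return std::string(PyString_AsString(pyObj), PyString_Size(pyObj));
  #endif
  }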
diff --git a/paddle/utils/PythonUtil.h b/paddle/legacy/utils/PythonUtil.h
similarity index 90%
rename from paddle/utils/PythonUtil.h
rename to paddle/legacy/utils/PythonUtil.h
index daebaffc85..d5b2dbddde 100644
--- a/paddle/utils/PythonUtil.h
+++ b/paddle/legacy/utils/PythonUtil.h
@@ -14,7 +14,7 @@ limitations under the License. */
 #pragma once
 // clang-format off
-#include "paddle/utils/Util.h"
+#include "paddle/legacy/utils/Util.h"
 #ifndef PADDLE_NO_PYTHON
 // must include the following two blocks, otherwise,
@@ -55,12 +55,12 @@ std::string callPythonFunc(const std::string& moduleName,
 * NOTE: the lock of this guard is reentrant or recursive.
 */
 class PyGuard {
-public:
+ public:
   PyGuard();
   PyGuard(const PyGuard& other) = delete;
   PyGuard& operator=(const PyGuard& other) = delete;
-private:
+ private:
   std::lock_guard guard_;
 };
@@ -88,6 +88,33 @@ PyObjectPtr createPythonClass(const std::string& moduleName,
 namespace py {
 PyObjectPtr import(const std::string& moduleName);
+#if PY_MAJOR_VERSION >= 3
+/**
+ * Cast a PyLong to int type T.
+ * @tparam T return type.
+ * @param [in] obj PyLong object.
+ * @param [out] ok status for casting. False if error occured. nullptr if user
+ *              don't care is ok or not.
+ * @return The value of python object, or 0 if not ok.
+ */
+template
+T castInt(PyObject* obj, bool* ok = nullptr) {
+  // Refer to https://www.python.org/dev/peps/pep-0237/, the int and long object
+  // were unified to long since python3
+  if (PyLong_Check(obj)) {
+    if (ok) *ok = true;
+    return (T)PyLong_AsUnsignedLong(obj);
+  } else {
+    if (ok) *ok = false;
+    return (T)0;
+  }
+}
+
+// Convert PyAPI from 2.x to 3.x
+#define PyString_FromString PyUnicode_FromString
+#define PyString_AsString PyUnicode_AsUTF8
+
+#else
 /**
  * Cast a PyLong or PyInt to int type T.
  * @tparam T return type.
@@ -109,6 +136,7 @@ T castInt(PyObject* obj, bool* ok = nullptr) {
     return (T)0;
   }
 }
+#endif  // PY_MAJOR_VERSION >= 3
 /**
  * Invoke repr of python object.
@@ -133,7 +161,7 @@ std::string getPyCallStack();
 * Implements getAttr method for object.
 */
 class ObjectHelper {
-public:
+ public:
   explicit ObjectHelper(const PyObjectPtr& obj) : obj_(obj) {}
   /**
@@ -192,7 +220,7 @@ public:
     return PyObject_IsTrue(tmp.get());
   }
-private:
+ private:
   const PyObjectPtr& obj_;
 };
@@ -202,7 +230,7 @@ private:
 * The python sequence means list or tuple.
 */
 class SequenceHelper {
-public:
+ public:
   explicit SequenceHelper(const PyObjectPtr& seq) : seq_(seq.get()) {
     CHECK(PySequence_Check(seq_));
   }
@@ -248,12 +276,12 @@ public:
     }
   }
-private:
+ private:
   PyObject* seq_;
 };
 class DictHelper {
-public:
+ public:
   explicit DictHelper(PyObject* d) : dict_(d) {}
   explicit DictHelper(const PyObjectPtr& d) : dict_(d.get()) {}
@@ -275,7 +303,7 @@ public:
     this->set(key, list);
   }
-private:
+ private:
   inline void checkDict() { CHECK(PyDict_Check(this->dict_)); }
   PyObject* dict_;
@@ -289,7 +317,7 @@ inline static bool isCallable(const PyObjectPtr& obj) {
 * Wrap a callable object.
 */
 class CallableHelper {
-public:
+ public:
   explicit CallableHelper(const PyObjectPtr& obj) : obj_(obj) {
     CHECK(py::isCallable(obj_));
   }
@@ -315,7 +343,7 @@ public:
     return PyObject_Call(obj_.get(), args.get(), kwargs.get());
   }
-private:
+ private:
   const PyObjectPtr& obj_;
   PyObjectPtr args;
   PyObjectPtr kwargs;
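Note on py::castInt above: per PEP 237, Python 3 folds int and long into a single long type, so the Python 3 branch only needs PyLong_Check. A hedged caller sketch (readSize and pyValue are made-up names):

  #include "paddle/legacy/utils/PythonUtil.h"

  // pyValue: any PyObject* expected to hold an integer.
  size_t readSize(PyObject* pyValue) {
    bool ok = false;
    size_t n = paddle::py::castInt<size_t>(pyValue, &ok);
    return ok ? n : 0;  // castInt already yields 0 when the type check fails
  }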
diff --git a/paddle/utils/Queue.h b/paddle/legacy/utils/Queue.h
similarity index 99%
rename from paddle/utils/Queue.h
rename to paddle/legacy/utils/Queue.h
index f054738f87..189e1a14f7 100644
--- a/paddle/utils/Queue.h
+++ b/paddle/legacy/utils/Queue.h
@@ -56,7 +56,7 @@ namespace paddle {
 */
 template
 class Queue {
-public:
+ public:
   /**
    * @brief Construct Function. Default capacity of Queue is zero.
    */
@@ -147,7 +147,7 @@ public:
     });
   }
-private:
+ private:
   std::deque elements_;
   int numElements_;
   std::mutex queueLock_;
@@ -185,7 +185,7 @@ private:
 */
 template
 class BlockingQueue {
-public:
+ public:
   /**
    * @brief Construct Function.
    * @param[in] capacity the max numer of elements the queue can have.
@@ -244,7 +244,7 @@ public:
     return queue_.empty();
   }
-private:
+ private:
   std::mutex mutex_;
   std::condition_variable notEmpty_;
   std::condition_variable notFull_;
diff --git a/paddle/utils/Stat.cpp b/paddle/legacy/utils/Stat.cpp
similarity index 100%
rename from paddle/utils/Stat.cpp
rename to paddle/legacy/utils/Stat.cpp
diff --git a/paddle/utils/Stat.h b/paddle/legacy/utils/Stat.h
similarity index 98%
rename from paddle/utils/Stat.h
rename to paddle/legacy/utils/Stat.h
index 79fd3b8cf0..100e9eba90 100644
--- a/paddle/utils/Stat.h
+++ b/paddle/legacy/utils/Stat.h
@@ -33,7 +33,7 @@ namespace paddle {
 class Stat;
 class StatInfo {
-public:
+ public:
   explicit StatInfo(Stat* stat = nullptr) : stat_(stat) {
     total_ = 0;
     max_ = 0;
@@ -61,7 +61,7 @@ class Stat;
 typedef std::shared_ptr StatPtr;
 class StatSet {
-public:
+ public:
   explicit StatSet(const std::string& name) : name_(name) {}
   ~StatSet() {}
@@ -102,7 +102,7 @@ public:
   // pserver code logic, -_- ).
   void reset(bool clearRawData = true);
-private:
+ private:
   std::unordered_map statSet_;
   const std::string name_;
   RWLock lock_;
@@ -112,7 +112,7 @@ extern StatSet globalStat;
 /*@brief : a simple stat*/
 class Stat {
-public:
+ public:
   explicit Stat(const std::string& statName)
       : destructStat_(nullptr), name_(statName), openThreadInfo_(false) {}
   ~Stat() {}
@@ -137,7 +137,7 @@ public:
   friend class StatInfo;
-private:
+ private:
   void mergeThreadStat(StatInfo& allThreadStat);
   std::mutex lock_;
@@ -164,7 +164,7 @@ inline uint64_t nowInMicroSec() {
 * A simple help class to measure time interval
 */
 class Timer {
-public:
+ public:
   explicit Timer(bool autoStart = true) : total_(0), startStamp_(0) {
     if (autoStart) {
       start();
@@ -181,13 +181,13 @@ public:
   void reset() { total_ = 0; }
-protected:
+ protected:
   uint64_t total_;
   uint64_t startStamp_;
 };
 class TimerOnce {
-public:
+ public:
   TimerOnce(Stat* stat,
             const char* info = "",
             uint64_t threshold = -1,
@@ -208,7 +208,7 @@ public:
     stat_->addSample(span);
   }
-private:
+ private:
   Stat* stat_;
   const char* info_;
   Timer timer_;
@@ -280,11 +280,11 @@ inline StatSet& registerTimerArg2(uint64_t threshold = -1,
 #endif  // DISABLE_TIMER
 class GpuProfiler final {
-public:
+ public:
   GpuProfiler(std::string statName, std::string info);
   ~GpuProfiler();
-private:
+ private:
   std::lock_guard guard_;
 };
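Note on Stat/TimerOnce above: TimerOnce is the RAII helper that records one timing sample into a Stat when it leaves scope (its destructor calls stat_->addSample(span), as shown in the hunk). A minimal sketch; the stat name and the timed work are invented:

  #include "paddle/legacy/utils/Stat.h"

  static paddle::Stat forwardStat("forwardCompute");  // hypothetical stat

  void timedForward() {
    paddle::TimerOnce timer(&forwardStat, "forward");
    // ... work being measured ...
  }  // ~TimerOnce() adds the elapsed span to forwardStat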
diff --git a/paddle/utils/StringUtil.cpp b/paddle/legacy/utils/StringUtil.cpp
similarity index 100%
rename from paddle/utils/StringUtil.cpp
rename to paddle/legacy/utils/StringUtil.cpp
diff --git a/paddle/utils/StringUtil.h b/paddle/legacy/utils/StringUtil.h
similarity index 100%
rename from paddle/utils/StringUtil.h
rename to paddle/legacy/utils/StringUtil.h
diff --git a/paddle/utils/Thread.h b/paddle/legacy/utils/Thread.h
similarity index 99%
rename from paddle/utils/Thread.h
rename to paddle/legacy/utils/Thread.h
index ef36a8c5b2..2ee6eba1a6 100644
--- a/paddle/utils/Thread.h
+++ b/paddle/legacy/utils/Thread.h
@@ -29,7 +29,7 @@ namespace paddle {
 */
 class Thread {
-public:
+ public:
   /**
    * @brief Construct Function. Default thread pointer is null.
    */
@@ -62,7 +62,7 @@ public:
   */
   virtual void run() = 0;
-protected:
+ protected:
   std::unique_ptr thread_;
 };
@@ -73,7 +73,7 @@ protected:
 * Use addJob() to add a new job to the job queue.
 */
 class ThreadWorker : protected Thread {
-public:
+ public:
   typedef std::function JobFunc;
   /**
@@ -116,7 +116,7 @@ public:
     finishCV_.wait([this] { return empty_; });
   }
-protected:
+ protected:
   /**
    * @brief Execute jobs in the job queue sequentianlly,
    * @note If finish all the jobs in the job queue,
@@ -150,7 +150,7 @@ protected:
 * JobFunc can use tid to divide input data.
 */
 class SyncThreadPool {
-public:
+ public:
   typedef std::function JobFunc;
   /**
@@ -236,7 +236,7 @@ public:
     }
   }
-protected:
+ protected:
   /**
    * @brief Start all the workers in the pool, call their run() function.
    */
@@ -285,7 +285,7 @@ protected:
     }
   }
-protected:
+ protected:
   pid_t ownerThreadId_;
   bool stopping_;
   ThreadBarrier jobStartBarrier_;
@@ -323,7 +323,7 @@ protected:
 */
 template
 class MultiThreadWorker {
-public:
+ public:
   typedef T ResultType;
   typedef std::shared_ptr ResultPtrType;
   typedef std::function JobFunc;
@@ -424,7 +424,7 @@ public:
   */
   bool testResult() { return results_.empty(); }
-protected:
+ protected:
   /**
    * @brief Do the jobs in the job queue sequentianlly
    *        and enqueue the result into the result queue.
@@ -476,7 +476,7 @@ protected:
 * thread pool.
 */
 class AsyncThreadPool {
-public:
+ public:
   typedef std::function JobFunc;
   AsyncThreadPool() { LOG(FATAL) << "Not implemented"; }
@@ -594,7 +594,7 @@ public:
     }
   }
-protected:
+ protected:
   /**
    * @brief Execute the jobs in the job queue.
    */
@@ -606,7 +606,7 @@ protected:
     }
   }
-private:
+ private:
   std::vector> workers_;
   Queue jobs_;
   bool stopping_;
diff --git a/paddle/utils/ThreadLocal.cpp b/paddle/legacy/utils/ThreadLocal.cpp
similarity index 100%
rename from paddle/utils/ThreadLocal.cpp
rename to paddle/legacy/utils/ThreadLocal.cpp
diff --git a/paddle/utils/ThreadLocal.h b/paddle/legacy/utils/ThreadLocal.h
similarity index 98%
rename from paddle/utils/ThreadLocal.h
rename to paddle/legacy/utils/ThreadLocal.h
index 0a27b8b97b..c5b07506d3 100644
--- a/paddle/utils/ThreadLocal.h
+++ b/paddle/legacy/utils/ThreadLocal.h
@@ -49,7 +49,7 @@ namespace paddle {
 */
 template
 class ThreadLocal {
-public:
+ public:
   ThreadLocal() {
     CHECK_EQ(pthread_key_create(&threadSpecificKey_, dataDestructor), 0);
   }
@@ -92,7 +92,7 @@ public:
   */
   operator T*() { return get(); }
-private:
+ private:
   static void dataDestructor(void* p) { delete (T*)p; }
   pthread_key_t threadSpecificKey_;
@@ -111,7 +111,7 @@ private:
 */
 template
 class ThreadLocalD {
-public:
+ public:
   ThreadLocalD() { CHECK_EQ(pthread_key_create(&threadSpecificKey_, NULL), 0); }
   ~ThreadLocalD() {
     pthread_key_delete(threadSpecificKey_);
@@ -150,7 +150,7 @@ public:
   */
   T& operator*() { return *get(); }
-private:
+ private:
   static void dataDestructor(void* p) { delete (T*)p; }
   void updateMap(T* p) {
@@ -172,7 +172,7 @@ private:
 * @brief Thread-safe C-style random API.
 */
 class ThreadLocalRand {
-public:
+ public:
   /**
    * initSeed just like srand,
    * called by main thread,
@@ -205,7 +205,7 @@ public:
   */
   static int getDefaultSeed() { return defaultSeed_; }
-protected:
+ protected:
   static unsigned int defaultSeed_;
   static ThreadLocal seed_;
 };
@@ -214,7 +214,7 @@ protected:
 * @brief Thread-safe C++ style random engine.
 */
 class ThreadLocalRandomEngine {
-public:
+ public:
   /**
    * get random_engine for each thread.
    *
@@ -222,7 +222,7 @@ public:
   */
   static std::default_random_engine& get();
-protected:
+ protected:
   static ThreadLocal engine_;
 };
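Note on ThreadLocal above: it wraps a pthread_key_t so each thread lazily builds its own T, destroyed by dataDestructor at thread exit, and ThreadLocalRandomEngine::get() hands each thread a private std::default_random_engine. A sketch assuming the get() accessor implied by operator T*():

  #include "paddle/legacy/utils/ThreadLocal.h"
  #include <vector>

  static paddle::ThreadLocal<std::vector<int>> localBuf;

  void workerBody() {
    localBuf.get()->push_back(1);  // per-thread scratch space, no locking
    std::default_random_engine& eng = paddle::ThreadLocalRandomEngine::get();
    (void)eng();  // per-thread RNG draw
  }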
*/ class SetDevice { -public: + public: explicit SetDevice(int deviceId) { isSet_ = deviceId >= 0; devId_ = 0; @@ -206,7 +206,7 @@ public: } } -protected: + protected: bool isSet_; int devId_; }; @@ -240,7 +240,7 @@ inline void enablePeerAccess(int d1, int d2) { * } */ class AsyncGpuBlock { -public: + public: AsyncGpuBlock() : syncFlag_(hl_get_sync_flag()) { hl_set_sync_flag(false); } ~AsyncGpuBlock() { if (syncFlag_) { @@ -249,7 +249,7 @@ public: } } -private: + private: bool syncFlag_; }; @@ -378,7 +378,7 @@ std::string join(const std::string& part1, * A Checker for each invoke of method in same thread. */ class SameThreadChecker { -public: + public: SameThreadChecker() {} /** @@ -400,7 +400,7 @@ public: << invokeThreadId_ << " current invoked in " << curThreadId; } -private: + private: std::once_flag onceFlag_; std::thread::id invokeThreadId_; }; @@ -421,7 +421,7 @@ private: */ template class WeakKVCache { -public: + public: WeakKVCache() {} std::shared_ptr get(const KType& key, @@ -442,7 +442,7 @@ public: return retVal; } -private: + private: std::mutex lock_; std::unordered_map, Hash> storage_; }; @@ -453,7 +453,7 @@ private: */ template class ScopedCallbacks { -public: + public: ScopedCallbacks(CallbackType enter, CallbackType exit, Args&... args) : exit_(std::bind(exit, args...)) { enter(args...); @@ -464,7 +464,7 @@ public: ~ScopedCallbacks() { exit_(); } -private: + private: std::function exit_; }; @@ -475,7 +475,7 @@ private: */ template class AlignedAllocator { -public: + public: /// std campatible typedefs. typedef T* pointer; typedef const T* const_pointer; @@ -552,12 +552,12 @@ public: return this->allocate(n); } -private: + private: AlignedAllocator& operator=(const AlignedAllocator&); // disable }; class Deprecated { -public: + public: explicit Deprecated(const std::string& msg = "") { if (msg.empty()) { LOG(WARNING) << "This class is deprecated, please do not use this class."; diff --git a/paddle/utils/Version.cpp b/paddle/legacy/utils/Version.cpp similarity index 100% rename from paddle/utils/Version.cpp rename to paddle/legacy/utils/Version.cpp diff --git a/paddle/utils/Version.h b/paddle/legacy/utils/Version.h similarity index 100% rename from paddle/utils/Version.h rename to paddle/legacy/utils/Version.h diff --git a/paddle/utils/arch/linux/Locks.cpp b/paddle/legacy/utils/arch/linux/Locks.cpp similarity index 96% rename from paddle/utils/arch/linux/Locks.cpp rename to paddle/legacy/utils/arch/linux/Locks.cpp index a4e6c8f7b8..32d351e332 100644 --- a/paddle/utils/arch/linux/Locks.cpp +++ b/paddle/legacy/utils/arch/linux/Locks.cpp @@ -12,14 +12,14 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
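WeakKVCache in Util.h caches objects through weak references, so an entry survives only while some caller still holds the shared pointer. A loose Python counterpart built on weakref.WeakValueDictionary (a sketch, not the Paddle API):

    import threading
    import weakref

    class WeakCache:
        # Sketch of the WeakKVCache idea: entries live only while a caller
        # still holds a strong reference to the value.
        def __init__(self):
            self._lock = threading.Lock()
            self._storage = weakref.WeakValueDictionary()

        def get(self, key, creator):
            with self._lock:
                value = self._storage.get(key)
                if value is None:
                    value = creator()
                    self._storage[key] = value
                return value

    class Blob:                     # weakrefs need a plain class, not int/str
        def __init__(self, data):
            self.data = data

    cache = WeakCache()
    a = cache.get("k", lambda: Blob(1))
    b = cache.get("k", lambda: Blob(2))
    assert a is b                   # second lookup hit the weak cache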
*/ -#include "paddle/utils/Locks.h" +#include "paddle/legacy/utils/Locks.h" #include #include -#include "paddle/utils/Logging.h" +#include "paddle/legacy/utils/Logging.h" namespace paddle { class SemaphorePrivate { -public: + public: sem_t sem; }; @@ -45,7 +45,7 @@ void Semaphore::post() { sem_post(&m->sem); } #ifdef PADDLE_USE_PTHREAD_SPINLOCK class SpinLockPrivate { -public: + public: inline SpinLockPrivate() { pthread_spin_init(&lock_, 0); } inline ~SpinLockPrivate() { pthread_spin_destroy(&lock_); } @@ -63,7 +63,7 @@ public: // clang-format on class SpinLockPrivate { -public: + public: inline void lock() { while (lock_.test_and_set(std::memory_order_acquire)) { } @@ -86,7 +86,7 @@ void SpinLock::unlock() { m->unlock(); } #ifdef PADDLE_USE_PTHREAD_BARRIER class ThreadBarrierPrivate { -public: + public: pthread_barrier_t barrier_; inline explicit ThreadBarrierPrivate(int count) { @@ -101,7 +101,7 @@ public: #else class ThreadBarrierPrivate { -public: + public: pthread_mutex_t mutex_; pthread_cond_t cond_; int count_; diff --git a/paddle/utils/arch/osx/Excepts.cpp b/paddle/legacy/utils/arch/osx/Excepts.cpp similarity index 97% rename from paddle/utils/arch/osx/Excepts.cpp rename to paddle/legacy/utils/arch/osx/Excepts.cpp index ac44461578..2b7d6dca84 100644 --- a/paddle/utils/arch/osx/Excepts.cpp +++ b/paddle/legacy/utils/arch/osx/Excepts.cpp @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/utils/Excepts.h" +#include "paddle/legacy/utils/Excepts.h" #if defined(__APPLE__) || defined(__OSX__) #if defined(__arm__) || defined(__arm64__) diff --git a/paddle/utils/arch/osx/Locks.cpp b/paddle/legacy/utils/arch/osx/Locks.cpp similarity index 96% rename from paddle/utils/arch/osx/Locks.cpp rename to paddle/legacy/utils/arch/osx/Locks.cpp index e03992363f..b68c48f0c3 100644 --- a/paddle/utils/arch/osx/Locks.cpp +++ b/paddle/legacy/utils/arch/osx/Locks.cpp @@ -12,16 +12,16 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ -#include "paddle/utils/Locks.h" +#include "paddle/legacy/utils/Locks.h" #include #include #include -#include "paddle/utils/Logging.h" +#include "paddle/legacy/utils/Logging.h" namespace paddle { class SemaphorePrivate { -public: + public: ~SemaphorePrivate() { dispatch_release(sem); } dispatch_semaphore_t sem; @@ -45,7 +45,7 @@ void Semaphore::wait() { void Semaphore::post() { dispatch_semaphore_signal(m->sem); } class SpinLockPrivate { -public: + public: std::atomic_flag lock_ = ATOMIC_FLAG_INIT; char padding_[64 - sizeof(lock_)]; // Padding to cache line size }; @@ -61,7 +61,7 @@ void SpinLock::lock() { void SpinLock::unlock() { m->lock_.clear(std::memory_order_release); } class ThreadBarrierPrivate { -public: + public: pthread_mutex_t mutex_; pthread_cond_t cond_; int count_; diff --git a/paddle/utils/enable_virtualenv.py b/paddle/legacy/utils/enable_virtualenv.py similarity index 100% rename from paddle/utils/enable_virtualenv.py rename to paddle/legacy/utils/enable_virtualenv.py diff --git a/paddle/utils/tests/CMakeLists.txt b/paddle/legacy/utils/tests/CMakeLists.txt similarity index 84% rename from paddle/utils/tests/CMakeLists.txt rename to paddle/legacy/utils/tests/CMakeLists.txt index c770ce1698..4af01db5c8 100644 --- a/paddle/utils/tests/CMakeLists.txt +++ b/paddle/legacy/utils/tests/CMakeLists.txt @@ -13,6 +13,6 @@ add_executable( link_paddle_exe(test_CustomStackTracePrint) if(NOT APPLE) add_test(NAME test_CustomStackTracePrint - COMMAND ${PADDLE_SOURCE_DIR}/paddle/utils/tests/test_CustomStackTracePrint.sh + COMMAND ${PADDLE_SOURCE_DIR}/paddle/legacy/utils/tests/test_CustomStackTracePrint.sh WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}) endif() diff --git a/paddle/utils/tests/test_CustomStackTrace.cpp b/paddle/legacy/utils/tests/test_CustomStackTrace.cpp similarity index 94% rename from paddle/utils/tests/test_CustomStackTrace.cpp rename to paddle/legacy/utils/tests/test_CustomStackTrace.cpp index 4d5540b24c..2a418e3ae2 100644 --- a/paddle/utils/tests/test_CustomStackTrace.cpp +++ b/paddle/legacy/utils/tests/test_CustomStackTrace.cpp @@ -15,10 +15,10 @@ limitations under the License. */ #include // NOLINT #include // NOLINT -#include "paddle/utils/CustomStackTrace.h" -#include "paddle/utils/Locks.h" -#include "paddle/utils/StringUtil.h" -#include "paddle/utils/Util.h" +#include "paddle/legacy/utils/CustomStackTrace.h" +#include "paddle/legacy/utils/Locks.h" +#include "paddle/legacy/utils/StringUtil.h" +#include "paddle/legacy/utils/Util.h" DEFINE_int32(test_thread_num, 10, "testing thread number"); diff --git a/paddle/utils/tests/test_CustomStackTracePrint.cpp b/paddle/legacy/utils/tests/test_CustomStackTracePrint.cpp similarity index 86% rename from paddle/utils/tests/test_CustomStackTracePrint.cpp rename to paddle/legacy/utils/tests/test_CustomStackTracePrint.cpp index 360c61c88a..78886a3ed9 100644 --- a/paddle/utils/tests/test_CustomStackTracePrint.cpp +++ b/paddle/legacy/utils/tests/test_CustomStackTracePrint.cpp @@ -12,9 +12,9 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ -#include "paddle/utils/CustomStackTrace.h" -#include "paddle/utils/StringUtil.h" -#include "paddle/utils/Util.h" +#include "paddle/legacy/utils/CustomStackTrace.h" +#include "paddle/legacy/utils/StringUtil.h" +#include "paddle/legacy/utils/Util.h" int main(int argc, char** argv) { paddle::initMain(argc, argv); diff --git a/paddle/utils/tests/test_CustomStackTracePrint.sh b/paddle/legacy/utils/tests/test_CustomStackTracePrint.sh similarity index 100% rename from paddle/utils/tests/test_CustomStackTracePrint.sh rename to paddle/legacy/utils/tests/test_CustomStackTracePrint.sh diff --git a/paddle/utils/tests/test_Error.cpp b/paddle/legacy/utils/tests/test_Error.cpp similarity index 96% rename from paddle/utils/tests/test_Error.cpp rename to paddle/legacy/utils/tests/test_Error.cpp index 6f311fa6b8..250c4d58a6 100644 --- a/paddle/utils/tests/test_Error.cpp +++ b/paddle/legacy/utils/tests/test_Error.cpp @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ -#include "paddle/utils/Error.h" +#include "paddle/legacy/utils/Error.h" #include diff --git a/paddle/utils/tests/test_SIMDFlags.cpp b/paddle/legacy/utils/tests/test_SIMDFlags.cpp similarity index 94% rename from paddle/utils/tests/test_SIMDFlags.cpp rename to paddle/legacy/utils/tests/test_SIMDFlags.cpp index a808d456a6..6362210acd 100644 --- a/paddle/utils/tests/test_SIMDFlags.cpp +++ b/paddle/legacy/utils/tests/test_SIMDFlags.cpp @@ -11,9 +11,9 @@ limitations under the License. */ #include -#include "paddle/utils/CpuId.h" -#include "paddle/utils/Logging.h" -#include "paddle/utils/Util.h" +#include "paddle/legacy/utils/CpuId.h" +#include "paddle/legacy/utils/Logging.h" +#include "paddle/legacy/utils/Util.h" using namespace paddle; // NOLINT diff --git a/paddle/utils/tests/test_SpinLock.cpp b/paddle/legacy/utils/tests/test_SpinLock.cpp similarity index 93% rename from paddle/utils/tests/test_SpinLock.cpp rename to paddle/legacy/utils/tests/test_SpinLock.cpp index cc34eb1f86..4cd7836d6a 100644 --- a/paddle/utils/tests/test_SpinLock.cpp +++ b/paddle/legacy/utils/tests/test_SpinLock.cpp @@ -17,9 +17,9 @@ limitations under the License. */ #include #include -#include "paddle/utils/Locks.h" -#include "paddle/utils/Logging.h" -#include "paddle/utils/Util.h" +#include "paddle/legacy/utils/Locks.h" +#include "paddle/legacy/utils/Logging.h" +#include "paddle/legacy/utils/Util.h" DEFINE_int32(test_thread_num, 100, "testing thread number"); diff --git a/paddle/utils/tests/test_StringUtils.cpp b/paddle/legacy/utils/tests/test_StringUtils.cpp similarity index 95% rename from paddle/utils/tests/test_StringUtils.cpp rename to paddle/legacy/utils/tests/test_StringUtils.cpp index 248f58a7f2..61d2815f09 100644 --- a/paddle/utils/tests/test_StringUtils.cpp +++ b/paddle/legacy/utils/tests/test_StringUtils.cpp @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
*/ -#include "paddle/utils/StringUtil.h" +#include "paddle/legacy/utils/StringUtil.h" #include diff --git a/paddle/utils/tests/test_Thread.cpp b/paddle/legacy/utils/tests/test_Thread.cpp similarity index 98% rename from paddle/utils/tests/test_Thread.cpp rename to paddle/legacy/utils/tests/test_Thread.cpp index 6e2580c491..5e07da3236 100644 --- a/paddle/utils/tests/test_Thread.cpp +++ b/paddle/legacy/utils/tests/test_Thread.cpp @@ -13,7 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #include -#include +#include #include using paddle::AsyncThreadPool; // NOLINT diff --git a/paddle/utils/tests/test_ThreadBarrier.cpp b/paddle/legacy/utils/tests/test_ThreadBarrier.cpp similarity index 94% rename from paddle/utils/tests/test_ThreadBarrier.cpp rename to paddle/legacy/utils/tests/test_ThreadBarrier.cpp index 554b1c1d4a..9c8851ae21 100644 --- a/paddle/utils/tests/test_ThreadBarrier.cpp +++ b/paddle/legacy/utils/tests/test_ThreadBarrier.cpp @@ -18,9 +18,9 @@ limitations under the License. */ #include #include -#include "paddle/utils/Locks.h" -#include "paddle/utils/Logging.h" -#include "paddle/utils/Util.h" +#include "paddle/legacy/utils/Locks.h" +#include "paddle/legacy/utils/Logging.h" +#include "paddle/legacy/utils/Util.h" DEFINE_int32(test_thread_num, 100, "testing thread number"); diff --git a/paddle/optimizer/CMakeLists.txt b/paddle/optimizer/CMakeLists.txt deleted file mode 100644 index 25fc35311f..0000000000 --- a/paddle/optimizer/CMakeLists.txt +++ /dev/null @@ -1,12 +0,0 @@ -set(OPITMIZER_SRCS - adadelta_optimizer.cc - adagrad_optimizer.cc - adam_optimizer.cc - optimizer.cc - parameter_optimizer.cc - sgd_optimizer.cc - ) - -cc_library(paddle_optimizer STATIC SRCS ${OPITMIZER_SRCS} DEPS paddle_proto glog) -cc_test(serialization_test SRCS serialization_test.cc DEPS paddle_proto) -cc_test(parameter_optimizer_test SRCS parameter_optimizer_test.cc DEPS paddle_optimizer) diff --git a/paddle/scripts/docker/build.sh b/paddle/scripts/docker/build.sh deleted file mode 100755 index 7e00bd3848..0000000000 --- a/paddle/scripts/docker/build.sh +++ /dev/null @@ -1,259 +0,0 @@ -#!/bin/bash - -function cmake_gen() { - mkdir -p /paddle/build - cd /paddle/build - - # build script will not fail if *.deb does not exist - rm *.deb 2>/dev/null || true - # delete previous built whl packages - rm -rf /paddle/paddle/dist 2>/dev/null || true - - # Support build for all python versions, currently - # including cp27-cp27m and cp27-cp27mu. 
- PYTHON_FLAGS="" - if [ "$1" != "" ]; then - echo "using python abi: $1" - if [ "$1" == "cp27-cp27m" ]; then - export LD_LIBRARY_PATH=/opt/_internal/cpython-2.7.11-ucs2/lib:${LD_LIBRARY_PATH#/opt/_internal/cpython-2.7.11-ucs4/lib:} - export PATH=/opt/python/cp27-cp27m/bin/:${PATH} - PYTHON_FLAGS="-DPYTHON_EXECUTABLE:FILEPATH=/opt/python/cp27-cp27m/bin/python - -DPYTHON_INCLUDE_DIR:PATH=/opt/python/cp27-cp27m/include/python2.7 - -DPYTHON_LIBRARIES:FILEPATH=/opt/_internal/cpython-2.7.11-ucs2/lib/libpython2.7.so" - elif [ "$1" == "cp27-cp27mu" ]; then - export LD_LIBRARY_PATH=/opt/_internal/cpython-2.7.11-ucs4/lib:${LD_LIBRARY_PATH#/opt/_internal/cpython-2.7.11-ucs2/lib:} - export PATH=/opt/python/cp27-cp27mu/bin/:${PATH} - PYTHON_FLAGS="-DPYTHON_EXECUTABLE:FILEPATH=/opt/python/cp27-cp27mu/bin/python - -DPYTHON_INCLUDE_DIR:PATH=/opt/python/cp27-cp27mu/include/python2.7 - -DPYTHON_LIBRARIES:FILEPATH=/opt/_internal/cpython-2.7.11-ucs4/lib/libpython2.7.so" - fi - fi - - cat < /paddle/build/Dockerfile < - ENV HOME /root -EOF - - if [[ ${WITH_GPU} == "ON" ]]; then - NCCL_DEPS="apt-get install -y libnccl2=2.1.2-1+cuda8.0 libnccl-dev=2.1.2-1+cuda8.0 &&" - else - NCCL_DEPS="" - fi - - if [[ ${WITH_FLUID_ONLY:-OFF} == "OFF" ]]; then - PADDLE_VERSION="paddle version" - CMD='"paddle", "version"' - else - PADDLE_VERSION="true" - CMD='"true"' - fi - - cat >> /paddle/build/Dockerfile <> /paddle/build/Dockerfile <> /paddle/build/Dockerfile <= 21." - ANDROID_API=21 - fi -else # armeabi, armeabi-v7a - ANDROID_ARCH=arm -fi - -ANDROID_STANDALONE_TOOLCHAIN=$ANDROID_TOOLCHAINS_DIR/$ANDROID_ARCH-android-$ANDROID_API - -cat < new.spec + python ${PADDLE_ROOT}/tools/diff_api.py ${PADDLE_ROOT}/paddle/fluid/API.spec new.spec + deactivate + + API_CHANGE=`git diff --name-only upstream/develop | grep "paddle/fluid/API.spec" || true` + echo "checking API.spec change, PR: ${GIT_PR_ID}, changes: ${API_CHANGE}" + if [ ${API_CHANGE} ] && [ "${GIT_PR_ID}" != "" ]; then + # TODO: curl -H 'Authorization: token ${TOKEN}' + APPROVALS=`curl -H "Authorization: token ${GITHUB_API_TOKEN}" https://api.github.com/repos/PaddlePaddle/Paddle/pulls/${GIT_PR_ID}/reviews | \ + python ${PADDLE_ROOT}/tools/check_pr_approval.py 2 7845005 2887803 728699 13348433` + echo "current pr ${GIT_PR_ID} got approvals: ${APPROVALS}" + if [ "${APPROVALS}" == "FALSE" ]; then + echo "You must have at least 2 approvals for the api change!" + exit 1 + fi + fi +} + + +function single_test() { + TEST_NAME=$1 + if [ -z "${TEST_NAME}" ]; then + echo -e "${RED}Usage:${NONE}" + echo -e "${BOLD}${SCRIPT_NAME}${NONE} ${BLUE}single_test${NONE} [test_name]" + exit 1 + fi + mkdir -p ${PADDLE_ROOT}/build + cd ${PADDLE_ROOT}/build + if [ ${WITH_TESTING:-ON} == "ON" ] ; then + cat <> /paddle/build/Dockerfile <> ${PADDLE_ROOT}/build/Dockerfile <> ${PADDLE_ROOT}/build/Dockerfile <> ${PADDLE_ROOT}/build/Dockerfile < /dev/null - return $? -} - function start_build_docker() { docker pull $IMG - if container_running "${CONTAINER_ID}"; then - docker stop "${CONTAINER_ID}" 1>/dev/null - docker rm -f "${CONTAINER_ID}" 1>/dev/null - fi - apt_mirror='s#http://archive.ubuntu.com/ubuntu#mirror://mirrors.ubuntu.com/mirrors.txt#g' DOCKER_ENV=$(cat <&2 - echo "Please use pre-commit to check what is wrong." 
1>&2 - exit 1 -} - -trap 'abort' 0 -set -e - -# install glide -curl https://glide.sh/get | bash -eval "$(GIMME_GO_VERSION=1.8.3 gimme)" - -# set up go environment for running gometalinter -mkdir -p $GOPATH/src/github.com/PaddlePaddle/ -ln -sf $TRAVIS_BUILD_DIR $GOPATH/src/github.com/PaddlePaddle/Paddle -cd $GOPATH/src/github.com/PaddlePaddle/Paddle/go; glide install; cd - - -go get github.com/alecthomas/gometalinter -gometalinter --install - -cd $TRAVIS_BUILD_DIR -export PATH=/usr/bin:$PATH -pre-commit install -clang-format --version - - - -if ! pre-commit run -a ; then - git diff - exit 1 -fi - -trap : 0 diff --git a/paddle/scripts/travis/deploy_key.enc b/paddle/scripts/travis/deploy_key.enc deleted file mode 100644 index b0aa45c5ac..0000000000 Binary files a/paddle/scripts/travis/deploy_key.enc and /dev/null differ diff --git a/paddle/testing/CMakeLists.txt b/paddle/testing/CMakeLists.txt index a1f446817e..2264481899 100644 --- a/paddle/testing/CMakeLists.txt +++ b/paddle/testing/CMakeLists.txt @@ -6,6 +6,6 @@ if(WITH_TESTING) add_library(paddle_test_util STATIC TestUtil.cpp) add_dependencies(paddle_test_util paddle_proto ${external_project_dependencies}) if(NOT MOBILE_INFERENCE) - cc_library(paddle_gtest_main SRCS paddle_gtest_main.cc DEPS init memory gtest gflags) + cc_library(paddle_gtest_main SRCS paddle_gtest_main.cc DEPS device_context memory gtest gflags) endif() endif() diff --git a/paddle/testing/TestMain.cpp b/paddle/testing/TestMain.cpp index 3e14532d18..1811dbbd1a 100644 --- a/paddle/testing/TestMain.cpp +++ b/paddle/testing/TestMain.cpp @@ -13,7 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. */ #include -#include "paddle/utils/Util.h" +#include "paddle/legacy/utils/Util.h" int main(int argc, char** argv) { testing::InitGoogleTest(&argc, argv); diff --git a/paddle/testing/TestUtil.cpp b/paddle/testing/TestUtil.cpp index cfb8c713d9..fa8efc20f5 100644 --- a/paddle/testing/TestUtil.cpp +++ b/paddle/testing/TestUtil.cpp @@ -14,7 +14,7 @@ limitations under the License. */ #include "TestUtil.h" #include -#include "paddle/math/SparseMatrix.h" +#include "paddle/legacy/math/SparseMatrix.h" DEFINE_int32(fixed_seq_length, 0, "Produce some sequence of fixed length"); diff --git a/paddle/testing/TestUtil.h b/paddle/testing/TestUtil.h index ec86469aeb..98b864e3c5 100644 --- a/paddle/testing/TestUtil.h +++ b/paddle/testing/TestUtil.h @@ -15,7 +15,7 @@ limitations under the License. */ #pragma once #include -#include "paddle/math/Matrix.h" +#include "paddle/legacy/math/Matrix.h" namespace paddle { diff --git a/paddle/testing/paddle_gtest_main.cc b/paddle/testing/paddle_gtest_main.cc index 586ec48477..cfea2059c3 100644 --- a/paddle/testing/paddle_gtest_main.cc +++ b/paddle/testing/paddle_gtest_main.cc @@ -16,8 +16,8 @@ limitations under the License. 
*/ #include "gflags/gflags.h" #include "gtest/gtest.h" -#include "paddle/fluid/framework/init.h" #include "paddle/fluid/memory/memory.h" +#include "paddle/fluid/platform/init.h" int main(int argc, char** argv) { testing::InitGoogleTest(&argc, argv); @@ -30,7 +30,9 @@ int main(int argc, char** argv) { new_argv.push_back( strdup("--tryfromenv=fraction_of_gpu_memory_to_use,use_pinned_memory")); #else - new_argv.push_back(strdup("--tryfromenv=use_pinned_memory")); + new_argv.push_back(strdup( + "--tryfromenv=use_pinned_memory,use_mkldnn,initial_cpu_memory_in_mb")); + new_argv.push_back(strdup("--undefok=use_mkldnn,initial_cpu_memory_in_mb")); #endif int new_argc = static_cast(new_argv.size()); char** new_argv_address = new_argv.data(); diff --git a/paddle/trainer/tests/pydata_provider_wrapper_dir/test_pydata_provider_wrapper.list b/paddle/trainer/tests/pydata_provider_wrapper_dir/test_pydata_provider_wrapper.list deleted file mode 100644 index 0db50f34dd..0000000000 --- a/paddle/trainer/tests/pydata_provider_wrapper_dir/test_pydata_provider_wrapper.list +++ /dev/null @@ -1 +0,0 @@ -trainer/tests/pydata_provider_wrapper_dir/test_pydata_provider_wrapper.data diff --git a/paddle/trainer/tests/sample_filelist.txt b/paddle/trainer/tests/sample_filelist.txt deleted file mode 100644 index 7db4c73535..0000000000 --- a/paddle/trainer/tests/sample_filelist.txt +++ /dev/null @@ -1 +0,0 @@ -trainer/tests/sample_data.txt diff --git a/patches/grpc/completion_queue.h b/patches/grpc/completion_queue.h new file mode 100644 index 0000000000..6e92c60ea2 --- /dev/null +++ b/patches/grpc/completion_queue.h @@ -0,0 +1,386 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/// A completion queue implements a concurrent producer-consumer queue, with +/// two main API-exposed methods: \a Next and \a AsyncNext. These +/// methods are the essential component of the gRPC C++ asynchronous API. +/// There is also a \a Shutdown method to indicate that a given completion queue +/// will no longer have regular events. This must be called before the +/// completion queue is destroyed. +/// All completion queue APIs are thread-safe and may be used concurrently with +/// any other completion queue API invocation; it is acceptable to have +/// multiple threads calling \a Next or \a AsyncNext on the same or different +/// completion queues, or to call these methods concurrently with a \a Shutdown +/// elsewhere. 
+/// \remark{All other API calls on completion queue should be completed before +/// a completion queue destructor is called.} +#ifndef GRPCPP_IMPL_CODEGEN_COMPLETION_QUEUE_H +#define GRPCPP_IMPL_CODEGEN_COMPLETION_QUEUE_H + +#include + +#include +#include +#include +#include +#include +#include + +struct grpc_completion_queue; + +namespace grpc { + +template +class ClientReader; +template +class ClientWriter; +template +class ClientReaderWriter; +template +class ServerReader; +template +class ServerWriter; +namespace internal { +template +class ServerReaderWriterBody; +} // namespace internal + +class Channel; +class ChannelInterface; +class ClientContext; +class CompletionQueue; +class Server; +class ServerBuilder; +class ServerContext; +class ServerInterface; + +namespace internal { +class CompletionQueueTag; +class RpcMethod; +template +class RpcMethodHandler; +template +class ClientStreamingHandler; +template +class ServerStreamingHandler; +template +class BidiStreamingHandler; +class UnknownMethodHandler; +template +class TemplatedBidiStreamingHandler; +template +class BlockingUnaryCallImpl; +} // namespace internal + +extern CoreCodegenInterface* g_core_codegen_interface; + +/// A thin wrapper around \ref grpc_completion_queue (see \ref +/// src/core/lib/surface/completion_queue.h). +/// See \ref doc/cpp/perf_notes.md for notes on best practices for high +/// performance servers. +class CompletionQueue : private GrpcLibraryCodegen { + public: + /// Default constructor. Implicitly creates a \a grpc_completion_queue + /// instance. + CompletionQueue() + : CompletionQueue(grpc_completion_queue_attributes{ + GRPC_CQ_CURRENT_VERSION, GRPC_CQ_NEXT, GRPC_CQ_DEFAULT_POLLING}) {} + + /// Wrap \a take, taking ownership of the instance. + /// + /// \param take The completion queue instance to wrap. Ownership is taken. + explicit CompletionQueue(grpc_completion_queue* take); + + /// Destructor. Destroys the owned wrapped completion queue / instance. + ~CompletionQueue() { + if (typeid(*g_core_codegen_interface).hash_code() != + typeid(CoreCodegenInterface).hash_code()) { + g_core_codegen_interface->grpc_completion_queue_destroy(cq_); + } + } + + /// Tri-state return for AsyncNext: SHUTDOWN, GOT_EVENT, TIMEOUT. + enum NextStatus { + SHUTDOWN, ///< The completion queue has been shutdown and fully-drained + GOT_EVENT, ///< Got a new event; \a tag will be filled in with its + ///< associated value; \a ok indicating its success. + TIMEOUT ///< deadline was reached. + }; + + /// Read from the queue, blocking until an event is available or the queue is + /// shutting down. + /// + /// \param tag[out] Updated to point to the read event's tag. + /// \param ok[out] true if read a successful event, false otherwise. + /// + /// Note that each tag sent to the completion queue (through RPC operations + /// or alarms) will be delivered out of the completion queue by a call to + /// Next (or a related method), regardless of whether the operation succeeded + /// or not. Success here means that this operation completed in the normal + /// valid manner. + /// + /// Server-side RPC request: \a ok indicates that the RPC has indeed + /// been started. If it is false, the server has been Shutdown + /// before this particular call got matched to an incoming RPC. + /// + /// Client-side StartCall/RPC invocation: \a ok indicates that the RPC is + /// going to go to the wire. If it is false, it not going to the wire. 
This + /// would happen if the channel is either permanently broken or + /// transiently broken but with the fail-fast option. (Note that async unary + /// RPCs don't post a CQ tag at this point, nor do client-streaming + /// or bidi-streaming RPCs that have the initial metadata corked option set.) + /// + /// Client-side Write, Client-side WritesDone, Server-side Write, + /// Server-side Finish, Server-side SendInitialMetadata (which is + /// typically included in Write or Finish when not done explicitly): + /// \a ok means that the data/metadata/status/etc is going to go to the + /// wire. If it is false, it is not going to the wire because the call + /// is already dead (i.e., canceled, deadline expired, other side + /// dropped the channel, etc). + /// + /// Client-side Read, Server-side Read, Client-side + /// RecvInitialMetadata (which is typically included in Read if not + /// done explicitly): \a ok indicates whether there is a valid message + /// that got read. If not, you know that there are certainly no more + /// messages that can ever be read from this stream. For the client-side + /// operations, this only happens because the call is dead. For the + /// server-side operation, though, this could happen because the client + /// has done a WritesDone already. + /// + /// Client-side Finish: \a ok should always be true + /// + /// Server-side AsyncNotifyWhenDone: \a ok should always be true + /// + /// Alarm: \a ok is true if it expired, false if it was canceled + /// + /// \return true if got an event, false if the queue is fully drained and + /// shut down. + bool Next(void** tag, bool* ok) { + return (AsyncNextInternal(tag, + ok, + g_core_codegen_interface->gpr_inf_future( + GPR_CLOCK_REALTIME)) != SHUTDOWN); + } + + /// Read from the queue, blocking up to \a deadline (or the queue's shutdown). + /// Both \a tag and \a ok are updated upon success (if an event is available + /// within the \a deadline). A \a tag points to an arbitrary location usually + /// employed to uniquely identify an event. + /// + /// \param tag[out] Upon success, updated to point to the event's tag. + /// \param ok[out] Upon success, true if a successful event, false otherwise + /// See documentation for CompletionQueue::Next for explanation of ok + /// \param deadline[in] How long to block in wait for an event. + /// + /// \return The type of event read. + template <typename T> + NextStatus AsyncNext(void** tag, bool* ok, const T& deadline) { + TimePoint<T> deadline_tp(deadline); + return AsyncNextInternal(tag, ok, deadline_tp.raw_time()); + } + + /// EXPERIMENTAL + /// First executes \a F, then reads from the queue, blocking up to + /// \a deadline (or the queue's shutdown). + /// Both \a tag and \a ok are updated upon success (if an event is available + /// within the \a deadline). A \a tag points to an arbitrary location usually + /// employed to uniquely identify an event. + /// + /// \param F[in] Function to execute before calling AsyncNext on this queue. + /// \param tag[out] Upon success, updated to point to the event's tag. + /// \param ok[out] Upon success, true if read a regular event, false otherwise. + /// \param deadline[in] How long to block in wait for an event. + /// + /// \return The type of event read.
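AsyncNext() differs from Next() by taking a deadline and reporting a tri-state NextStatus. A hypothetical sketch of that contract, with queue.get's timeout standing in for the deadline:

    import queue

    GOT_EVENT, TIMEOUT, SHUTDOWN = range(3)
    _SHUTDOWN = object()

    def async_next(events, deadline_sec):
        # Sketch of AsyncNext's tri-state contract (names hypothetical).
        try:
            event = events.get(timeout=deadline_sec)
        except queue.Empty:
            return TIMEOUT, None, None   # deadline was reached
        if event is _SHUTDOWN:
            return SHUTDOWN, None, None  # queue shut down and drained
        tag, ok = event
        return GOT_EVENT, tag, ok

    q = queue.Queue()
    q.put(("tag-1", True))
    print(async_next(q, 0.1))            # (0, 'tag-1', True)  -> GOT_EVENT
    print(async_next(q, 0.1))            # (1, None, None)     -> TIMEOUT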
+ template + NextStatus DoThenAsyncNext(F&& f, void** tag, bool* ok, const T& deadline) { + CompletionQueueTLSCache cache = CompletionQueueTLSCache(this); + f(); + if (cache.Flush(tag, ok)) { + return GOT_EVENT; + } else { + return AsyncNext(tag, ok, deadline); + } + } + + /// Request the shutdown of the queue. + /// + /// \warning This method must be called at some point if this completion queue + /// is accessed with Next or AsyncNext. \a Next will not return false + /// until this method has been called and all pending tags have been drained. + /// (Likewise for \a AsyncNext returning \a NextStatus::SHUTDOWN .) + /// Only once either one of these methods does that (that is, once the queue + /// has been \em drained) can an instance of this class be destroyed. + /// Also note that applications must ensure that no work is enqueued on this + /// completion queue after this method is called. + void Shutdown(); + + /// Returns a \em raw pointer to the underlying \a grpc_completion_queue + /// instance. + /// + /// \warning Remember that the returned instance is owned. No transfer of + /// owership is performed. + grpc_completion_queue* cq() { return cq_; } + + protected: + /// Private constructor of CompletionQueue only visible to friend classes + CompletionQueue(const grpc_completion_queue_attributes& attributes) { + cq_ = g_core_codegen_interface->grpc_completion_queue_create( + g_core_codegen_interface->grpc_completion_queue_factory_lookup( + &attributes), + &attributes, + NULL); + InitialAvalanching(); // reserve this for the future shutdown + } + + private: + // Friend synchronous wrappers so that they can access Pluck(), which is + // a semi-private API geared towards the synchronous implementation. + template + friend class ::grpc::ClientReader; + template + friend class ::grpc::ClientWriter; + template + friend class ::grpc::ClientReaderWriter; + template + friend class ::grpc::ServerReader; + template + friend class ::grpc::ServerWriter; + template + friend class ::grpc::internal::ServerReaderWriterBody; + template + friend class ::grpc::internal::RpcMethodHandler; + template + friend class ::grpc::internal::ClientStreamingHandler; + template + friend class ::grpc::internal::ServerStreamingHandler; + template + friend class ::grpc::internal::TemplatedBidiStreamingHandler; + friend class ::grpc::internal::UnknownMethodHandler; + friend class ::grpc::Server; + friend class ::grpc::ServerContext; + friend class ::grpc::ServerInterface; + template + friend class ::grpc::internal::BlockingUnaryCallImpl; + + /// EXPERIMENTAL + /// Creates a Thread Local cache to store the first event + /// On this completion queue queued from this thread. Once + /// initialized, it must be flushed on the same thread. + class CompletionQueueTLSCache { + public: + CompletionQueueTLSCache(CompletionQueue* cq); + ~CompletionQueueTLSCache(); + bool Flush(void** tag, bool* ok); + + private: + CompletionQueue* cq_; + bool flushed_; + }; + + NextStatus AsyncNextInternal(void** tag, bool* ok, gpr_timespec deadline); + + /// Wraps \a grpc_completion_queue_pluck. + /// \warning Must not be mixed with calls to \a Next. 
+ bool Pluck(internal::CompletionQueueTag* tag) { + auto deadline = + g_core_codegen_interface->gpr_inf_future(GPR_CLOCK_REALTIME); + auto ev = g_core_codegen_interface->grpc_completion_queue_pluck( + cq_, tag, deadline, nullptr); + bool ok = ev.success != 0; + void* ignored = tag; + GPR_CODEGEN_ASSERT(tag->FinalizeResult(&ignored, &ok)); + GPR_CODEGEN_ASSERT(ignored == tag); + // Ignore mutations by FinalizeResult: Pluck returns the C API status + return ev.success != 0; + } + + /// Performs a single polling pluck on \a tag. + /// \warning Must not be mixed with calls to \a Next. + /// + /// TODO: sreek - This calls tag->FinalizeResult() even if the cq_ is already + /// shutdown. This is most likely a bug and if it is a bug, then change this + /// implementation to simply call the other TryPluck function with a zero + /// timeout. i.e.: + /// TryPluck(tag, gpr_time_0(GPR_CLOCK_REALTIME)) + void TryPluck(internal::CompletionQueueTag* tag) { + auto deadline = g_core_codegen_interface->gpr_time_0(GPR_CLOCK_REALTIME); + auto ev = g_core_codegen_interface->grpc_completion_queue_pluck( + cq_, tag, deadline, nullptr); + if (ev.type == GRPC_QUEUE_TIMEOUT) return; + bool ok = ev.success != 0; + void* ignored = tag; + // the tag must be swallowed if using TryPluck + GPR_CODEGEN_ASSERT(!tag->FinalizeResult(&ignored, &ok)); + } + + /// Performs a single polling pluck on \a tag. Calls tag->FinalizeResult if + /// the pluck() was successful and returned the tag. + /// + /// This expects tag->FinalizeResult (if called) to return 'false', i.e. it + /// expects that the tag is internal, not something that is returned to the user. + void TryPluck(internal::CompletionQueueTag* tag, gpr_timespec deadline) { + auto ev = g_core_codegen_interface->grpc_completion_queue_pluck( + cq_, tag, deadline, nullptr); + if (ev.type == GRPC_QUEUE_TIMEOUT || ev.type == GRPC_QUEUE_SHUTDOWN) { + return; + } + + bool ok = ev.success != 0; + void* ignored = tag; + GPR_CODEGEN_ASSERT(!tag->FinalizeResult(&ignored, &ok)); + } + + /// Manage state of avalanching operations: completion queue tags that + /// trigger other completion queue operations. The underlying core completion + /// queue should not really shut down until all avalanching operations have + /// been finalized. Note that we maintain the requirement that an avalanche + /// registration must take place before CQ shutdown (which must be maintained + /// elsewhere) + void InitialAvalanching() { + gpr_atm_rel_store(&avalanches_in_flight_, static_cast<gpr_atm>(1)); + } + void RegisterAvalanching() { + gpr_atm_no_barrier_fetch_add(&avalanches_in_flight_, + static_cast<gpr_atm>(1)); + } + void CompleteAvalanching(); + + grpc_completion_queue* cq_; // owned + + gpr_atm avalanches_in_flight_; +}; + +/// A specific type of completion queue used by the processing of notifications +/// by servers. Instantiated by \a ServerBuilder. +class ServerCompletionQueue : public CompletionQueue { + public: + bool IsFrequentlyPolled() { return polling_type_ != GRPC_CQ_NON_LISTENING; } + + private: + grpc_cq_polling_type polling_type_; + friend class ServerBuilder; + /// \param is_frequently_polled Informs the GRPC library about whether the + /// server completion queue would be actively polled (by calling Next() or + /// AsyncNext()). By default all server completion queues are assumed to be + /// frequently polled.
+ ServerCompletionQueue(grpc_cq_polling_type polling_type) + : CompletionQueue(grpc_completion_queue_attributes{ + GRPC_CQ_CURRENT_VERSION, GRPC_CQ_NEXT, polling_type}), + polling_type_(polling_type) {} +}; + +} // namespace grpc + +#endif // GRPCPP_IMPL_CODEGEN_COMPLETION_QUEUE_H diff --git a/patches/grpc/grpc_library.h b/patches/grpc/grpc_library.h new file mode 100644 index 0000000000..4870a1cda4 --- /dev/null +++ b/patches/grpc/grpc_library.h @@ -0,0 +1,64 @@ +// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef GRPCPP_IMPL_CODEGEN_GRPC_LIBRARY_H +#define GRPCPP_IMPL_CODEGEN_GRPC_LIBRARY_H + +#include + +#include + +namespace grpc { + +class GrpcLibraryInterface { + public: + virtual ~GrpcLibraryInterface() = default; + virtual void init() = 0; + virtual void shutdown() = 0; +}; + +/// Initialized by \a grpc::GrpcLibraryInitializer from +/// +extern GrpcLibraryInterface* g_glip; + +/// Classes that require gRPC to be initialized should inherit from this class. +class GrpcLibraryCodegen { + public: + GrpcLibraryCodegen(bool call_grpc_init = true) : grpc_init_called_(false) { + if (call_grpc_init) { + GPR_CODEGEN_ASSERT(g_glip && + "gRPC library not initialized. See " + "grpc::internal::GrpcLibraryInitializer."); + g_glip->init(); + grpc_init_called_ = true; + } + } + virtual ~GrpcLibraryCodegen() { + if (grpc_init_called_ && + typeid(*g_glip).hash_code() != + typeid(GrpcLibraryInterface).hash_code()) { + GPR_CODEGEN_ASSERT(g_glip && + "gRPC library not initialized. See " + "grpc::internal::GrpcLibraryInitializer."); + g_glip->shutdown(); + } + } + + private: + bool grpc_init_called_; +}; + +} // namespace grpc + +#endif // GRPCPP_IMPL_CODEGEN_GRPC_LIBRARY_H diff --git a/proto/README.md b/proto/README.md new file mode 100644 index 0000000000..dda7ed7b3c --- /dev/null +++ b/proto/README.md @@ -0,0 +1,3 @@ +## protos in this folder are legacy v2 protos. + +## Please refer to paddle/fluid for latest version. diff --git a/python/CMakeLists.txt b/python/CMakeLists.txt index ea25f3ab35..2590081150 100644 --- a/python/CMakeLists.txt +++ b/python/CMakeLists.txt @@ -1,4 +1,4 @@ -file(GLOB UTILS_PY_FILES . ./paddle/utils/*.py) +file(GLOB UTILS_PY_FILES . 
./paddle/legacy/utils/*.py) file(GLOB_RECURSE FLUID_PY_FILES ./paddle/fluid/*.py) set(PY_FILES paddle/__init__.py ${UTILS_PY_FILES} @@ -91,3 +91,16 @@ endif() install(DIRECTORY ${PADDLE_PYTHON_PACKAGE_DIR} DESTINATION opt/paddle/share/wheels ) + +if(APPLE) + find_program(INSTALL_NAME_TOOL_EXECUTABLE install_name_tool) + if(NOT INSTALL_NAME_TOOL_EXECUTABLE) + message(FATAL_ERROR "install_name_tool not found, please check.\n") + endif() +else(APPLE) + find_program(PATCHELF_EXECUTABLE patchelf) + if(NOT PATCHELF_EXECUTABLE) + message(FATAL_ERROR "patchelf not found, please install it.\n" + "For Ubuntu, the command is: apt-get install -y patchelf.") + endif() +endif(APPLE) diff --git a/python/paddle/__init__.py b/python/paddle/__init__.py index d1cf04161a..241a07a352 100644 --- a/python/paddle/__init__.py +++ b/python/paddle/__init__.py @@ -12,16 +12,16 @@ # See the License for the specific language governing permissions and # limitations under the License. try: - from version import full_version as __version__ - from version import commit as __git_commit__ + from paddle.version import full_version as __version__ + from paddle.version import commit as __git_commit__ except ImportError: import sys - sys.stderr.write('''Warning with import paddle: you should not + sys.stderr.write('''Warning with import paddle: you should not import paddle from the source directory; please install paddlepaddle*.whl first.''' ) -import reader -import dataset -import batch +import paddle.reader +import paddle.dataset +import paddle.batch batch = batch.batch diff --git a/python/paddle/batch.py b/python/paddle/batch.py index 317cf037c6..0085096607 100644 --- a/python/paddle/batch.py +++ b/python/paddle/batch.py @@ -15,7 +15,7 @@ __all__ = ['batch'] -def batch(reader, batch_size): +def batch(reader, batch_size, drop_last=False): """ Create a batched reader. @@ -23,6 +23,8 @@ def batch(reader, batch_size): :type reader: callable :param batch_size: size of each mini-batch :type batch_size: int + :param drop_last: whether to drop the last batch when its size is not equal to batch_size. + :type drop_last: bool :return: the batched reader. :rtype: callable """ @@ -35,7 +37,13 @@ def batch(reader, batch_size): if len(b) == batch_size: yield b b = [] - if b: + if drop_last == False and len(b) != 0: yield b + # Batch size check + batch_size = int(batch_size) + if batch_size <= 0: + raise ValueError("batch_size should be a positive integer value, " + "but got batch_size={}".format(batch_size)) + return batch_reader diff --git a/python/paddle/dataset/__init__.py b/python/paddle/dataset/__init__.py index 3315e826e8..54aa3edc51 100644 --- a/python/paddle/dataset/__init__.py +++ b/python/paddle/dataset/__init__.py @@ -15,20 +15,20 @@ Dataset package.
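Given the new drop_last flag in batch.py above, usage looks like this (expected output inferred from the docstring semantics):

    import paddle

    def reader():
        for i in range(10):
            yield i

    # drop_last=True discards the trailing short batch [9].
    for mini_batch in paddle.batch(reader, batch_size=3, drop_last=True)():
        print(mini_batch)    # [0, 1, 2] then [3, 4, 5] then [6, 7, 8]

    # With the default drop_last=False the remainder [9] is also yielded.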
""" -import mnist -import imikolov -import imdb -import cifar -import movielens -import conll05 -import uci_housing -import sentiment -import wmt14 -import wmt16 -import mq2007 -import flowers -import voc2012 -import image +import paddle.dataset.mnist +import paddle.dataset.imikolov +import paddle.dataset.imdb +import paddle.dataset.cifar +import paddle.dataset.movielens +import paddle.dataset.conll05 +import paddle.dataset.uci_housing +import paddle.dataset.sentiment +import paddle.dataset.wmt14 +import paddle.dataset.wmt16 +import paddle.dataset.mq2007 +import paddle.dataset.flowers +import paddle.dataset.voc2012 +import paddle.dataset.image __all__ = [ 'mnist', diff --git a/python/paddle/dataset/cifar.py b/python/paddle/dataset/cifar.py index 07f4dcbdab..f6b4ff8fbd 100644 --- a/python/paddle/dataset/cifar.py +++ b/python/paddle/dataset/cifar.py @@ -28,11 +28,12 @@ images per class. """ -import cPickle import itertools import numpy import paddle.dataset.common import tarfile +from six.moves import zip +from six.moves import cPickle as pickle __all__ = ['train100', 'test100', 'train10', 'test10', 'convert'] @@ -43,12 +44,12 @@ CIFAR100_URL = URL_PREFIX + 'cifar-100-python.tar.gz' CIFAR100_MD5 = 'eb9058c3a382ffc7106e4002c42a8d85' -def reader_creator(filename, sub_name): +def reader_creator(filename, sub_name, cycle=False): def read_batch(batch): data = batch['data'] labels = batch.get('labels', batch.get('fine_labels', None)) assert labels is not None - for sample, label in itertools.izip(data, labels): + for sample, label in zip(data, labels): yield (sample / 255.0).astype(numpy.float32), int(label) def reader(): @@ -56,10 +57,13 @@ def reader_creator(filename, sub_name): names = (each_item.name for each_item in f if sub_name in each_item.name) - for name in names: - batch = cPickle.load(f.extractfile(name)) - for item in read_batch(batch): - yield item + while True: + for name in names: + batch = pickle.load(f.extractfile(name)) + for item in read_batch(batch): + yield item + if not cycle: + break return reader @@ -94,34 +98,40 @@ def test100(): 'test') -def train10(): +def train10(cycle=False): """ CIFAR-10 training set creator. It returns a reader creator, each sample in the reader is image pixels in [0, 1] and label in [0, 9]. + :param cycle: whether to cycle through the dataset + :type cycle: bool :return: Training reader creator :rtype: callable """ return reader_creator( paddle.dataset.common.download(CIFAR10_URL, 'cifar', CIFAR10_MD5), - 'data_batch') + 'data_batch', + cycle=cycle) -def test10(): +def test10(cycle=False): """ CIFAR-10 test set creator. It returns a reader creator, each sample in the reader is image pixels in [0, 1] and label in [0, 9]. + :param cycle: whether to cycle through the dataset + :type cycle: bool :return: Test reader creator. 
:rtype: callable """ return reader_creator( paddle.dataset.common.download(CIFAR10_URL, 'cifar', CIFAR10_MD5), - 'test_batch') + 'test_batch', + cycle=cycle) def fetch(): diff --git a/python/paddle/dataset/common.py b/python/paddle/dataset/common.py index 68660601c1..6195cc50df 100644 --- a/python/paddle/dataset/common.py +++ b/python/paddle/dataset/common.py @@ -20,9 +20,8 @@ import shutil import sys import importlib import paddle.dataset -import cPickle +import six.moves.cPickle as pickle import glob -import cPickle as pickle __all__ = [ 'DATA_HOME', @@ -75,13 +74,13 @@ def download(url, module_name, md5sum, save_name=None): retry_limit = 3 while not (os.path.exists(filename) and md5file(filename) == md5sum): if os.path.exists(filename): - print "file md5", md5file(filename), md5sum + print("file md5", md5file(filename), md5sum) if retry < retry_limit: retry += 1 else: raise RuntimeError("Cannot download {0} within retry limit {1}". format(url, retry_limit)) - print "Cache file %s not found, downloading %s" % (filename, url) + print("Cache file %s not found, downloading %s" % (filename, url)) r = requests.get(url, stream=True) total_length = r.headers.get('content-length') @@ -104,8 +103,9 @@ def download(url, module_name, md5sum, save_name=None): def fetch_all(): - for module_name in filter(lambda x: not x.startswith("__"), - dir(paddle.dataset)): + for module_name in [ + x for x in dir(paddle.dataset) if not x.startswith("__") + ]: if "fetch" in dir( importlib.import_module("paddle.dataset.%s" % module_name)): getattr( @@ -114,8 +114,9 @@ def fetch_all(): def fetch_all_recordio(path): - for module_name in filter(lambda x: not x.startswith("__"), - dir(paddle.dataset)): + for module_name in [ + x for x in dir(paddle.dataset) if not x.startswith("__") + ]: if "convert" in dir( importlib.import_module("paddle.dataset.%s" % module_name)) and \ not module_name == "common": @@ -126,7 +127,7 @@ def fetch_all_recordio(path): "convert")(ds_path) -def split(reader, line_count, suffix="%05d.pickle", dumper=cPickle.dump): +def split(reader, line_count, suffix="%05d.pickle", dumper=pickle.dump): """ you can call the function as: @@ -167,7 +168,7 @@ def split(reader, line_count, suffix="%05d.pickle", dumper=cPickle.dump): def cluster_files_reader(files_pattern, trainer_count, trainer_id, - loader=cPickle.load): + loader=pickle.load): """ Create a reader that yield element from the given files, select a file set according trainer count and trainer_id @@ -188,7 +189,7 @@ def cluster_files_reader(files_pattern, my_file_list = [] for idx, fn in enumerate(file_list): if idx % trainer_count == trainer_id: - print "append file: %s" % fn + print("append file: %s" % fn) my_file_list.append(fn) for fn in my_file_list: with open(fn, "r") as f: @@ -221,7 +222,7 @@ def convert(output_path, reader, line_count, name_prefix): for l in lines: # FIXME(Yancey1989): # dumps with protocol: pickle.HIGHEST_PROTOCOL - writer.write(cPickle.dumps(l)) + writer.write(pickle.dumps(l)) writer.close() lines = [] diff --git a/python/paddle/dataset/conll05.py b/python/paddle/dataset/conll05.py index 4e94ce8989..a97c95d067 100644 --- a/python/paddle/dataset/conll05.py +++ b/python/paddle/dataset/conll05.py @@ -24,18 +24,19 @@ import tarfile import gzip import itertools import paddle.dataset.common +from six.moves import zip __all__ = ['test, get_dict', 'get_embedding', 'convert'] DATA_URL = 'http://www.cs.upc.edu/~srlconll/conll05st-tests.tar.gz' DATA_MD5 = '387719152ae52d60422c016e92a742fc' -WORDDICT_URL = 
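download() in common.py above loops until the file's md5 matches, giving up after retry_limit attempts. The verify-then-retry core, reduced to a standalone sketch (no streaming or progress reporting; the helper name is hypothetical):

    import hashlib
    import requests

    def download_verified(url, path, md5sum, retry_limit=3):
        # Re-download until the checksum matches or retries run out.
        for _ in range(retry_limit):
            with open(path, "wb") as f:
                f.write(requests.get(url).content)
            with open(path, "rb") as f:
                if hashlib.md5(f.read()).hexdigest() == md5sum:
                    return path
        raise RuntimeError("Cannot download {0} within retry limit {1}"
                           .format(url, retry_limit))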
'http://paddlepaddle.bj.bcebos.com/demo/srl_dict_and_embedding/wordDict.txt' +WORDDICT_URL = 'http://paddlemodels.bj.bcebos.com/conll05st%2FwordDict.txt' WORDDICT_MD5 = 'ea7fb7d4c75cc6254716f0177a506baa' -VERBDICT_URL = 'http://paddlepaddle.bj.bcebos.com/demo/srl_dict_and_embedding/verbDict.txt' +VERBDICT_URL = 'http://paddlemodels.bj.bcebos.com/conll05st%2FverbDict.txt' VERBDICT_MD5 = '0d2977293bbb6cbefab5b0f97db1e77c' -TRGDICT_URL = 'http://paddlepaddle.bj.bcebos.com/demo/srl_dict_and_embedding/targetDict.txt' +TRGDICT_URL = 'http://paddlemodels.bj.bcebos.com/conll05st%2FtargetDict.txt' TRGDICT_MD5 = 'd8c7f03ceb5fc2e5a0fa7503a4353751' -EMB_URL = 'http://paddlepaddle.bj.bcebos.com/demo/srl_dict_and_embedding/emb' +EMB_URL = 'http://paddlemodels.bj.bcebos.com/conll05st%2Femb' EMB_MD5 = 'bf436eb0faa1f6f9103017f8be57cdb7' UNK_IDX = 0 @@ -87,12 +88,12 @@ def corpus_reader(data_path, words_name, props_name): sentences = [] labels = [] one_seg = [] - for word, label in itertools.izip(words_file, props_file): + for word, label in zip(words_file, props_file): word = word.strip() label = label.strip().split() if len(label) == 0: # end of sentence - for i in xrange(len(one_seg[0])): + for i in range(len(one_seg[0])): a_kind_lable = [x[i] for x in one_seg] labels.append(a_kind_lable) diff --git a/python/paddle/dataset/flowers.py b/python/paddle/dataset/flowers.py index f082e33be3..914dae348b 100644 --- a/python/paddle/dataset/flowers.py +++ b/python/paddle/dataset/flowers.py @@ -28,10 +28,9 @@ Graphics and Image Processing (2008) http://www.robots.ox.ac.uk/~vgg/publications/papers/nilsback08.{pdf,ps.gz}. """ -import cPickle import itertools import functools -from common import download +from .common import download import tarfile import scipy.io as scio from paddle.dataset.image import * @@ -39,6 +38,8 @@ from paddle.reader import * import os import numpy as np from multiprocessing import cpu_count +from six.moves import cPickle as pickle +from six.moves import zip __all__ = ['train', 'test', 'valid'] DATA_URL = 'http://www.robots.ox.ac.uk/~vgg/data/flowers/102/102flowers.tgz' @@ -76,7 +77,8 @@ def reader_creator(data_file, dataset_name, mapper, buffered_size=1024, - use_xmap=True): + use_xmap=True, + cycle=False): ''' 1. 
read images from tar file and merge images into batch files in 102flowers.tgz_batch/ @@ -96,6 +98,8 @@ def reader_creator(data_file, :type mapper: callable :param buffered_size: the size of buffer used to process images :type buffered_size: int + :param cycle: whether to cycle through the dataset + :type cycle: bool :return: data reader :rtype: callable ''' @@ -108,23 +112,27 @@ def reader_creator(data_file, file_list = batch_images_from_tar(data_file, dataset_name, img2label) def reader(): - for file in open(file_list): - file = file.strip() - batch = None - with open(file, 'r') as f: - batch = cPickle.load(f) - data = batch['data'] - labels = batch['label'] - for sample, label in itertools.izip(data, batch['label']): - yield sample, int(label) - 1 + while True: + for file in open(file_list): + file = file.strip() + batch = None + with open(file, 'r') as f: + batch = pickle.load(f) + data = batch['data'] + labels = batch['label'] + for sample, label in zip(data, batch['label']): + yield sample, int(label) - 1 + if not cycle: + break if use_xmap: - return xmap_readers(mapper, reader, cpu_count(), buffered_size) + cpu_num = int(os.environ.get('CPU_NUM', cpu_count())) + return xmap_readers(mapper, reader, cpu_num, buffered_size) else: return map_readers(mapper, reader) -def train(mapper=train_mapper, buffered_size=1024, use_xmap=True): +def train(mapper=train_mapper, buffered_size=1024, use_xmap=True, cycle=False): ''' Create flowers training set reader. It returns a reader, each sample in the reader is @@ -137,17 +145,23 @@ def train(mapper=train_mapper, buffered_size=1024, use_xmap=True): :type mapper: callable :param buffered_size: the size of buffer used to process images :type buffered_size: int + :param cycle: whether to cycle through the dataset + :type cycle: bool :return: train data reader :rtype: callable ''' return reader_creator( download(DATA_URL, 'flowers', DATA_MD5), download(LABEL_URL, 'flowers', LABEL_MD5), - download(SETID_URL, 'flowers', SETID_MD5), TRAIN_FLAG, mapper, - buffered_size, use_xmap) + download(SETID_URL, 'flowers', SETID_MD5), + TRAIN_FLAG, + mapper, + buffered_size, + use_xmap, + cycle=cycle) -def test(mapper=test_mapper, buffered_size=1024, use_xmap=True): +def test(mapper=test_mapper, buffered_size=1024, use_xmap=True, cycle=False): ''' Create flowers test set reader. 
It returns a reader, each sample in the reader is @@ -160,14 +174,20 @@ def test(mapper=test_mapper, buffered_size=1024, use_xmap=True): :type mapper: callable :param buffered_size: the size of buffer used to process images :type buffered_size: int + :param cycle: whether to cycle through the dataset + :type cycle: bool :return: test data reader :rtype: callable ''' return reader_creator( download(DATA_URL, 'flowers', DATA_MD5), download(LABEL_URL, 'flowers', LABEL_MD5), - download(SETID_URL, 'flowers', SETID_MD5), TEST_FLAG, mapper, - buffered_size, use_xmap) + download(SETID_URL, 'flowers', SETID_MD5), + TEST_FLAG, + mapper, + buffered_size, + use_xmap, + cycle=cycle) def valid(mapper=test_mapper, buffered_size=1024, use_xmap=True): diff --git a/python/paddle/dataset/image.py b/python/paddle/dataset/image.py index 9235c41e9e..3b3d89c93c 100644 --- a/python/paddle/dataset/image.py +++ b/python/paddle/dataset/image.py @@ -36,7 +36,7 @@ except ImportError: cv2 = None import os import tarfile -import cPickle +import six.moves.cPickle as pickle __all__ = [ "load_image_bytes", "load_image", "resize_short", "to_chw", "center_crop", @@ -86,10 +86,10 @@ def batch_images_from_tar(data_file, output = {} output['label'] = labels output['data'] = data - cPickle.dump( + pickle.dump( output, open('%s/batch_%d' % (out_path, file_id), 'w'), - protocol=cPickle.HIGHEST_PROTOCOL) + protocol=pickle.HIGHEST_PROTOCOL) file_id += 1 data = [] labels = [] @@ -97,10 +97,10 @@ def batch_images_from_tar(data_file, output = {} output['label'] = labels output['data'] = data - cPickle.dump( + pickle.dump( output, open('%s/batch_%d' % (out_path, file_id), 'w'), - protocol=cPickle.HIGHEST_PROTOCOL) + protocol=pickle.HIGHEST_PROTOCOL) with open(meta_file, 'a') as meta: for file in os.listdir(out_path): diff --git a/python/paddle/dataset/imdb.py b/python/paddle/dataset/imdb.py index 5ff05b1e9b..e7fe4e0b7e 100644 --- a/python/paddle/dataset/imdb.py +++ b/python/paddle/dataset/imdb.py @@ -42,13 +42,13 @@ def tokenize(pattern): # sequential access of member files, other than # tarfile.extractfile, which does random access and might # destroy hard disks. - tf = tarf.next() + tf = next(tarf) while tf != None: if bool(pattern.match(tf.name)): # newline and punctuations removal and ad-hoc tokenization. yield tarf.extractfile(tf).read().rstrip("\n\r").translate( None, string.punctuation).lower().split() - tf = tarf.next() + tf = next(tarf) def build_dict(pattern, cutoff): @@ -62,11 +62,11 @@ def build_dict(pattern, cutoff): word_freq[word] += 1 # Not sure if we should prune less-frequent words here. 
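The hunks that follow port build_dict's filter/izip/xrange idioms to forms that run on both Python 2 and 3. The resulting idiom, pulled out as a runnable sketch (the '<unk>' out-of-vocabulary sentinel is assumed):

    import collections

    def build_dict(docs, cutoff=1):
        # Count, prune rare words, rank by (-frequency, word), then index.
        word_freq = collections.defaultdict(int)
        for doc in docs:
            for word in doc:
                word_freq[word] += 1
        word_freq = [x for x in list(word_freq.items()) if x[1] > cutoff]
        dictionary = sorted(word_freq, key=lambda x: (-x[1], x[0]))
        words, _ = list(zip(*dictionary))
        word_idx = dict(zip(words, range(len(words))))
        word_idx['<unk>'] = len(words)   # out-of-vocabulary sentinel (assumed)
        return word_idx

    print(build_dict([["a", "b", "a"], ["a", "b", "c"]]))
    # {'a': 0, 'b': 1, '<unk>': 2}   ('c' pruned at cutoff=1)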
- word_freq = filter(lambda x: x[1] > cutoff, word_freq.items()) + word_freq = [x for x in list(word_freq.items()) if x[1] > cutoff] dictionary = sorted(word_freq, key=lambda x: (-x[1], x[0])) words, _ = list(zip(*dictionary)) - word_idx = dict(zip(words, xrange(len(words)))) + word_idx = dict(list(zip(words, list(range(len(words)))))) word_idx[''] = len(words) return word_idx diff --git a/python/paddle/dataset/imikolov.py b/python/paddle/dataset/imikolov.py index c6c0a0f543..bc007c9d3c 100644 --- a/python/paddle/dataset/imikolov.py +++ b/python/paddle/dataset/imikolov.py @@ -64,11 +64,11 @@ def build_dict(min_word_freq=50): # remove for now, since we will set it as last index del word_freq[''] - word_freq = filter(lambda x: x[1] > min_word_freq, word_freq.items()) + word_freq = [x for x in list(word_freq.items()) if x[1] > min_word_freq] word_freq_sorted = sorted(word_freq, key=lambda x: (-x[1], x[0])) words, _ = list(zip(*word_freq_sorted)) - word_idx = dict(zip(words, xrange(len(words)))) + word_idx = dict(list(zip(words, list(range(len(words)))))) word_idx[''] = len(words) return word_idx diff --git a/python/paddle/dataset/mnist.py b/python/paddle/dataset/mnist.py index 6a1b8b5fac..ffa9008c80 100644 --- a/python/paddle/dataset/mnist.py +++ b/python/paddle/dataset/mnist.py @@ -65,11 +65,17 @@ def reader_creator(image_filename, label_filename, buffer_size): images = images / 255.0 * 2.0 - 1.0 - for i in xrange(buffer_size): + for i in range(buffer_size): yield images[i, :], int(labels[i]) finally: - m.terminate() - l.terminate() + try: + m.terminate() + except: + pass + try: + l.terminate() + except: + pass return reader @@ -111,7 +117,7 @@ def fetch(): paddle.dataset.common.download(TRAIN_IMAGE_URL, 'mnist', TRAIN_IMAGE_MD5) paddle.dataset.common.download(TRAIN_LABEL_URL, 'mnist', TRAIN_LABEL_MD5) paddle.dataset.common.download(TEST_IMAGE_URL, 'mnist', TEST_IMAGE_MD5) - paddle.dataset.common.download(TEST_LABEL_URL, 'mnist', TRAIN_LABEL_MD5) + paddle.dataset.common.download(TEST_LABEL_URL, 'mnist', TEST_LABEL_MD5) def convert(path): diff --git a/python/paddle/dataset/movielens.py b/python/paddle/dataset/movielens.py index ab11716202..056ec21786 100644 --- a/python/paddle/dataset/movielens.py +++ b/python/paddle/dataset/movielens.py @@ -16,7 +16,7 @@ Movielens 1-M dataset. Movielens 1-M dataset contains 1 million ratings from 6000 users on 4000 movies, which was collected by GroupLens Research. This module will download -Movielens 1-M dataset from +Movielens 1-M dataset from http://files.grouplens.org/datasets/movielens/ml-1m.zip and parse training set and test set into paddle reader creators. @@ -187,7 +187,7 @@ def max_movie_id(): Get the maximum value of movie id. """ __initialize_meta_info__() - return reduce(__max_index_info__, MOVIE_INFO.viewvalues()).index + return reduce(__max_index_info__, list(MOVIE_INFO.values())).index def max_user_id(): @@ -195,7 +195,7 @@ def max_user_id(): Get the maximum value of user id. """ __initialize_meta_info__() - return reduce(__max_index_info__, USER_INFO.viewvalues()).index + return reduce(__max_index_info__, list(USER_INFO.values())).index def __max_job_id_impl__(a, b): @@ -210,7 +210,7 @@ def max_job_id(): Get the maximum value of job id. 
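One caveat with the movielens hunks below: they keep calling the bare reduce builtin, which Python 3 moved into functools, so a complete port also needs that import. The same pattern in isolation:

    import functools

    USER_INFO = {1: {"job_id": 4}, 2: {"job_id": 20}, 3: {"job_id": 7}}

    def max_job(a, b):
        return a if a["job_id"] > b["job_id"] else b

    # Mirrors reduce(__max_job_id_impl__, list(USER_INFO.values())).job_id
    print(functools.reduce(max_job, list(USER_INFO.values()))["job_id"])  # 20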
""" __initialize_meta_info__() - return reduce(__max_job_id_impl__, USER_INFO.viewvalues()).job_id + return reduce(__max_job_id_impl__, list(USER_INFO.values())).job_id def movie_categories(): @@ -243,7 +243,7 @@ def unittest(): for test_count, _ in enumerate(test()()): pass - print train_count, test_count + print(train_count, test_count) def fetch(): diff --git a/python/paddle/dataset/mq2007.py b/python/paddle/dataset/mq2007.py index d3b3dd524c..cc4d088316 100644 --- a/python/paddle/dataset/mq2007.py +++ b/python/paddle/dataset/mq2007.py @@ -26,7 +26,7 @@ http://research.microsoft.com/en-us/um/beijing/projects/letor/LETOR4.0/Data/MQ20 import os import functools import rarfile -from common import download +from .common import download import numpy as np # URL = "http://research.microsoft.com/en-us/um/beijing/projects/letor/LETOR4.0/Data/MQ2007.rar" @@ -53,7 +53,7 @@ class Query(object): ---------- query_id : int query_id in dataset, mapping from query to relevance documents - relevance_score : int + relevance_score : int relevance score of query and document pair feature_vector : array, dense feature feature in vector format @@ -92,7 +92,7 @@ class Query(object): sys.stdout.write("expect 48 space split parts, get %d" % (len(parts))) return None - # format : 0 qid:10 1:0.000272 2:0.000000 .... + # format : 0 qid:10 1:0.000272 2:0.000000 .... self.relevance_score = int(parts[0]) self.query_id = int(parts[1].split(':')[1]) for p in parts[2:]: @@ -295,7 +295,7 @@ def __reader__(filepath, format="pairwise", shuffle=False, fill_missing=-1): -------- filename : string fill_missing : fill the missing value. default in MQ2007 is -1 - + Returns ------ yield @@ -330,4 +330,4 @@ if __name__ == "__main__": mytest = functools.partial( __reader__, filepath="MQ2007/MQ2007/Fold1/sample", format="listwise") for label, query in mytest(): - print label, query + print(label, query) diff --git a/python/paddle/dataset/sentiment.py b/python/paddle/dataset/sentiment.py index f5461164fe..953ada057b 100644 --- a/python/paddle/dataset/sentiment.py +++ b/python/paddle/dataset/sentiment.py @@ -43,11 +43,11 @@ def download_data_if_not_yet(): nltk.data.path.append(paddle.dataset.common.DATA_HOME) movie_reviews.categories() except LookupError: - print "Downloading movie_reviews data set, please wait....." + print("Downloading movie_reviews data set, please wait.....") nltk.download( 'movie_reviews', download_dir=paddle.dataset.common.DATA_HOME) - print "Download data set success....." 
- print "Path is " + nltk.data.find('corpora/movie_reviews').path + print("Download data set success.....") + print("Path is " + nltk.data.find('corpora/movie_reviews').path) def get_word_dict(): @@ -64,7 +64,7 @@ def get_word_dict(): for field in movie_reviews.fileids(category): for words in movie_reviews.words(field): word_freq_dict[words] += 1 - words_sort_list = word_freq_dict.items() + words_sort_list = list(word_freq_dict.items()) words_sort_list.sort(cmp=lambda a, b: b[1] - a[1]) for index, word in enumerate(words_sort_list): words_freq_sorted.append((word[0], index)) @@ -80,7 +80,8 @@ def sort_files(): files_list = list() neg_file_list = movie_reviews.fileids('neg') pos_file_list = movie_reviews.fileids('pos') - files_list = list(chain.from_iterable(zip(neg_file_list, pos_file_list))) + files_list = list( + chain.from_iterable(list(zip(neg_file_list, pos_file_list)))) return files_list diff --git a/python/paddle/dataset/tests/common_test.py b/python/paddle/dataset/tests/common_test.py index e7cc02aa83..777cd06a19 100644 --- a/python/paddle/dataset/tests/common_test.py +++ b/python/paddle/dataset/tests/common_test.py @@ -36,7 +36,7 @@ class TestCommon(unittest.TestCase): def test_split(self): def test_reader(): def reader(): - for x in xrange(10): + for x in range(10): yield x return reader @@ -49,7 +49,7 @@ class TestCommon(unittest.TestCase): def test_cluster_file_reader(self): _, temp_path = tempfile.mkstemp() - for x in xrange(5): + for x in range(5): with open(temp_path + '/%05d.test' % x) as f: f.write('%d\n' % x) reader = paddle.dataset.common.cluster_files_reader( @@ -63,7 +63,7 @@ class TestCommon(unittest.TestCase): def test_reader(): def reader(): - for x in xrange(record_num): + for x in range(record_num): yield x return reader diff --git a/python/paddle/dataset/tests/imikolov_test.py b/python/paddle/dataset/tests/imikolov_test.py index 233fd9fc8c..50f50d947d 100644 --- a/python/paddle/dataset/tests/imikolov_test.py +++ b/python/paddle/dataset/tests/imikolov_test.py @@ -59,7 +59,7 @@ class TestMikolov(unittest.TestCase): self.assertEqual(first_line, read_line) def test_total(self): - _, idx = zip(*WORD_DICT.items()) + _, idx = list(zip(*list(WORD_DICT.items()))) self.assertEqual(sorted(idx)[-1], len(WORD_DICT) - 1) diff --git a/python/paddle/dataset/tests/test_sentiment.py b/python/paddle/dataset/tests/test_sentiment.py index 543f4b7378..37326517f7 100644 --- a/python/paddle/dataset/tests/test_sentiment.py +++ b/python/paddle/dataset/tests/test_sentiment.py @@ -24,9 +24,8 @@ from nltk.corpus import movie_reviews class TestSentimentMethods(unittest.TestCase): def test_get_word_dict(self): word_dict = st.get_word_dict()[0:10] - test_word_list = [(u',', 0), (u'the', 1), (u'.', 2), (u'a', 3), - (u'and', 4), (u'of', 5), (u'to', 6), (u"'", 7), - (u'is', 8), (u'in', 9)] + test_word_list = [(',', 0), ('the', 1), ('.', 2), ('a', 3), ('and', 4), + ('of', 5), ('to', 6), ("'", 7), ('is', 8), ('in', 9)] for idx, each in enumerate(word_dict): self.assertEqual(each, test_word_list[idx]) self.assertTrue("/root/.cache/paddle/dataset" in nltk.data.path) diff --git a/python/paddle/dataset/uci_housing.py b/python/paddle/dataset/uci_housing.py index fbfa477d05..410ca7af0d 100644 --- a/python/paddle/dataset/uci_housing.py +++ b/python/paddle/dataset/uci_housing.py @@ -49,9 +49,12 @@ def feature_range(maximums, minimums): import matplotlib.pyplot as plt fig, ax = plt.subplots() feature_num = len(maximums) - ax.bar(range(feature_num), maximums - minimums, color='r', align='center') + 
ax.bar(list(range(feature_num)), + maximums - minimums, + color='r', + align='center') ax.set_title('feature scale') - plt.xticks(range(feature_num), feature_names) + plt.xticks(list(range(feature_num)), feature_names) plt.xlim([-1, feature_num]) fig.set_figheight(6) fig.set_figwidth(10) @@ -71,7 +74,7 @@ def load_data(filename, feature_num=14, ratio=0.8): maximums, minimums, avgs = data.max(axis=0), data.min(axis=0), data.sum( axis=0) / data.shape[0] feature_range(maximums[:-1], minimums[:-1]) - for i in xrange(feature_num - 1): + for i in range(feature_num - 1): data[:, i] = (data[:, i] - avgs[i]) / (maximums[i] - minimums[i]) offset = int(data.shape[0] * ratio) UCI_TRAIN_DATA = data[:offset] diff --git a/python/paddle/dataset/wmt14.py b/python/paddle/dataset/wmt14.py index f0908c7378..7504474591 100644 --- a/python/paddle/dataset/wmt14.py +++ b/python/paddle/dataset/wmt14.py @@ -36,11 +36,10 @@ URL_DEV_TEST = ('http://www-lium.univ-lemans.fr/~schwenk/' MD5_DEV_TEST = '7d7897317ddd8ba0ae5c5fa7248d3ff5' # this is a small set of data for test. The original data is too large and # will be add later. -URL_TRAIN = ('http://paddlepaddle.cdn.bcebos.com/demo/' - 'wmt_shrinked_data/wmt14.tgz') +URL_TRAIN = ('http://paddlemodels.bj.bcebos.com/wmt/wmt14.tgz') MD5_TRAIN = '0791583d57d5beb693b9414c5b36798c' # BLEU of this trained model is 26.92 -URL_MODEL = 'http://paddlepaddle.bj.bcebos.com/demo/wmt_14/wmt14_model.tar.gz' +URL_MODEL = 'http://paddlemodels.bj.bcebos.com/wmt%2Fwmt14.tgz' MD5_MODEL = '0cb4a5366189b6acba876491c8724fa3' START = "<s>" @@ -154,8 +153,8 @@ def get_dict(dict_size, reverse=True): tar_file = paddle.dataset.common.download(URL_TRAIN, 'wmt14', MD5_TRAIN) src_dict, trg_dict = __read_to_dict(tar_file, dict_size) if reverse: - src_dict = {v: k for k, v in src_dict.items()} - trg_dict = {v: k for k, v in trg_dict.items()} + src_dict = {v: k for k, v in list(src_dict.items())} + trg_dict = {v: k for k, v in list(trg_dict.items())} return src_dict, trg_dict diff --git a/python/paddle/dataset/wmt16.py b/python/paddle/dataset/wmt16.py index ad23338a96..4e3c466c38 100644 --- a/python/paddle/dataset/wmt16.py +++ b/python/paddle/dataset/wmt16.py @@ -70,7 +70,9 @@ def __build_dict(tar_file, dict_size, save_path, lang): fout.write("%s\n%s\n%s\n" % (START_MARK, END_MARK, UNK_MARK)) for idx, word in enumerate( sorted( - word_dict.iteritems(), key=lambda x: x[1], reverse=True)): + iter(list(word_dict.items())), + key=lambda x: x[1], + reverse=True)): if idx + 3 == dict_size: break fout.write("%s\n" % (word[0])) @@ -96,7 +98,7 @@ def __get_dict_size(src_dict_size, trg_dict_size, src_lang): src_dict_size = min(src_dict_size, (TOTAL_EN_WORDS if src_lang == "en" else TOTAL_DE_WORDS)) trg_dict_size = min(trg_dict_size, (TOTAL_DE_WORDS if src_lang == "en" else - TOTAL_ENG_WORDS)) + TOTAL_EN_WORDS)) return src_dict_size, trg_dict_size diff --git a/python/paddle/fluid/__init__.py b/python/paddle/fluid/__init__.py index c8a435748d..1ae05dec8d 100644 --- a/python/paddle/fluid/__init__.py +++ b/python/paddle/fluid/__init__.py @@ -14,72 +14,79 @@ from __future__ import print_function # import all class inside framework into fluid module -import framework -from framework import * +from . 
import framework +from .framework import * # import all class inside executor into fluid module -import executor -from executor import * - -import trainer -from trainer import Trainer -from trainer import BeginEpochEvent -from trainer import EndEpochEvent -from trainer import BeginStepEvent -from trainer import EndStepEvent - -import inferencer -from inferencer import Inferencer - -import io -import evaluator -import initializer -import layers -import nets -import optimizer -import backward -import regularizer -import average -import metrics -import transpiler -from param_attr import ParamAttr, WeightNormParamAttr -from data_feeder import DataFeeder -from core import LoDTensor, CPUPlace, CUDAPlace, CUDAPinnedPlace -from transpiler import DistributeTranspiler, SimpleDistributeTranspiler, InferenceTranspiler, memory_optimize, release_memory -from concurrency import (Go, make_channel, channel_send, channel_recv, - channel_close, Select) -import clip -import profiler -import unique_name -import recordio_writer -from parallel_executor import ParallelExecutor +from . import executor +from .executor import * + +from . import trainer +from .trainer import Trainer +from .trainer import BeginEpochEvent +from .trainer import EndEpochEvent +from .trainer import BeginStepEvent +from .trainer import EndStepEvent +from .trainer import CheckpointConfig + +from . import inferencer +from .inferencer import Inferencer + +from . import io +from . import evaluator +from . import initializer +from . import layers +from . import contrib +from . import nets +from . import optimizer +from . import backward +from . import regularizer +from . import average +from . import metrics +from . import transpiler +from .param_attr import ParamAttr, WeightNormParamAttr +from .data_feeder import DataFeeder +from .core import LoDTensor, LoDTensorArray, CPUPlace, CUDAPlace, CUDAPinnedPlace, Scope +from .transpiler import DistributeTranspiler, InferenceTranspiler, \ + memory_optimize, release_memory, DistributeTranspilerConfig +from .lod_tensor import create_lod_tensor, create_random_int_lodtensor +from . import clip +from . import profiler +from . import unique_name +from . import recordio_writer +from . import parallel_executor +from .parallel_executor import * +from paddle.fluid.layers.math_op_patch import monkey_patch_variable Tensor = LoDTensor -__all__ = framework.__all__ + executor.__all__ + concurrency.__all__ +\ - trainer.__all__ + inferencer.__all__ + transpiler.__all__ + [ - 'io', - 'initializer', - 'layers', - 'transpiler' - 'nets', - 'optimizer', - 'learning_rate_decay', - 'backward', - 'regularizer', - 'LoDTensor', - 'CPUPlace', - 'CUDAPlace', - 'CUDAPinnedPlace', - 'Tensor', - 'ParamAttr', - 'WeightNormParamAttr', - 'DataFeeder', - 'clip', - 'profiler', - 'unique_name', - 'recordio_writer', - 'ParallelExecutor', -] +__all__ = framework.__all__ + executor.__all__ + \ + trainer.__all__ + inferencer.__all__ + transpiler.__all__ + \ + parallel_executor.__all__ + lod_tensor.__all__ + [ + 'io', + 'initializer', + 'layers', + 'contrib', + 'transpiler', + 'nets', + 'optimizer', + 'learning_rate_decay', + 'backward', + 'regularizer', + 'LoDTensor', + 'LoDTensorArray', + 'CPUPlace', + 'CUDAPlace', + 'CUDAPinnedPlace', + 'Tensor', + 'ParamAttr', + 'WeightNormParamAttr', + 'DataFeeder', + 'clip', + 'profiler', + 'unique_name', + 'recordio_writer', + 'Scope', + ] def __bootstrap__(): @@ -90,8 +97,8 @@ def __bootstrap__(): None """ import sys - import core import os + from . 
import core in_test = 'unittest' in sys.modules @@ -113,11 +120,18 @@ def __bootstrap__(): read_env_flags = [ 'use_pinned_memory', 'check_nan_inf', 'benchmark', 'warpctc_dir', - 'eager_delete_scope' + 'eager_delete_scope', 'use_mkldnn', 'initial_cpu_memory_in_mb', + 'init_allocated_mem', 'free_idle_memory', 'paddle_num_threads', + 'cpu_deterministic' ] + if core.is_compiled_with_dist(): + read_env_flags.append('rpc_deadline') + read_env_flags.append('rpc_server_profile_period') + read_env_flags.append('rpc_server_profile_path') + if core.is_compiled_with_cuda(): read_env_flags += [ - 'fraction_of_gpu_memory_to_use', 'cudnn_algo_use_autotune' + 'fraction_of_gpu_memory_to_use', 'cudnn_deterministic' ] core.init_gflags([sys.argv[0]] + ["--tryfromenv=" + ",".join(read_env_flags)]) @@ -128,5 +142,5 @@ def __bootstrap__(): # TODO(panyx0718): Avoid doing complex initialization logic in __init__.py. # Consider paddle.init(args) or paddle.main(args) -layers.monkey_patch_variable() +monkey_patch_variable() __bootstrap__() diff --git a/python/paddle/fluid/annotations.py b/python/paddle/fluid/annotations.py new file mode 100644 index 0000000000..15e7976354 --- /dev/null +++ b/python/paddle/fluid/annotations.py @@ -0,0 +1,39 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import print_function +import functools +import sys + +__all__ = ['deprecated'] + + +def deprecated(since, instead, extra_message=""): + def decorator(func): + err_msg = "API {0} is deprecated since {1}. Please use {2} instead.".format( + func.__name__, since, instead) + if len(extra_message) != 0: + err_msg += "\n" + err_msg += extra_message + + @functools.wraps(func) + def wrapper(*args, **kwargs): + print(err_msg, file=sys.stderr) + return func(*args, **kwargs) + + wrapper.__doc__ += "\n " + wrapper.__doc__ += err_msg + return wrapper + + return decorator diff --git a/python/paddle/fluid/average.py b/python/paddle/fluid/average.py index 6abe8233b0..358e24df31 100644 --- a/python/paddle/fluid/average.py +++ b/python/paddle/fluid/average.py @@ -36,6 +36,25 @@ def _is_number_or_matrix_(var): class WeightedAverage(object): + """ + Calculate weighted average. + + The average calculating is accomplished via Python totally. + They do not change Paddle's Program, nor do anything to + modify NN model's configuration. They are completely + wrappers of Python functions. + + Examples: + .. code-block:: python + avg = fluid.average.WeightedAverage() + avg.add(value=2.0, weight=1) + avg.add(value=4.0, weight=2) + avg.eval() + + # The result is 3.333333333. + # For (2.0 * 1 + 4.0 * 2) / (1 + 2) = 3.333333333 + """ + def __init__(self): warnings.warn( "The %s is deprecated, please use fluid.metrics.Accuracy instead." 
% diff --git a/python/paddle/fluid/backward.py b/python/paddle/fluid/backward.py index 7af6ed1463..fd6a76dd0c 100644 --- a/python/paddle/fluid/backward.py +++ b/python/paddle/fluid/backward.py @@ -16,12 +16,10 @@ from paddle.fluid import framework as framework from . import core import collections import copy -import unique_name +import six +from . import unique_name -__all__ = [ - 'append_backward', - 'calc_gradient', -] +__all__ = ['append_backward'] def _rename_arg_(op_descs, old_name, new_name, begin_idx=None, end_idx=None): @@ -47,11 +45,25 @@ def _create_op_desc_(op_type, inputs, outputs, attrs): """ op_desc = core.OpDesc() op_desc.set_type(op_type) - for para, args in inputs.iteritems(): - op_desc.set_input(para, args) - for para, args in outputs.iteritems(): - op_desc.set_output(para, args) - for name, val in attrs.iteritems(): + for para, args in list(inputs.items()): + op_desc.set_input( + para, + list( + map(lambda arg: arg.decode() if isinstance(arg, six.binary_type) else arg, + args))) + for para, args in list(outputs.items()): + op_desc.set_output( + para, + list( + map(lambda arg: arg.decode() if isinstance(arg, six.binary_type) else arg, + args))) + + op_role_attr_name = core.op_proto_and_checker_maker.kOpRoleAttrName() + + if op_role_attr_name not in attrs: + attrs[ + op_role_attr_name] = core.op_proto_and_checker_maker.OpRole.Backward + for name, val in list(attrs.items()): if isinstance(val, framework.Block): op_desc.set_block_attr(name, val.desc) else: @@ -102,7 +114,9 @@ def _strip_grad_suffix_(name): e.g. x@GRAD ==> x y@GRAD@RENAME@1 ==> y """ - pos = name.find(core.grad_var_suffix()) + if isinstance(name, six.text_type): + name = name.encode() + pos = name.find(six.b(core.grad_var_suffix())) return name[:pos] if pos != -1 else name @@ -111,13 +125,16 @@ def _append_grad_suffix_(name): Append grad suffix to the given variable name e.g. x ==> x@GRAD """ - return name + core.grad_var_suffix() + if isinstance(name, six.text_type): + name = name.encode() + return name + six.b(core.grad_var_suffix()) def _addup_repetitive_outputs_(op_descs): """ In backward part, an variable may be the output of more than one ops. - In this case, the variable should be the accumulation of all the outputs. + And one op may yield its multiple outputs to the same variable. + In these cases, the variable should be the accumulation of all the outputs. `sum_op`s are added to implement the accumulate. 
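Under Python 3 the variable names handed back by the C++ core may be `bytes`, so the suffix helpers above normalize through `six` before searching. The same logic in isolation, taking `@GRAD` as a stand-in for whatever `core.grad_var_suffix()` returns (an assumption for this sketch):

    import six

    GRAD_SUFFIX = b"@GRAD"  # assumed stand-in for core.grad_var_suffix()


    def strip_grad_suffix(name):
        if isinstance(name, six.text_type):
            name = name.encode()  # unify text and bytes before searching
        pos = name.find(GRAD_SUFFIX)
        return name[:pos] if pos != -1 else name


    assert strip_grad_suffix(u"x@GRAD") == b"x"
    assert strip_grad_suffix(b"y@GRAD@RENAME@1") == b"y"

Note the helper in the diff passes `six.b(core.grad_var_suffix())` rather than a literal, which keeps the suffix itself portable across both string models.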
""" pending_sum_ops = [] @@ -126,37 +143,55 @@ def _addup_repetitive_outputs_(op_descs): for idx, op_desc in enumerate(op_descs): for var_name in op_desc.input_arg_names(): if len(renamed_vars[var_name]) > 1: - pending_sum_ops.append( - (_create_op_desc_("sum", {"X": renamed_vars[var_name]}, - {"Out": [var_name]}, {}), idx)) + pending_sum_ops.append((_create_op_desc_( + "sum", {"X": renamed_vars[var_name]}, {"Out": [var_name]}, + {"use_mkldnn": False}), idx)) renamed_vars[var_name] = [var_name] - for var_name in op_desc.output_arg_names(): - if var_name == core.empty_var_name( - ) or var_name in op_desc.input_arg_names(): - # empty variable or inplace op - continue - if len(renamed_vars[var_name]) == 0: - # it's the first time we get the variable - renamed_vars[var_name] = [var_name] - else: - if len(renamed_vars[var_name]) == 1: + for param_idx, param_name in enumerate(op_desc.output_names()): + arg_names = op_desc.output(param_name) + for arg_idx, var_name in enumerate(arg_names): + if var_name == core.empty_var_name( + ) or var_name in op_desc.input_arg_names(): + # empty variable or inplace op + continue + if len(renamed_vars[var_name]) == 0: + # it's the first time we get the variable + renamed_vars[var_name] = [var_name] + else: + if len(renamed_vars[var_name]) == 1: + new_name = var_name + "@RENAME@" + \ + str(var_rename_count[var_name]) + var_rename_count[var_name] += 1 + # rename original var_name + renamed_vars[var_name][0] = new_name + _rename_arg_(op_descs, var_name, new_name, 0, idx) + _rename_arg_(pending_sum_ops, var_name, new_name) + + for p in op_desc.output_names()[:param_idx]: + p_arg_names = op_desc.output(p) + if var_name in p_arg_names: + op_desc.set_output(p, [ + new_name if x == var_name else x + for x in p_arg_names + ]) + + arg_names = [ + new_name if x == var_name else x + for x in arg_names[:arg_idx] + ] + arg_names[arg_idx:] + new_name = var_name + "@RENAME@" + \ str(var_rename_count[var_name]) var_rename_count[var_name] += 1 - # rename original var_name - renamed_vars[var_name][0] = new_name - _rename_arg_(op_descs, var_name, new_name, 0, idx) - _rename_arg_(pending_sum_ops, var_name, new_name) - - new_name = var_name + "@RENAME@" + \ - str(var_rename_count[var_name]) - var_rename_count[var_name] += 1 - op_desc.rename_output(var_name, new_name) - renamed_vars[var_name].append(new_name) - for var_name, inputs in renamed_vars.iteritems(): + arg_names[arg_idx] = new_name + op_desc.set_output(param_name, arg_names) + renamed_vars[var_name].append(new_name) + + for var_name, inputs in list(renamed_vars.items()): if len(inputs) > 1: - pending_sum_ops.append((_create_op_desc_( - "sum", {"X": inputs}, {"Out": [var_name]}, {}), len(op_descs))) + pending_sum_ops.append( + (_create_op_desc_("sum", {"X": inputs}, {"Out": [var_name]}, + {"use_mkldnn": False}), len(op_descs))) # sum_op descs are sorted according to their insert position for p in reversed(pending_sum_ops): op_descs.insert(p[1], p[0]) @@ -176,16 +211,19 @@ def _remove_no_grad_branch_(op_descs, no_grad_set): out_arg_names = op_desc.output_arg_names() if len(out_arg_names) == 0 or _all_in_set_(out_arg_names, no_grad_set): return True - if _all_in_set_( - filter(lambda name: name.find(core.grad_var_suffix()) != -1, - op_desc.input_arg_names()), no_grad_set): + if _all_in_set_([ + name for name in op_desc.input_arg_names() + if name.find(core.grad_var_suffix()) != -1 + ], no_grad_set): no_grad_set.update(out_arg_names) return True return False # Remove ops whose outputs are all in no_grad_dict - op_descs = 
filter( - lambda op_desc: not _op_can_be_removed_(op_desc, no_grad_set), op_descs) + op_descs = [ + op_desc for op_desc in op_descs + if not _op_can_be_removed_(op_desc, no_grad_set) + ] # Insert fill_zeros_like_op to_insert = [] for idx, op_desc in enumerate(op_descs): @@ -195,12 +233,12 @@ def _remove_no_grad_branch_(op_descs, no_grad_set): "X": [_strip_grad_suffix_(arg)] }, {"Out": [arg]}, {}), idx)) - map(lambda p: op_descs.insert(p[1], p[0]), reversed(to_insert)) + list([op_descs.insert(p[1], p[0]) for p in reversed(to_insert)]) return op_descs -import proto.framework_pb2 as framework_pb2 +from .proto import framework_pb2 def serialize_op_decs(op_desc): @@ -222,8 +260,10 @@ def _callback_lookup_(op): if op.type == 'parallel_do' and op.attr('use_nccl'): all_vars = op.block.vars param_names = set(op.input('parameters')) - param_names = filter(lambda name: all_vars[name].stop_gradient is False, - param_names) + param_names = [ + name for name in param_names + if all_vars[name].stop_gradient is False + ] param_grad_names = [n + "@GRAD" for n in param_names] class ParallelDoCallBack(object): @@ -304,9 +344,9 @@ def _append_backward_ops_(block, grad_sub_block_list = [] # If the op has its own sub-block, deal with the sub-block first if op.has_attr("sub_block"): - sub_block = program.block(op.block_attr("sub_block")) + sub_block = program.block(op.block_attr_id("sub_block")) grad_sub_block = program.create_block() - grad_sub_block.set_forward_block_idx(sub_block.idx) + grad_sub_block._set_forward_block_idx(sub_block.idx) cb = _callback_lookup_(op) if cb is not None: if callbacks is None: @@ -335,9 +375,12 @@ def _append_backward_ops_(block, no_grad_dict[block.idx]) # append op_desc in grad_op_descs to target_block + op_role_attr_name = core.op_proto_and_checker_maker.kOpRoleAttrName() + backward = core.op_proto_and_checker_maker.OpRole.Backward for op_desc in grad_op_descs: new_op_desc = target_block.desc.append_op() new_op_desc.copy_from(op_desc) + new_op_desc.set_attr(op_role_attr_name, backward) grad_to_var["__current_op_desc__"] = new_op_desc if callbacks is not None: assert (isinstance(callbacks, list)) @@ -363,7 +406,7 @@ def _append_backward_vars_(block, start_op_idx, grad_to_var, grad_info_map): for op_idx in range(start_op_idx, block.desc.op_size()): op_desc = block.desc.op(op_idx) if op_desc.has_attr("sub_block"): - sub_block = block.program.block(op_desc.block_attr("sub_block")) + sub_block = block.program.block(op_desc.block_attr_id("sub_block")) _append_backward_vars_(sub_block, 0, grad_to_var, grad_info_map) new_vars = set() # create new gradient variables @@ -374,7 +417,7 @@ def _append_backward_vars_(block, start_op_idx, grad_to_var, grad_info_map): continue block.desc.var(grad_var_name) new_vars.add(grad_var_name) - if not grad_to_var.has_key(grad_var_name): + if grad_var_name not in grad_to_var: continue grad_info_map[grad_to_var[grad_var_name]] = (grad_var_name, block) # infer_shape and infer_type @@ -402,7 +445,7 @@ def _rename_grad_(block, start_op_idx, grad_to_var, target_grad_map): op_desc.rename_output(name, new_name) var_map[name] = new_name - for g, ng in var_map.iteritems(): + for g, ng in list(var_map.items()): if g in grad_to_var: grad_to_var[ng] = grad_to_var[g] grad_to_var.pop(g) @@ -414,7 +457,7 @@ def _get_stop_gradients_(program): for block in program.blocks: assert isinstance(block, framework.Block) block_no_grad_set = set() - for var in block.vars.itervalues(): + for var in list(block.vars.values()): assert isinstance(var, framework.Variable) if 
var.stop_gradient: block_no_grad_set.add(_append_grad_suffix_(var.name)) @@ -425,20 +468,83 @@ def _get_stop_gradients_(program): def append_backward(loss, parameter_list=None, no_grad_set=None, callbacks=None): """ - Append backward part to main_program + Append backward part to main_program. - Args: - loss(Variable): The variable generated by cost function. - parameter_list(list[string]): Parameters that need to be updated by - optimizer. If None, it means all parameters need to be updated. - no_grad_set(set): Variables that have no gradients in Block 0. - All variables with `step_gradient=True` from all blocks will be - automatically added. + A complete neural network training is made up of forward and backward + propagation. However, when we configure a network, we only need to + specify its forward part. The backward part is generated automatically + according to the forward part by this function. - Return: - (list[(Variable,Variable)]): list of (parameter, gradient) pair. + In most cases, users do not need to invoke this function manually. It + will be automatically invoked by the optimizer's `minimize` function. + + Args: + loss(Variable): The loss variable of the network. + parameter_list(list[string]|None): Names of parameters that need + to be updated by optimizers. + If it is None, all parameters + will be updated. + Default: None + no_grad_set(set|None): Variables in the Block 0 whose gradients + should be ignored. All variables with + `step_gradient=True` from all blocks will + be automatically added into this set. + Default: None + callbacks(list[callable object]|None): The callbacks are used for + doing some custom jobs during + backward part building. All + callable objects in it will + be invoked once each time a + new gradient operator is added + into the program. The callable + object must have two input + parameters: 'block' and 'context'. + The 'block' is the block which + the new gradient operator will + be added to. The 'context' is a + map, whose keys are gradient + variable names and values are + corresponding original variables. + In addition to this, the 'context' + has another special key-value pair: + the key is string '__current_op_desc__' + and the value is the op_desc of the + gradient operator who has just + triggered the callable object. + + Returns: + list[(Variable,Variable)]: Pairs of parameter and its + corresponding gradients. The key is the parameter and the + value is gradient variable. + + Raises: + AssertionError: If `loss` is not an instance of Variable. + + Examples: + .. code-block:: python + + # network configuration code + # ... + avg_loss = fluid.layers.mean(loss) + param_grad_list = fluid.backward.append_backward(loss=avg_loss) """ assert isinstance(loss, framework.Variable) + + if loss.op is None: + # the loss is from a cloned program. Find loss op manually. + for op in reversed(loss.block.ops): + assert isinstance(op, framework.Operator) + if len(op.output_arg_names) == 1 and op.output_arg_names[ + 0] == loss.name: + loss.op = op + break + if loss.op is None: + raise ValueError("loss.op is None. 
Should not happen") + + loss.op.set_attr(core.op_proto_and_checker_maker.kOpRoleAttrName(), + int(core.op_proto_and_checker_maker.OpRole.Forward) | + int(core.op_proto_and_checker_maker.OpRole.Loss)) + if callbacks is not None: isinstance(callbacks, list) @@ -447,7 +553,7 @@ def append_backward(loss, parameter_list=None, no_grad_set=None, no_grad_set = set() no_grad_set = copy.copy(no_grad_set) no_grad_dict = _get_stop_gradients_(program) - no_grad_dict[0].update(map(_append_grad_suffix_, no_grad_set)) + no_grad_dict[0].update(list(map(_append_grad_suffix_, no_grad_set))) grad_info_map = dict() root_block = program.block(0) @@ -456,17 +562,21 @@ def append_backward(loss, parameter_list=None, no_grad_set=None, current_block_idx = program.current_block_idx grad_to_var = dict() - op_desc = _create_op_desc_("fill_constant", {}, { - "Out": [_append_grad_suffix_(loss.name)] - }, {"shape": [1], - "value": 1.0, - "dtype": loss.dtype, - "force_cpu": False}) + op_desc = _create_op_desc_( + "fill_constant", {}, {"Out": [_append_grad_suffix_(loss.name)]}, { + "shape": [1], + "value": 1.0, + "dtype": loss.dtype, + "force_cpu": False, + core.op_proto_and_checker_maker.kOpRoleAttrName(): + int(core.op_proto_and_checker_maker.OpRole.Backward) | + int(core.op_proto_and_checker_maker.OpRole.Loss), + }) root_block.desc.append_op().copy_from(op_desc) block_no_grad_set = set(map(_strip_grad_suffix_, no_grad_dict[0])) op_path = _find_op_path_(root_block, [loss], [], block_no_grad_set) - no_grad_dict[0].update(map(_append_grad_suffix_, block_no_grad_set)) + no_grad_dict[0].update(list(map(_append_grad_suffix_, block_no_grad_set))) _append_backward_ops_(root_block, op_path, root_block, no_grad_dict, grad_to_var, callbacks) @@ -479,7 +589,7 @@ def append_backward(loss, parameter_list=None, no_grad_set=None, _append_backward_vars_(root_block, fwd_op_num, grad_to_var, grad_info_map) program.current_block_idx = current_block_idx - program.sync_with_cpp() + program._sync_with_cpp() if parameter_list is not None: parameters = parameter_list @@ -503,6 +613,24 @@ def append_backward(loss, parameter_list=None, no_grad_set=None, params_and_grads.append((param_var, grad_var)) else: params_and_grads.append((param_var, None)) + + op_role_var_attr_name = core.op_proto_and_checker_maker.kOpRoleVarAttrName() + for p, g in params_and_grads: + if g is None: + continue + for op in reversed(program.global_block().ops): + assert isinstance(op, framework.Operator) + if g.name in op.output_arg_names: + g.op = op + break + + if g.op is None: + raise ValueError("Unexpected branch") + attr_val = [p.name, g.name] + if g.op.has_attr(op_role_var_attr_name): + attr_val.extend(g.op.attr(op_role_var_attr_name)) + g.op.set_attr(op_role_var_attr_name, attr_val) + return params_and_grads @@ -587,7 +715,7 @@ def calc_gradient(targets, inputs, target_gradients=None, no_grad_set=None): no_grad_set = set() no_grad_set = copy.copy(no_grad_set) no_grad_dict = _get_stop_gradients_(prog) - no_grad_dict[0].update(map(_append_grad_suffix_, no_grad_set)) + no_grad_dict[0].update(list(map(_append_grad_suffix_, no_grad_set))) fwd_op_num = block.desc.op_size() @@ -621,7 +749,7 @@ def calc_gradient(targets, inputs, target_gradients=None, no_grad_set=None): block_no_grad_set = set(map(_strip_grad_suffix_, no_grad_dict[0])) op_path = _find_op_path_(block, targets, inputs, block_no_grad_set) - no_grad_dict[0].update(map(_append_grad_suffix_, block_no_grad_set)) + no_grad_dict[0].update(list(map(_append_grad_suffix_, block_no_grad_set))) grad_to_var = dict() 
grad_info_map = dict() _append_backward_ops_(block, op_path, block, no_grad_dict, grad_to_var) @@ -632,7 +760,7 @@ def calc_gradient(targets, inputs, target_gradients=None, no_grad_set=None): _rename_grad_(block, fwd_op_num, grad_to_var, target_grad_map) _append_backward_vars_(block, fwd_op_num, grad_to_var, grad_info_map) - prog.sync_with_cpp() + prog._sync_with_cpp() grad_vars = [] for input_var in inputs: diff --git a/python/paddle/fluid/clip.py b/python/paddle/fluid/clip.py index 12add9e686..4b0a792f78 100644 --- a/python/paddle/fluid/clip.py +++ b/python/paddle/fluid/clip.py @@ -13,10 +13,11 @@ # limitations under the License. import copy +import six import functools -import layers -import framework +from . import layers +from . import framework from . import core __all__ = [ @@ -24,8 +25,6 @@ __all__ = [ 'GradientClipByValue', 'GradientClipByNorm', 'GradientClipByGlobalNorm', - 'append_gradient_clip_ops', - 'error_clip_callback', ] @@ -33,11 +32,30 @@ class BaseErrorClipAttr(object): def __str__(self): raise NotImplementedError() - def append_clip_op(self, block, grad_name): + def _append_clip_op(self, block, grad_name): raise NotImplementedError() class ErrorClipByValue(BaseErrorClipAttr): + """ + Clips tensor values to the range [min, max]. + + Given a tensor t, this operation clips its value to min and max inplace. + + - Any values less than min are set to min. + - Any values greater than max are set to max. + + Args: + max (float): The maximum value to clip by. + min (float, optional): The minimum value to clip by. if not set by user, \ + will be set to -max by framework. + + Examples: + .. code-block:: python + + var = fluid.framework.Variable(..., error_clip=ErrorClipByValue(max=5.0), ...) + """ + def __init__(self, max, min=None): max = float(max) if min is None: @@ -50,7 +68,7 @@ class ErrorClipByValue(BaseErrorClipAttr): def __str__(self): return "ByValue, min=%f, max=%f" % (self.min, self.max) - def append_clip_op(self, block, grad_name): + def _append_clip_op(self, block, grad_name): clip_op_desc = block.desc.append_op() clip_op_desc.set_type("clip") clip_op_desc.set_input("X", [grad_name]) @@ -63,9 +81,8 @@ def error_clip_callback(block, context): # the context is a grad_to_var map grad_to_var = context op_desc = block.desc.op(block.desc.op_size() - 1) - for grad_n in filter(lambda n: grad_to_var.has_key(n), - op_desc.output_arg_names()): - fwd_var = block.var_recursive(grad_to_var[grad_n]) + for grad_n in [n for n in op_desc.output_arg_names() if n in grad_to_var]: + fwd_var = block._var_recursive(grad_to_var[grad_n]) error_clip = getattr(fwd_var, "error_clip", None) if not (error_clip is None or isinstance(error_clip, BaseErrorClipAttr)): @@ -73,17 +90,17 @@ def error_clip_callback(block, context): "Variable's error_clip should be an instance of BaseErrorClipAttr or None." 
) if error_clip is not None: - error_clip.append_clip_op(block, grad_n) + error_clip._append_clip_op(block, grad_n) class BaseGradientClipAttr(object): def __str__(self): raise NotImplementedError() - def process_context(self, context, param, grad): + def _process_context(self, context, param, grad): raise NotImplementedError() - def create_operators(self, param, grad): + def _create_operators(self, param, grad): raise NotImplementedError() @@ -91,14 +108,39 @@ class NullGradientClipAttr(BaseGradientClipAttr): def __str__(self): return "Null" - def process_context(self, context, param, grad): + def _process_context(self, context, param, grad): pass - def create_operators(self, param, grad): + def _create_operators(self, param, grad): return param, grad class GradientClipByValue(BaseGradientClipAttr): + """ + Clips gradient values to the range [min, max]. + + Given a tensor t, this operation clips its value to min and max inplace. + + - Any values less than min are set to min. + - Any values greater than max are set to max. + + Args: + max (float): The maximum value to clip by. + min (float, optional): The minimum value to clip by. if not set by user, \ + will be set to -max by framework. + + Examples: + .. code-block:: python + + w_param_attrs = ParamAttr(name=None, + initializer=UniformInitializer(low=-1.0, high=1.0, seed=0), + learning_rate=1.0, + regularizer=L1Decay(1.0), + trainable=True, + clip=GradientClipByValue(-1.0, 1.0)) + y_predict = fluid.layers.fc(input=x, size=1, param_attr=w_param_attrs) + """ + def __init__(self, max, min=None): max = float(max) if min is None: @@ -111,33 +153,102 @@ class GradientClipByValue(BaseGradientClipAttr): def __str__(self): return "ByValue, min=%f, max=%f" % (self.min, self.max) - def process_context(self, context, param, grad): + def _process_context(self, context, param, grad): pass - def create_operators(self, param, grad): + def _create_operators(self, param, grad): new_grad = layers.clip(x=grad, min=self.min, max=self.max) return param, new_grad class GradientClipByNorm(BaseGradientClipAttr): + """ + Clips tensor values to a maximum L2-norm. + + This operator limits the L2 norm of the input :math:`X` within :math:`max\_norm`. + If the L2 norm of :math:`X` is less than or equal to :math:`max\_norm`, :math:`Out` + will be the same as :math:`X`. If the L2 norm of :math:`X` is greater than + :math:`max\_norm`, :math:`X` will be linearly scaled to make the L2 norm of + :math:`Out` equal to :math:`max\_norm`, as shown in the following formula: + + .. math:: + + Out = \\frac{max\_norm * X}{norm(X)}, + + where :math:`norm(X)` represents the L2 norm of :math:`X`. + + Args: + clip_norm (float): The maximum norm value + + Examples: + .. 
code-block:: python + + w_param_attrs = ParamAttr(name=None, + initializer=UniformInitializer(low=-1.0, high=1.0, seed=0), + learning_rate=1.0, + regularizer=L1Decay(1.0), + trainable=True, + clip=GradientClipByNorm(clip_norm=2.0)) + y_predict = fluid.layers.fc(input=x, size=1, param_attr=w_param_attrs) + + """ + def __init__(self, clip_norm): self.clip_norm = clip_norm def __str__(self): return "ByNorm, clip_norm=%f" % self.clip_norm - def process_context(self, context, param, grad): + def _process_context(self, context, param, grad): pass - def create_operators(self, param, grad): + def _create_operators(self, param, grad): new_grad = layers.clip_by_norm(x=grad, max_norm=self.clip_norm) return param, new_grad class GradientClipByGlobalNorm(BaseGradientClipAttr): + """ + Clips values of multiple tensors by the ratio of the sum of their norms. + + Given a list of tensors t_list, and a clipping ratio clip_norm, this + operation returns a list of clipped tensors list_clipped and the global + norm (global_norm) of all tensors in t_list. + + To perform the clipping, the values :math:`t\_list[i]` are set to: + + .. math:: + + t\_list[i] = t\_list[i] * \\frac{clip\_norm}{\max(global\_norm, clip\_norm)} + + where: + + .. math:: + + global\_norm = \sqrt{\sum_{i=0}^{N-1}(l2norm(t\_list[i]))^2} + + If :math:`clip\_norm > global\_norm` then the entries in t_list remain as they are, + otherwise they're all shrunk by the global ratio. + + Args: + clip_norm (float): The maximum norm value + group_name (str, optional): The group name for this clip. + + Examples: + .. code-block:: python + + p_g_clip = fluid.backward.append_backward(loss=avg_cost_clip) + + with fluid.program_guard(main_program=prog_clip): + fluid.clip.set_gradient_clip( + fluid.clip.GradientClipByGlobalNorm(clip_norm=2.0)) + p_g_clip = fluid.clip.append_gradient_clip_ops(p_g_clip) + + """ + def __init__(self, clip_norm, group_name="default_group"): - if not isinstance(group_name, basestring): - raise TypeError("'group_name' must be a basestring.") + if not isinstance(group_name, six.string_types): + raise TypeError("'group_name' must be a %s." % (six.string_types)) self.clip_norm = clip_norm self.group_name = group_name @@ -146,7 +257,7 @@ class GradientClipByGlobalNorm(BaseGradientClipAttr): return "ByGlobalNorm, group_name=%s, clip_norm=%f" % (self.group_name, self.clip_norm) - def process_context(self, context, param, grad): + def _process_context(self, context, param, grad): if self.group_name not in context: context[self.group_name] = [] context[self.group_name + "_clip_value"] = self.clip_norm @@ -163,7 +274,7 @@ class GradientClipByGlobalNorm(BaseGradientClipAttr): self.context = context - def create_operators(self, param, grad): + def _create_operators(self, param, grad): group_scale_name = self.group_name + "_scale" if group_scale_name not in self.context: group_norm_var = layers.sums(input=self.context[self.group_name]) @@ -173,7 +284,7 @@ class GradientClipByGlobalNorm(BaseGradientClipAttr): x=clip_var, y=layers.elementwise_max( x=clip_var, y=group_norm_var)) - assert group_scale_var.shape == (1L, ) + assert group_scale_var.shape == (1, ) self.context[group_scale_name] = group_scale_var new_grad = layers.elementwise_mul( @@ -183,15 +294,16 @@ class GradientClipByGlobalNorm(BaseGradientClipAttr): def set_gradient_clip(clip, param_list=None, program=None): """ - To specify parameters that require gradient clip. 
- Args: - clip(BaseGradientClipAttr): An instance of some derived class of BaseGradientClipAttr, - which describes the type and detailed attributes of required gradient clip. - param_list(list, None by default): Parameters that require gradient clip. - It can be a list of parameter or a list of parameter's name. - When it's None, all parameters in the program will be included. - program(Program, None by default): The program where parameters are. - Will be the default main program when assigned with None. + To specify parameters that require gradient clip. + + Args: + clip(BaseGradientClipAttr): An instance of some derived class of BaseGradientClipAttr, + which describes the type and detailed attributes of required gradient clip. + param_list(list(Variable)): Parameters that require gradient clip. + It can be a list of parameter or a list of parameter's name. + When it's None, all parameters in the program will be included. + program(Program): The program where parameters are. + Will be the default main program when assigned with None. """ if not isinstance(clip, BaseGradientClipAttr): raise TypeError( @@ -201,7 +313,7 @@ def set_gradient_clip(clip, param_list=None, program=None): program = framework.default_main_program() if param_list is None: param_list = program.block(0).all_parameters() - if all(isinstance(elem, basestring) for elem in param_list): + if all(isinstance(elem, six.string_types) for elem in param_list): param_list = [program.block(0).var(elem) for elem in param_list] if not all(isinstance(elem, framework.Parameter) for elem in param_list): raise TypeError( @@ -212,23 +324,30 @@ def set_gradient_clip(clip, param_list=None, program=None): param.gradient_clip_attr = copy.deepcopy(clip) -def append_gradient_clip_ops(param_grad): +def append_gradient_clip_ops(param_grads): context = dict() - create_op_callbacks = [] - for p, g in param_grad: - clip_attr = getattr(p, 'gradient_clip_attr', NullGradientClipAttr()) - if clip_attr is None: - clip_attr = NullGradientClipAttr() - if not isinstance(clip_attr, BaseGradientClipAttr): - raise TypeError( - "clip attribute should be an instance of BaseGradientClipAttr") + for p, g in param_grads: + if g is None: + continue + with p.block.program.optimized_guard([p, g]): + clip_attr = getattr(p, 'gradient_clip_attr', NullGradientClipAttr()) + if clip_attr is None: + clip_attr = NullGradientClipAttr() + if not isinstance(clip_attr, BaseGradientClipAttr): + raise TypeError( + "clip attribute should be an instance of BaseGradientClipAttr" + ) + + clip_attr._process_context(context=context, param=p, grad=g) - clip_attr.process_context(context=context, param=p, grad=g) - create_op_callbacks.append( - functools.partial( - clip_attr.create_operators, param=p, grad=g)) + res = [] + for p, g in param_grads: + if g is None: + continue + with p.block.program.optimized_guard([p, g]): + res.append(clip_attr._create_operators(param=p, grad=g)) - return [each_callback() for each_callback in create_op_callbacks] + return res ClipByValue = GradientClipByValue diff --git a/python/paddle/fluid/concurrency.py b/python/paddle/fluid/concurrency.py index 470dd0df52..676a52a917 100644 --- a/python/paddle/fluid/concurrency.py +++ b/python/paddle/fluid/concurrency.py @@ -12,15 +12,14 @@ # See the License for the specific language governing permissions and # limitations under the License. 
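The scaling rule documented for `GradientClipByGlobalNorm` above is easy to sanity-check outside Fluid. A minimal NumPy sketch of the formula (not the Fluid implementation, which assembles the same math from `layers` ops):

    import numpy as np


    def clip_by_global_norm(grads, clip_norm):
        # global_norm = sqrt(sum_i l2norm(g_i)^2), as in the docstring above
        global_norm = np.sqrt(sum(float(np.sum(np.square(g))) for g in grads))
        scale = clip_norm / max(global_norm, clip_norm)
        return [g * scale for g in grads], global_norm


    grads = [np.array([3.0, 4.0]), np.array([0.0, 12.0])]  # norms 5 and 12
    clipped, norm = clip_by_global_norm(grads, clip_norm=2.0)
    assert abs(norm - 13.0) < 1e-6  # sqrt(5**2 + 12**2)
    assert abs(np.linalg.norm(np.concatenate(clipped)) - 2.0) < 1e-6

When `global_norm` is already below `clip_norm`, the scale reduces to 1 and the gradients pass through unchanged.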
-from layers.control_flow import BlockGuard, equal +from .layers.control_flow import BlockGuard, equal from .framework import Operator -from layer_helper import LayerHelper, unique_name -from layers import fill_constant -import core +from .layer_helper import LayerHelper, unique_name +from .layers import fill_constant +from . import core __all__ = [ - 'Go', 'make_channel', 'channel_send', 'channel_recv', 'channel_close', - 'Select' + 'make_channel', 'channel_send', 'channel_recv', 'channel_close', 'Select' ] @@ -35,10 +34,10 @@ class Go(BlockGuard): def __exit__(self, exc_type, exc_val, exc_tb): if exc_type is not None: return False - self.construct_go_op() + self._construct_go_op() return super(Go, self).__exit__(exc_type, exc_val, exc_tb) - def construct_go_op(self): + def _construct_go_op(self): main_program = self.helper.main_program go_block = main_program.current_block() parent_block = main_program.block(main_program.current_block() @@ -69,8 +68,10 @@ class Go(BlockGuard): parent_block.append_op( type='go', inputs={ - 'X': - [parent_block.var_recursive(x_name) for x_name in x_name_list] + 'X': [ + parent_block._var_recursive(x_name) + for x_name in x_name_list + ] }, outputs={}, attrs={'sub_block': go_block}) @@ -259,7 +260,7 @@ class Select(BlockGuard): if var_name in intermediate ] - X = [select_block.var_recursive(x_name) for x_name in params] + X = [select_block._var_recursive(x_name) for x_name in params] # Needs to be used by `equal` inside the cases block. X.append(self.case_to_execute) diff --git a/python/paddle/fluid/contrib/__init__.py b/python/paddle/fluid/contrib/__init__.py new file mode 100644 index 0000000000..58f2da1c3b --- /dev/null +++ b/python/paddle/fluid/contrib/__init__.py @@ -0,0 +1,20 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from . import decoder +from .decoder import * +from . import memory_usage_calc +from .memory_usage_calc import * + +__all__ = decoder.__all__ + memory_usage_calc.__all__ diff --git a/python/paddle/fluid/contrib/decoder/__init__.py b/python/paddle/fluid/contrib/decoder/__init__.py new file mode 100644 index 0000000000..6343c1543d --- /dev/null +++ b/python/paddle/fluid/contrib/decoder/__init__.py @@ -0,0 +1,18 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from . 
import beam_search_decoder +from .beam_search_decoder import * + +__all__ = beam_search_decoder.__all__ diff --git a/python/paddle/fluid/contrib/decoder/beam_search_decoder.py b/python/paddle/fluid/contrib/decoder/beam_search_decoder.py new file mode 100644 index 0000000000..d268a948f7 --- /dev/null +++ b/python/paddle/fluid/contrib/decoder/beam_search_decoder.py @@ -0,0 +1,840 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +This module provides a general beam search decoder API for RNN based decoders. +The purpose of this API is to allow users to highly customize the behavior +within their RNN decoder(vanilla RNN, LSTM, attention + LSTM, future etc.), +without using the low level API such as while ops. + +This API is still under active development and may change drastically. +""" + +import contextlib +import numpy as np +import six + +from ... import layers +from ...framework import Variable +from ... import core +from ... import framework, unique_name +from ...layer_helper import LayerHelper + +__all__ = ['InitState', 'StateCell', 'TrainingDecoder', 'BeamSearchDecoder'] + + +class _DecoderType: + TRAINING = 1 + BEAM_SEARCH = 2 + + +class InitState(object): + """ + The initial hidden state object. The state objects holds a variable, and may + use it to initialize the hidden state cell of RNN. Usually used as input to + `StateCell` class. + + Args: + init (Variable): The initial variable of the hidden state. If set None, + the variable will be created as a tensor with constant value based + on `shape` and `value` param. + shape (tuple|list): If `init` is None, new Variable's shape. Default + None. + value (float): If `init` is None, new Variable's value. Default None. + init_boot (Variable): If provided, the initial variable will be created + with the same shape as this variable. + need_reorder (bool): If set true, the init will be sorted by its lod + rank within its batches. This should be used if `batch_size > 1`. + dtype (np.dtype|core.VarDesc.VarType|str): Data type of the initial + variable. + + Returns: + An initialized state object. + + Examples: + See `StateCell`. 
+ """ + + def __init__(self, + init=None, + shape=None, + value=0.0, + init_boot=None, + need_reorder=False, + dtype='float32'): + if init is not None: + self._init = init + elif init_boot is None: + raise ValueError( + 'init_boot must be provided to infer the shape of InitState .\n') + else: + self._init = layers.fill_constant_batch_size_like( + input=init_boot, value=value, shape=shape, dtype=dtype) + + self._shape = shape + self._value = value + self._need_reorder = need_reorder + self._dtype = dtype + + @property + def value(self): + return self._init + + @property + def need_reorder(self): + return self._need_reorder + + +class _MemoryState(object): + def __init__(self, state_name, rnn_obj, init_state): + self._state_name = state_name # each is a rnn.memory + self._rnn_obj = rnn_obj + self._state_mem = self._rnn_obj.memory( + init=init_state.value, need_reorder=init_state.need_reorder) + + def get_state(self): + return self._state_mem + + def update_state(self, state): + self._rnn_obj.update_memory(self._state_mem, state) + + +class _ArrayState(object): + def __init__(self, state_name, block, init_state): + self._state_name = state_name + self._block = block + + self._state_array = self._block.create_var( + name=unique_name.generate('array_state_array'), + type=core.VarDesc.VarType.LOD_TENSOR_ARRAY, + dtype=init_state.value.dtype) + + self._counter = self._block.create_var( + name=unique_name.generate('array_state_counter'), + type=core.VarDesc.VarType.LOD_TENSOR, + dtype='int64') + + # initialize counter + self._block.append_op( + type='fill_constant', + inputs={}, + outputs={'Out': [self._counter]}, + attrs={ + 'shape': [1], + 'dtype': self._counter.dtype, + 'value': float(0.0), + 'force_cpu': True + }) + + self._counter.stop_gradient = True + + # write initial state + block.append_op( + type='write_to_array', + inputs={'X': init_state.value, + 'I': self._counter}, + outputs={'Out': self._state_array}) + + def get_state(self): + state = layers.array_read(array=self._state_array, i=self._counter) + return state + + def update_state(self, state): + layers.increment(x=self._counter, value=1, in_place=True) + layers.array_write(state, array=self._state_array, i=self._counter) + + +class StateCell(object): + """ + The state cell class stores the hidden state of the RNN cell. A typical RNN + cell has one or more hidden states, and one or more step inputs. This class + allows you to defines the name of hidden states as well as step inputs, and + their associated variables. + + Args: + inputs (dict): A feeding dict of {name(str) : Variable}. It specifies + the names of step inputs for RNN cell, and the associated variables. + The variable could initially be None and set manually during each + RNN step. + states (dict): A feeding dict of {name(str) : InitState object}. It + specifies the names of hidden states and their initialized state. + out_state (str): A string that specifies the name of hidden state that + will be used to compute the score in beam search process. + name (str): The name of the RNN cell. Default None. + + Raises: + `ValueError`: If the initial state is not an instance of InitState, or + the out_state is not in the dict of states. + + Returns: + StateCell: The initialized StateCell object. + + Examples: + .. 
code-block:: python + hidden_state = InitState(init=encoder_out, need_reorder=True) + state_cell = StateCell( + inputs={'current_word': None}, + states={'h': hidden_state}, + out_state='h') + """ + + def __init__(self, inputs, states, out_state, name=None): + self._helper = LayerHelper('state_cell', name=name) + self._cur_states = {} + self._state_names = [] + for state_name, state in six.iteritems(states): + if not isinstance(state, InitState): + raise ValueError('state must be an InitState object.') + self._cur_states[state_name] = state + self._state_names.append(state_name) + self._inputs = inputs # inputs is place holder here + self._cur_decoder_obj = None + self._in_decoder = False + self._states_holder = {} + self._switched_decoder = False + self._state_updater = None + self._out_state = out_state + if self._out_state not in self._cur_states: + raise ValueError('out_state must be one state in states') + + def _enter_decoder(self, decoder_obj): + if self._in_decoder == True or self._cur_decoder_obj is not None: + raise ValueError('StateCell has already entered a decoder.') + self._in_decoder = True + self._cur_decoder_obj = decoder_obj + self._switched_decoder = False + + def _leave_decoder(self, decoder_obj): + if not self._in_decoder: + raise ValueError('StateCell not in decoder, ' + 'invalid leaving operation.') + + if self._cur_decoder_obj != decoder_obj: + raise ValueError('Inconsistent decoder object in StateCell.') + + self._in_decoder = False + self._cur_decoder_obj = None + self._switched_decoder = False + + def _switch_decoder(self): # lazy switch + if not self._in_decoder: + raise ValueError('StateCell must be enter a decoder.') + + if self._switched_decoder: + raise ValueError('StateCell already done switching.') + + for state_name in self._state_names: + if state_name not in self._states_holder: + state = self._cur_states[state_name] + + if not isinstance(state, InitState): + raise ValueError('Current type of state is %s, should be ' + 'an InitState object.' % type(state)) + + self._states_holder[state_name] = {} + + if self._cur_decoder_obj.type == _DecoderType.TRAINING: + self._states_holder[state_name][id(self._cur_decoder_obj)] \ + = _MemoryState(state_name, + self._cur_decoder_obj.dynamic_rnn, + state) + elif self._cur_decoder_obj.type == _DecoderType.BEAM_SEARCH: + self._states_holder[state_name][id(self._cur_decoder_obj)] \ + = _ArrayState(state_name, + self._cur_decoder_obj._parent_block(), + state) + else: + raise ValueError('Unknown decoder type, only support ' + '[TRAINING, BEAM_SEARCH]') + + # Read back, since current state should be LoDTensor + self._cur_states[state_name] = \ + self._states_holder[state_name][ + id(self._cur_decoder_obj)].get_state() + + self._switched_decoder = True + + def get_state(self, state_name): + """ + The getter of state object. Find the state variable by its name. + + Args: + state_name (str): A string of the state's name. + + Returns: + The associated state object. + """ + if self._in_decoder and not self._switched_decoder: + self._switch_decoder() + + if state_name not in self._cur_states: + raise ValueError( + 'Unknown state %s. Please make sure _switch_decoder() ' + 'invoked.' % state_name) + + return self._cur_states[state_name] + + def get_input(self, input_name): + """ + The getter of input variable. Find the input variable by its name. + + Args: + input_name (str): The string of the input's name. + + Returns: + The associated input variable. 
+ """ + if input_name not in self._inputs or self._inputs[input_name] is None: + raise ValueError('Invalid input %s.' % input_name) + return self._inputs[input_name] + + def set_state(self, state_name, state_value): + """ + The setter of the state variable. Change the variable of the given + `state_name`. + + Args: + state_name (str): The name of the state to change. + state_value (Var): The variable of the new state. + """ + self._cur_states[state_name] = state_value + + def state_updater(self, updater): + """ + Set up the updater to update the hidden state every RNN step. The + behavior of updater could be customized by users. The updater should be + a function that takes a `StateCell` object as input and update the + hidden state within it. The hidden state could be accessed through + `get_state` method. + + Args: + updater (func): the updater to update the state cell. + """ + self._state_updater = updater + + def _decorator(state_cell): + if state_cell == self: + raise TypeError('Updater should only accept a StateCell object ' + 'as argument.') + updater(state_cell) + + return _decorator + + def compute_state(self, inputs): + """ + Provide the step input of RNN cell, and compute the new hidden state + with updater and give step input. + + Args: + inputs (dict): A feed dict, {name(str): Variable}. name should be + the names of step inputs for this RNN cell, and Variable should be + the associated variables. + + Examples: + .. code-block:: python + state_cell.compute_state(inputs={'x': current_word}) + """ + if self._in_decoder and not self._switched_decoder: + self._switch_decoder() + + for input_name, input_value in six.iteritems(inputs): + if input_name not in self._inputs: + raise ValueError('Unknown input %s. ' + 'Please make sure %s in input ' + 'place holder.' % (input_name, input_name)) + self._inputs[input_name] = input_value + self._state_updater(self) + + def update_states(self): + """ + Update and record state information after each RNN step. + """ + if self._in_decoder and not self._switched_decoder: + self._switched_decoder() + + for state_name, decoder_state in six.iteritems(self._states_holder): + if id(self._cur_decoder_obj) not in decoder_state: + raise ValueError('Unknown decoder object, please make sure ' + 'switch_decoder been invoked.') + decoder_state[id(self._cur_decoder_obj)].update_state( + self._cur_states[state_name]) + + def out_state(self): + """ + Get the output state variable. This must be called after update_states. + + Returns: + The output variable of the RNN cell. + """ + return self._cur_states[self._out_state] + + +class TrainingDecoder(object): + """ + A decoder that can only be used for training. The decoder could be + initialized with a `StateCell` object. The computation within the RNN cell + could be defined with decoder's block. + + Args: + state_cell (StateCell): A StateCell object that handles the input and + state variables. + name (str): The name of this decoder. Default None. + + Returns: + TrainingDecoder: The initialized TrainingDecoder object. + + Examples: + .. 
+ + +class TrainingDecoder(object): + """ + A decoder that can only be used for training. The decoder could be + initialized with a `StateCell` object. The computation within the RNN cell + could be defined within the decoder's block. + + Args: + state_cell (StateCell): A StateCell object that handles the input and + state variables. + name (str): The name of this decoder. Default None. + + Returns: + TrainingDecoder: The initialized TrainingDecoder object. + + Examples: + .. code-block:: python + decoder = TrainingDecoder(state_cell) + with decoder.block(): + current_word = decoder.step_input(trg_embedding) + decoder.state_cell.compute_state(inputs={'x': current_word}) + current_score = layers.fc(input=decoder.state_cell.get_state('h'), + size=32, + act='softmax') + decoder.state_cell.update_states() + decoder.output(current_score) + """ + BEFORE_DECODER = 0 + IN_DECODER = 1 + AFTER_DECODER = 2 + + def __init__(self, state_cell, name=None): + self._helper = LayerHelper('training_decoder', name=name) + self._status = TrainingDecoder.BEFORE_DECODER + self._dynamic_rnn = layers.DynamicRNN() + self._type = _DecoderType.TRAINING + self._state_cell = state_cell + self._state_cell._enter_decoder(self) + + @contextlib.contextmanager + def block(self): + """ + Define the behavior of the decoder for each RNN time step. + """ + if self._status != TrainingDecoder.BEFORE_DECODER: + raise ValueError('decoder.block() can only be invoked once') + self._status = TrainingDecoder.IN_DECODER + with self._dynamic_rnn.block(): + yield + self._status = TrainingDecoder.AFTER_DECODER + self._state_cell._leave_decoder(self) + + @property + def state_cell(self): + self._assert_in_decoder_block('state_cell') + return self._state_cell + + @property + def dynamic_rnn(self): + return self._dynamic_rnn + + @property + def type(self): + return self._type + + def step_input(self, x): + """ + Set the input variable as a step input to the RNN cell. For example, + in machine translation, at each time step we read one word from the + target sentence, so the target sentence is a step input to the RNN + cell. + + Args: + x (Variable): the variable to be used as step input. + + Returns: + Variable: The variable as input of current step. + + Examples: + .. code-block:: python + current_word = decoder.step_input(trg_embedding) + """ + self._assert_in_decoder_block('step_input') + return self._dynamic_rnn.step_input(x) + + def static_input(self, x): + """ + Set the input variable as a static input of the RNN cell. In contrast + to a step input, this variable will be used as a whole within the RNN + decode loop and will not be scattered into time steps. + + Args: + x (Variable): the variable to be used as static input. + + Returns: + Variable: The variable as input of current step. + + Examples: + .. code-block:: python + encoder_vec = decoder.static_input(encoded_vector) + """ + self._assert_in_decoder_block('static_input') + return self._dynamic_rnn.static_input(x) + + def __call__(self, *args, **kwargs): + """ + Get the output of the RNN. This API should only be invoked after + decoder.block(). + + Returns: + Variable: The specified output of the RNN cell. + """ + if self._status != TrainingDecoder.AFTER_DECODER: + raise ValueError('Output of training decoder can only be visited ' + 'outside the block.') + return self._dynamic_rnn(*args, **kwargs) + + def output(self, *outputs): + """ + Set the output variables of the RNN cell. + + Args: + *outputs (Variables): a series of variables that are treated as + outputs of the RNN cell. + + Examples: + .. code-block:: python + out = fluid.layers.fc(input=h, + size=32, + bias_attr=True, + act='softmax') + decoder.output(out) + """ + self._assert_in_decoder_block('output') + self._dynamic_rnn.output(*outputs) + + def _assert_in_decoder_block(self, method): + if self._status != TrainingDecoder.IN_DECODER: + raise ValueError('%s should be invoked inside block of ' + 'TrainingDecoder object.' % method)
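As a hedged sketch of what typically follows the TrainingDecoder example above: once the block closes, the decoder output can be fetched and fed into a loss. The `label` layer and its shape are placeholders, not part of this API:

    decoder_out = decoder()   # step outputs, concatenated over time
    label = layers.data(name='label', shape=[1], dtype='int64', lod_level=1)
    cost = layers.cross_entropy(input=decoder_out, label=label)
    avg_cost = layers.mean(x=cost)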
+ + +class BeamSearchDecoder(object): + """ + A beam search decoder that can be used for inference. The decoder should + be initialized with a `StateCell` object. The decode process can be + defined within its block. + + Args: + state_cell (StateCell): A StateCell object that handles the input and + state variables. + init_ids (Variable): The init beam search token ids. + init_scores (Variable): The associated score of each id. + target_dict_dim (int): Size of dictionary. + word_dim (int): Word embedding dimension. + input_var_dict (dict): A feeding dict to feed the required input + variables to the state cell. It will be used by the state_cell's + compute_state method. Default empty. + topk_size (int): The topk size used for beam search. Default 50. + sparse_emb (bool): Whether to use a sparse embedding table for the + target words. Default True. + max_len (int): The maximum allowed length of the generated sentence. + Default 100. + beam_size (int): The beam width of beam search decode. Default 1. + end_id (int): The id of the end token within beam search. + name (str): The name of this decoder. Default None. + + Returns: + BeamSearchDecoder: An initialized BeamSearchDecoder object. + + Examples: + .. code-block:: python + decoder = BeamSearchDecoder( + state_cell=state_cell, + init_ids=init_ids, + init_scores=init_scores, + target_dict_dim=target_dict_dim, + word_dim=word_dim, + input_var_dict={}, + topk_size=topk_size, + sparse_emb=IS_SPARSE, + max_len=max_length, + beam_size=beam_size, + end_id=1, + name=None + ) + decoder.decode() + translation_ids, translation_scores = decoder() + """ + BEFORE_BEAM_SEARCH_DECODER = 0 + IN_BEAM_SEARCH_DECODER = 1 + AFTER_BEAM_SEARCH_DECODER = 2 + + def __init__(self, + state_cell, + init_ids, + init_scores, + target_dict_dim, + word_dim, + input_var_dict={}, + topk_size=50, + sparse_emb=True, + max_len=100, + beam_size=1, + end_id=1, + name=None): + self._helper = LayerHelper('beam_search_decoder', name=name) + self._counter = layers.zeros(shape=[1], dtype='int64') + self._counter.stop_gradient = True + self._type = _DecoderType.BEAM_SEARCH + self._max_len = layers.fill_constant( + shape=[1], dtype='int64', value=max_len) + self._cond = layers.less_than( + x=self._counter, + y=layers.fill_constant( + shape=[1], dtype='int64', value=max_len)) + self._while_op = layers.While(self._cond) + self._state_cell = state_cell + self._state_cell._enter_decoder(self) + self._status = BeamSearchDecoder.BEFORE_BEAM_SEARCH_DECODER + self._zero_idx = layers.fill_constant( + shape=[1], value=0, dtype='int64', force_cpu=True) + self._array_dict = {} + self._array_link = [] + self._ids_array = None + self._scores_array = None + self._beam_size = beam_size + self._end_id = end_id + + self._init_ids = init_ids + self._init_scores = init_scores + self._target_dict_dim = target_dict_dim + self._topk_size = topk_size + self._sparse_emb = sparse_emb + self._word_dim = word_dim + self._input_var_dict = input_var_dict + + @contextlib.contextmanager + def block(self): + """ + Define the behavior of the decoder for each RNN time step. 
+ """ + if self._status != BeamSearchDecoder.BEFORE_BEAM_SEARCH_DECODER: + raise ValueError('block() can only be invoke once.') + + self._status = BeamSearchDecoder.IN_BEAM_SEARCH_DECODER + + with self._while_op.block(): + yield + with layers.Switch() as switch: + with switch.case(self._cond): + layers.increment(x=self._counter, value=1.0, in_place=True) + + for value, array in self._array_link: + layers.array_write( + x=value, i=self._counter, array=array) + + layers.less_than( + x=self._counter, y=self._max_len, cond=self._cond) + + self._status = BeamSearchDecoder.AFTER_BEAM_SEARCH_DECODER + self._state_cell._leave_decoder(self) + + @property + def type(self): + return self._type + + def early_stop(self): + """ + Stop the generation process in advance. Could be used as "break". + """ + layers.fill_constant( + shape=[1], value=0, dtype='bool', force_cpu=True, out=self._cond) + + def decode(self): + """ + Set up the computation within the decoder. Then you could call the + decoder to get the result of beam search decode. If you want to define + a more specific decoder, you could override this function. + + Examples: + .. code-block:: python + decoder.decode() + translation_ids, translation_scores = decoder() + """ + with self.block(): + prev_ids = self.read_array(init=self._init_ids, is_ids=True) + prev_scores = self.read_array( + init=self._init_scores, is_scores=True) + prev_ids_embedding = layers.embedding( + input=prev_ids, + size=[self._target_dict_dim, self._word_dim], + dtype='float32', + is_sparse=self._sparse_emb) + + feed_dict = {} + update_dict = {} + + for init_var_name, init_var in six.iteritems(self._input_var_dict): + if init_var_name not in self.state_cell._inputs: + raise ValueError('Variable ' + init_var_name + + ' not found in StateCell!\n') + + read_var = self.read_array(init=init_var) + update_dict[init_var_name] = read_var + feed_var_expanded = layers.sequence_expand(read_var, + prev_scores) + feed_dict[init_var_name] = feed_var_expanded + + for state_str in self._state_cell._state_names: + prev_state = self.state_cell.get_state(state_str) + prev_state_expanded = layers.sequence_expand(prev_state, + prev_scores) + self.state_cell.set_state(state_str, prev_state_expanded) + + for i, input_name in enumerate(self._state_cell._inputs): + if input_name not in feed_dict: + feed_dict[input_name] = prev_ids_embedding + + self.state_cell.compute_state(inputs=feed_dict) + current_state = self.state_cell.out_state() + current_state_with_lod = layers.lod_reset( + x=current_state, y=prev_scores) + scores = layers.fc(input=current_state_with_lod, + size=self._target_dict_dim, + act='softmax') + topk_scores, topk_indices = layers.topk(scores, k=self._topk_size) + accu_scores = layers.elementwise_add( + x=layers.log(x=topk_scores), + y=layers.reshape( + prev_scores, shape=[-1]), + axis=0) + selected_ids, selected_scores = layers.beam_search( + prev_ids, + prev_scores, + topk_indices, + accu_scores, + self._beam_size, + end_id=1, + level=0) + + with layers.Switch() as switch: + with switch.case(layers.is_empty(selected_ids)): + self.early_stop() + with switch.default(): + self.state_cell.update_states() + self.update_array(prev_ids, selected_ids) + self.update_array(prev_scores, selected_scores) + for update_name, var_to_update in six.iteritems( + update_dict): + self.update_array(var_to_update, feed_dict[update_name]) + + def read_array(self, init, is_ids=False, is_scores=False): + """ + Read an array to get the decoded ids and scores generated by previous + RNN step. 
At the first step of the RNN, the init variable must be used to + initialize the array. + + Args: + init (Variable): The initial variable for first step usage. init + must be provided. + is_ids (bool): Specify whether the variable is an id. + is_scores (bool): Specify whether the variable is a score. + + Returns: + The associated variable generated during previous RNN steps. + + Examples: + .. code-block:: python + prev_ids = decoder.read_array(init=init_ids, is_ids=True) + prev_scores = decoder.read_array(init=init_scores, is_scores=True) + """ + self._assert_in_decoder_block('read_array') + + if is_ids and is_scores: + raise ValueError('Shouldn\'t mark the current array as both the ' + 'ids array and the scores array at the same time.') + + if not isinstance(init, Variable): + raise TypeError('The input argument `init` must be a Variable.') + + parent_block = self._parent_block() + array = parent_block.create_var( + name=unique_name.generate('beam_search_decoder_array'), + type=core.VarDesc.VarType.LOD_TENSOR_ARRAY, + dtype=init.dtype) + parent_block.append_op( + type='write_to_array', + inputs={'X': init, + 'I': self._zero_idx}, + outputs={'Out': array}) + + if is_ids: + self._ids_array = array + elif is_scores: + self._scores_array = array + + read_value = layers.array_read(array=array, i=self._counter) + self._array_dict[read_value.name] = array + return read_value + + def update_array(self, array, value): + """ + Store the value generated in the current step in an array for each RNN + step. This array could be accessed by the read_array method. + + Args: + array (Variable): The array to append the new variable to. + value (Variable): The newly generated value to be stored. + """ + self._assert_in_decoder_block('update_array') + + if not isinstance(array, Variable): + raise TypeError( + 'The input argument `array` must be a Variable.') + if not isinstance(value, Variable): + raise TypeError('The input argument `value` must be a Variable.') + + array = self._array_dict.get(array.name, None) + if array is None: + raise ValueError('Please invoke read_array before update_array.') + self._array_link.append((value, array)) + + def __call__(self): + """ + Run the decode process and return the final decode result. + + Returns: + A tuple of decoded (id, score) pairs. id is a Variable that holds + the generated tokens, and score is a Variable with the same shape + as id, which holds the score for each generated token. + """ + if self._status != BeamSearchDecoder.AFTER_BEAM_SEARCH_DECODER: + raise ValueError('Output of BeamSearchDecoder object can ' + 'only be visited outside the block.') + return layers.beam_search_decode( + ids=self._ids_array, + scores=self._scores_array, + beam_size=self._beam_size, + end_id=self._end_id) + + @property + def state_cell(self): + self._assert_in_decoder_block('state_cell') + return self._state_cell + + def _parent_block(self): + """ + Getter of the parent block. + + Returns: + The parent block of the decoder. + """ + program = self._helper.main_program + parent_block_idx = program.current_block().parent_idx + if parent_block_idx < 0: + raise ValueError('Invalid block with index %d.' % parent_block_idx) + parent_block = program.block(parent_block_idx) + return parent_block + + def _assert_in_decoder_block(self, method): + if self._status != BeamSearchDecoder.IN_BEAM_SEARCH_DECODER: + raise ValueError('%s should be invoked inside block of ' + 'BeamSearchDecoder object.' % method)
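A hedged end-to-end sketch of the inference wiring for BeamSearchDecoder, following the docstring example; the dictionary size, embedding width and beam settings are illustrative assumptions, and `state_cell` is assumed to be built as in the StateCell example:

    init_ids = layers.data(
        name='init_ids', shape=[1], dtype='int64', lod_level=2)
    init_scores = layers.data(
        name='init_scores', shape=[1], dtype='float32', lod_level=2)
    decoder = BeamSearchDecoder(
        state_cell=state_cell,
        init_ids=init_ids,
        init_scores=init_scores,
        target_dict_dim=10000,   # assumed vocabulary size
        word_dim=512,            # assumed embedding width
        beam_size=4,
        end_id=1)
    decoder.decode()
    translation_ids, translation_scores = decoder()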
diff --git a/python/paddle/fluid/contrib/memory_usage_calc.py b/python/paddle/fluid/contrib/memory_usage_calc.py new file mode 100644 index 0000000000..5da846edb6 --- /dev/null +++ b/python/paddle/fluid/contrib/memory_usage_calc.py @@ -0,0 +1,102 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +This module provides a memory usage calculation function for users. +The purpose of this API is to allow users to estimate the memory usage of +a program with a specific batch size, so that users can choose an +appropriate batch size to fully utilize a GPU. + +This API is still under active development and may change drastically. +""" + +from .. import core +from ..framework import Program, Variable + +__all__ = ['memory_usage'] + +dtype_to_size = { + core.VarDesc.VarType.FP16: 2, + core.VarDesc.VarType.FP32: 4, + core.VarDesc.VarType.FP64: 8, + core.VarDesc.VarType.INT16: 2, + core.VarDesc.VarType.INT32: 4, + core.VarDesc.VarType.INT64: 8, + core.VarDesc.VarType.BOOL: 1, + core.VarDesc.VarType.UINT8: 1, +} + +DEBUG = False + + +def memory_usage(program, batch_size): + """ + Get the estimated memory usage of the program with the given batch size. + + Args: + program(Program): The current Program. + batch_size(int): The current input data batch_size. + + Returns: + min_total_memory(float): the estimated memory usage lower bound. + max_total_memory(float): the estimated memory usage upper bound. + unit_str(string): the unit of the estimated usage result. + + Examples: + + >>> import paddle.fluid as fluid + >>> lower_usage, upper_usage, unit = fluid.contrib.memory_usage( + fluid.default_main_program(), batch_size=10) + >>> print "memory usage is about %.3f - %.3f %s" % \ + (lower_usage, upper_usage, unit) + + """ + + # Parameters check + if not isinstance(program, Program): + raise TypeError( + "Calculating Memory Usage requires Program as its Parameter."
+ "But you passed in %s" % (type(prgram))) + if batch_size <= 0: + raise ValueError("The batch size need to be positive.") + + # Get the var_name list of first block and calculate + total_memory = 0.0 + for var in program.global_block().vars.itervalues(): + data_count = 1 + for x in var.shape: + if x == -1: + data_count *= batch_size + else: + data_count *= x + var_memory = data_count * dtype_to_size[var.dtype] + if DEBUG: + print "%s memory usage: %d" % (var.name, var_memory) + total_memory += var_memory + if DEBUG: + print "total memory usage: %.2f" % (total_memory) + + # Convert appropriate unit + unit_str = "B" + if total_memory > 1024: + total_memory /= 1024 + unit_str = "KB" + if total_memory > 1024: + total_memory /= 1024 + unit_str = "MB" + + # Append extra memory consumption (5% - 10%) + min_total_memory = total_memory * 1.05 + max_total_memory = total_memory * 1.1 + + return min_total_memory, max_total_memory, unit_str diff --git a/python/paddle/fluid/data_feeder.py b/python/paddle/fluid/data_feeder.py index ac02401c79..9452cf0e2a 100644 --- a/python/paddle/fluid/data_feeder.py +++ b/python/paddle/fluid/data_feeder.py @@ -12,12 +12,14 @@ # See the License for the specific language governing permissions and # limitations under the License. -from __future__ import print_function -import core +from . import core import numpy -import six.moves as six +import os +import six +from six.moves import zip, range, xrange +import multiprocessing -from framework import Variable, default_main_program +from .framework import Variable, default_main_program __all__ = ['DataFeeder'] @@ -27,6 +29,13 @@ class DataToLoDTensorConverter(object): self.place = place self.lod_level = lod_level self.shape = shape + negtive_count = 0 + for s in self.shape: + if s < 0: + negtive_count += 1 + if negtive_count > 1: + self.shape = None + break if dtype == core.VarDesc.VarType.FP32: self.dtype = 'float32' elif dtype == core.VarDesc.VarType.INT64: @@ -35,15 +44,17 @@ class DataToLoDTensorConverter(object): self.dtype = 'float64' elif dtype == core.VarDesc.VarType.INT32: self.dtype = 'int32' + elif dtype == core.VarDesc.VarType.UINT8: + self.dtype = 'uint8' else: raise ValueError("dtype must be any of [int32, float32, int64, " - "float64]") + "float64, uint8]") self.data = [] self.lod = [] - for i in six.range(lod_level): - self.lod.append([0]) + for i in six.moves.range(lod_level): + self.lod.append([]) def feed(self, data): self._feed_impl_(data, self.lod, self.lod_level) @@ -52,21 +63,77 @@ class DataToLoDTensorConverter(object): if lod_level == 0: self.data.append(data) else: - cur_lod_len = len(data) - lod[-1].append(lod[-1][-1] + cur_lod_len) + lod[0].append(len(data)) for each_data in data: - self._feed_impl_(each_data, lod[:-1], lod_level - 1) + self._feed_impl_(each_data, lod[1:], lod_level - 1) def done(self): - arr = numpy.array(self.data, dtype=self.dtype).reshape(self.shape) + arr = numpy.array(self.data, dtype=self.dtype) + if self.shape: + arr = arr.reshape(self.shape) t = core.LoDTensor() t.set(arr, self.place) if self.lod_level > 0: - t.set_lod(self.lod) + t.set_recursive_sequence_lengths(self.lod) return t class DataFeeder(object): + """ + DataFeeder converts the data that returned by a reader into a data + structure that can feed into Executor and ParallelExecutor. The reader + usually returns a list of mini-batch data entries. Each data entry in + the list is one sample. Each sample is a list or a tuple with one + feature or multiple features. + + The simple usage shows below: + + .. 
diff --git a/python/paddle/fluid/data_feeder.py b/python/paddle/fluid/data_feeder.py index ac02401c79..9452cf0e2a 100644 --- a/python/paddle/fluid/data_feeder.py +++ b/python/paddle/fluid/data_feeder.py @@ -12,12 +12,14 @@ # See the License for the specific language governing permissions and # limitations under the License. -from __future__ import print_function -import core +from . import core import numpy -import six.moves as six +import os +import six +from six.moves import zip, range, xrange +import multiprocessing -from framework import Variable, default_main_program +from .framework import Variable, default_main_program __all__ = ['DataFeeder'] @@ -27,6 +29,13 @@ class DataToLoDTensorConverter(object): self.place = place self.lod_level = lod_level self.shape = shape + negative_count = 0 + for s in self.shape: + if s < 0: + negative_count += 1 + if negative_count > 1: + self.shape = None + break if dtype == core.VarDesc.VarType.FP32: self.dtype = 'float32' elif dtype == core.VarDesc.VarType.INT64: @@ -35,15 +44,17 @@ self.dtype = 'float64' elif dtype == core.VarDesc.VarType.INT32: self.dtype = 'int32' + elif dtype == core.VarDesc.VarType.UINT8: + self.dtype = 'uint8' else: raise ValueError("dtype must be any of [int32, float32, int64, " - "float64]") + "float64, uint8]") self.data = [] self.lod = [] - for i in six.range(lod_level): - self.lod.append([0]) + for i in six.moves.range(lod_level): + self.lod.append([]) def feed(self, data): self._feed_impl_(data, self.lod, self.lod_level) @@ -52,21 +63,77 @@ if lod_level == 0: self.data.append(data) else: - cur_lod_len = len(data) - lod[-1].append(lod[-1][-1] + cur_lod_len) + lod[0].append(len(data)) for each_data in data: - self._feed_impl_(each_data, lod[:-1], lod_level - 1) + self._feed_impl_(each_data, lod[1:], lod_level - 1) def done(self): - arr = numpy.array(self.data, dtype=self.dtype).reshape(self.shape) + arr = numpy.array(self.data, dtype=self.dtype) + if self.shape: + arr = arr.reshape(self.shape) t = core.LoDTensor() t.set(arr, self.place) if self.lod_level > 0: - t.set_lod(self.lod) + t.set_recursive_sequence_lengths(self.lod) return t class DataFeeder(object): + """ + DataFeeder converts the data returned by a reader into a data + structure that can be fed into Executor and ParallelExecutor. The reader + usually returns a list of mini-batch data entries. Each data entry in + the list is one sample. Each sample is a list or a tuple with one + feature or multiple features. + + A simple usage example is shown below: + + .. code-block:: python + + place = fluid.CPUPlace() + img = fluid.layers.data(name='image', shape=[1, 28, 28]) + label = fluid.layers.data(name='label', shape=[1], dtype='int64') + feeder = fluid.DataFeeder([img, label], fluid.CPUPlace()) + result = feeder.feed([([0] * 784, [9]), ([1] * 784, [1])]) + + + If you want to feed data to each GPU separately in advance when you + use multi-GPU to train a model, you can use the `decorate_reader` function. + + .. code-block:: python + + place=fluid.CUDAPlace(0) + feeder = fluid.DataFeeder(place=place, feed_list=[data, label]) + reader = feeder.decorate_reader( + paddle.batch(flowers.train(), batch_size=16)) + + Args: + feed_list(list): The Variables or the names of Variables that will + be fed into the model. + place(Place): place indicates whether to feed data into CPU or GPU. If you want to + feed data into GPU, please use `fluid.CUDAPlace(i)` (`i` represents + the GPU id), or if you want to feed data into CPU, please use + `fluid.CPUPlace()`. + program(Program): The Program that the data will be fed into; if program + is None, it will use default_main_program(). Default None. + + Raises: + ValueError: If some Variable is not in this Program. + + Examples: + .. code-block:: python + + # ... + place = fluid.CPUPlace() + feed_list = [ + main_program.global_block().var(var_name) for var_name in feed_vars_name + ] # feed_vars_name is a list of variables' names. + feeder = fluid.DataFeeder(feed_list, place) + for data in reader(): + outs = exe.run(program=main_program, + feed=feeder.feed(data)) + """ + def __init__(self, feed_list, place, program=None): self.feed_dtypes = [] self.feed_names = [] @@ -75,7 +142,7 @@ if program is None: program = default_main_program() for each_var in feed_list: - if isinstance(each_var, basestring): + if isinstance(each_var, six.string_types): each_var = program.block(0).var(each_var) if not isinstance(each_var, Variable): raise TypeError("Feed list should contain a list of variable") @@ -96,8 +163,18 @@ self.place = place def feed(self, iterable): + """ + According to feed_list and iterable, converts the input into + a data structure that can be fed into Executor and ParallelExecutor. + + Args: + iterable(list|tuple): the input data. + + Returns: + dict: the result of conversion. + """ converter = [] - for lod_level, shape, dtype in six.zip( + for lod_level, shape, dtype in six.moves.zip( self.feed_lod_level, self.feed_shapes, self.feed_dtypes): converter.append( DataToLoDTensorConverter( @@ -110,9 +187,104 @@ assert len(each_sample) == len(converter), ( "The number of fields in data (%s) does not match " + "len(feed_list) (%s)") % (len(each_sample), len(converter)) - for each_converter, each_slot in six.zip(converter, each_sample): + for each_converter, each_slot in six.moves.zip(converter, + each_sample): each_converter.feed(each_slot) ret_dict = {} - for each_name, each_converter in six.zip(self.feed_names, converter): + for each_name, each_converter in six.moves.zip(self.feed_names, + converter): ret_dict[each_name] = each_converter.done() return ret_dict + + def feed_parallel(self, iterable, num_places=None): + """ + Takes multiple mini-batches. Each mini-batch will be fed to a separate + device in advance. + + Args: + iterable(list|tuple): the input data. + num_places(int): the number of devices. Default None. + + Returns: + dict: the result of conversion. + + Notes: + The number of devices and the number of mini-batches must be the same. 
+ """ + if isinstance(self.place, core.CUDAPlace): + places = [ + core.CUDAPlace(i) + for i in six.moves.xrange( + self._get_number_of_places_(num_places)) + ] + else: + places = [ + core.CPUPlace() + for _ in six.moves.xrange( + self._get_number_of_places_(num_places)) + ] + + if len(iterable) != len(places): + raise ValueError("feed_parallel takes multiple mini-batches. Each " + "mini-batch will be feed on each device. The " + "number of devices and number of mini-batches " + "must be same.") + + place = self.place + for p, batch in six.moves.zip(places, iterable): + self.place = p + yield self.feed(batch) + self.place = place + + def _get_number_of_places_(self, num_places): + if num_places is not None: + return int(num_places) + elif isinstance(self.place, core.CUDAPlace): + return core.get_cuda_device_count() + else: + cpu_num = int( + os.environ.get('CPU_NUM', multiprocessing.cpu_count())) + return cpu_num + + def decorate_reader(self, + reader, + multi_devices, + num_places=None, + drop_last=True): + """ + Converter the input data into a data that returned by reader into + multiple mini-batches. Each mini-batch will be feed on each device. + + Args: + reader(fun): the input data. + multi_devices(bool): the number of places. Default None. + num_places(int): the number of places. Default None. + drop_last(bool): the number of places. Default None. + + Returns: + dict: the result of conversion. + + Raises: + ValueError: If drop_last is False and the data batch which cannot + fit for devices. + """ + + def __reader_creator__(): + if not multi_devices: + for item in reader(): + yield self.feed(item) + else: + num = self._get_number_of_places_(num_places) + item = [] + for batch in reader(): + item.append(batch) + if len(item) == num: + yield list(self.feed_parallel(item, num)) + item = [] + if not drop_last and len(item) != 0: + raise ValueError( + "The data batch which cannot fit for devices will be " + "dropped is not implementation. Other strategies are " + "not implemented") + + return __reader_creator__ diff --git a/python/paddle/fluid/debuger.py b/python/paddle/fluid/debugger.py similarity index 98% rename from python/paddle/fluid/debuger.py rename to python/paddle/fluid/debugger.py index 1c56064a1e..b7a92cf044 100644 --- a/python/paddle/fluid/debuger.py +++ b/python/paddle/fluid/debugger.py @@ -14,8 +14,8 @@ import sys import re -from graphviz import GraphPreviewGenerator -import proto.framework_pb2 as framework_pb2 +from .graphviz import GraphPreviewGenerator +from .proto import framework_pb2 from google.protobuf import text_format _vartype2str_ = [ diff --git a/python/paddle/fluid/evaluator.py b/python/paddle/fluid/evaluator.py index 1ee1d37271..c0671cce9a 100644 --- a/python/paddle/fluid/evaluator.py +++ b/python/paddle/fluid/evaluator.py @@ -15,11 +15,11 @@ import warnings import numpy as np -import layers -from framework import Program, Variable, program_guard -import unique_name -from layer_helper import LayerHelper -from initializer import Constant +from . import layers +from .framework import Program, Variable, program_guard +from . import unique_name +from .layer_helper import LayerHelper +from .initializer import Constant __all__ = [ 'ChunkEvaluator', @@ -41,7 +41,12 @@ def _clone_var_(block, var): class Evaluator(object): """ - Base Class for all evaluators + Warning: better to use the fluid.metrics.* things, more + flexible support via pure Python and Operator, and decoupled + with executor. Short doc are intended to urge new user + start from Metrics. 
diff --git a/python/paddle/fluid/debuger.py b/python/paddle/fluid/debugger.py similarity index 98% rename from python/paddle/fluid/debuger.py rename to python/paddle/fluid/debugger.py index 1c56064a1e..b7a92cf044 100644 --- a/python/paddle/fluid/debuger.py +++ b/python/paddle/fluid/debugger.py @@ -14,8 +14,8 @@ import sys import re -from graphviz import GraphPreviewGenerator -import proto.framework_pb2 as framework_pb2 +from .graphviz import GraphPreviewGenerator +from .proto import framework_pb2 from google.protobuf import text_format _vartype2str_ = [ diff --git a/python/paddle/fluid/evaluator.py b/python/paddle/fluid/evaluator.py index 1ee1d37271..c0671cce9a 100644 --- a/python/paddle/fluid/evaluator.py +++ b/python/paddle/fluid/evaluator.py @@ -15,11 +15,11 @@ import warnings import numpy as np -import layers -from framework import Program, Variable, program_guard -import unique_name -from layer_helper import LayerHelper -from initializer import Constant +from . import layers +from .framework import Program, Variable, program_guard +from . import unique_name +from .layer_helper import LayerHelper +from .initializer import Constant __all__ = [ 'ChunkEvaluator', @@ -41,7 +41,12 @@ def _clone_var_(block, var): class Evaluator(object): """ - Base Class for all evaluators + Warning: it is better to use the fluid.metrics.* classes instead; they + offer more flexible support via pure Python and Operators, and are + decoupled from the executor. This short doc is intended to urge new + users to start from Metrics. + + Base Class for all evaluators. Args: name(str): The name of evaluator. such as, "accuracy". Used for generate @@ -69,6 +74,10 @@ def reset(self, executor, reset_program=None): """ reset metric states at the begin of each pass/user specified batch + + Args: + executor(Executor|ParallelExecutor): an executor for executing the reset_program + reset_program(Program): a single Program for the reset process """ if reset_program is None: reset_program = Program() @@ -85,15 +94,16 @@ def eval(self, executor, eval_program=None): """ Evaluate the statistics merged by multiple mini-batches. + Args: + executor(Executor|ParallelExecutor): an executor for executing the eval_program + eval_program(Program): a single Program for the eval process """ raise NotImplementedError() - def create_state(self, suffix, dtype, shape): + def _create_state(self, suffix, dtype, shape): """ Create state variable. - NOTE: It is not a public API. - Args: suffix(str): the state suffix. dtype(str|core.VarDesc.VarType): the state data type @@ -113,9 +123,35 @@ class ChunkEvaluator(Evaluator): """ + Warning: This would be deprecated in the future. Please use fluid.metrics.ChunkEvaluator + instead. + Accumulate counter numbers output by chunk_eval from mini-batches and compute the precision, recall and F1-score using the accumulated counter numbers. + For some basics of chunking, please refer to + 'Chunking with Support Vector Machines'. + + Args: + input (Variable): prediction output of the network. + label (Variable): label of the test data set. + chunk_scheme (str): can be IOB/IOE/IOBES and IO. See the chunk_eval op for details. + num_chunk_types (int): the number of chunk types. + excluded_chunk_types (list): A list including chunk type ids, indicating chunk types that are not counted. + + Returns: + tuple: tuple containing: precision, recall, f1_score + + Examples: + .. code-block:: python + + exe = fluid.Executor(place) + evaluator = fluid.Evaluator.ChunkEvaluator(input, label) + for epoch in PASS_NUM: + evaluator.reset(exe) + for data in batches: + loss = exe.run(fetch_list=[cost]) + precision, recall, f1_score = evaluator.eval(exe) """ def __init__( @@ -130,11 +166,11 @@ if main_program.current_block().idx != 0: raise ValueError("You can only invoke Evaluator in root block") - self.num_infer_chunks = self.create_state( + self.num_infer_chunks = self._create_state( dtype='int64', shape=[1], suffix='num_infer_chunks') - self.num_label_chunks = self.create_state( + self.num_label_chunks = self._create_state( dtype='int64', shape=[1], suffix='num_label_chunks') - self.num_correct_chunks = self.create_state( + self.num_correct_chunks = self._create_state( dtype='int64', shape=[1], suffix='num_correct_chunks') precision, recall, f1_score, num_infer_chunks, num_label_chunks, num_correct_chunks = layers.chunk_eval( input=input, @@ -178,6 +214,8 @@ class EditDistance(Evaluator): """ + Warning: This would be deprecated in the future. Please use fluid.metrics.EditDistance + instead. Accumulate edit distance sum and sequence number from mini-batches and compute the average edit_distance and instance error of all batches. @@ -188,15 +226,16 @@ ignored_tokens(list of int): Tokens that should be removed before calculating edit distance. - Example: + Examples: + .. 
code-block:: python - exe = fluid.executor(place) - distance_evaluator = fluid.Evaluator.EditDistance(input, label) - for epoch in PASS_NUM: - distance_evaluator.reset(exe) - for data in batches: - loss = exe.run(fetch_list=[cost]) - distance, instance_error = distance_evaluator.eval(exe) + exe = fluid.Executor(place) + distance_evaluator = fluid.Evaluator.EditDistance(input, label) + for epoch in PASS_NUM: + distance_evaluator.reset(exe) + for data in batches: + loss = exe.run(fetch_list=[cost]) + distance, instance_error = distance_evaluator.eval(exe) In the above example: 'distance' is the average of the edit distance in a pass. @@ -210,11 +249,11 @@ if main_program.current_block().idx != 0: raise ValueError("You can only invoke Evaluator in root block") - self.total_distance = self.create_state( + self.total_distance = self._create_state( dtype='float32', shape=[1], suffix='total_distance') - self.seq_num = self.create_state( + self.seq_num = self._create_state( dtype='int64', shape=[1], suffix='seq_num') - self.instance_error = self.create_state( + self.instance_error = self._create_state( dtype='int64', shape=[1], suffix='instance_error') distances, seq_num = layers.edit_distance( input=input, label=label, ignored_tokens=ignored_tokens) @@ -256,9 +295,10 @@ class DetectionMAP(Evaluator): """ + Warning: This would be deprecated in the future. Please use fluid.metrics.DetectionMAP + instead. Calculate the detection mean average precision (mAP). - TODO (Dang Qingqing): update the following doc. The general steps are as follows: 1. calculate the true positive and false positive according to the input of detection and labels. 2. calculate mAP value, support two versions: '11 point' and 'integral'. Please get more information from the following articles: https://sanchom.wordpress.com/tag/average-precision/ https://arxiv.org/abs/1512.02325 Args: input (Variable): The detection results, which is a LoDTensor with shape [M, 6]. The layout is [label, confidence, xmin, ymin, xmax, ymax]. gt_label (Variable): The ground truth label index, which is a LoDTensor with shape [N, 1]. - gt_difficult (Variable): Whether this ground truth is a difficult - bounding box (bbox), which is a LoDTensor [N, 1]. gt_box (Variable): The ground truth bounding box (bbox), which is a LoDTensor with shape [N, 6]. The layout is [xmin, ymin, xmax, ymax]. + gt_difficult (Variable|None): Whether this ground truth is a difficult + bounding box (bbox), which can be a LoDTensor [N, 1] or not set. If None, + it means all the ground truth labels are not difficult bboxes. class_num (int): The class number. background_label (int): The index of background label, the background label will be ignored. If set to -1, then all categories will be @@ -284,24 +325,26 @@ overlap_threshold (float): The threshold for deciding true/false positive, 0.5 by default. evaluate_difficult (bool): Whether to consider difficult ground truth - for evaluation, True by defalut. + for evaluation, True by default. This argument does not work when + gt_difficult is None. ap_version (string): The average precision calculation ways, it must be 'integral' or '11point'. Please check https://sanchom.wordpress.com/tag/average-precision/ for details. - 11point: the 11-point interpolated average precision. - integral: the natural integral of the precision-recall curve. - Example: + Examples: + .. 
code-block:: python - exe = fluid.executor(place) - map_evaluator = fluid.Evaluator.DetectionMAP(input, - gt_label, gt_difficult, gt_box) - cur_map, accum_map = map_evaluator.get_map_var() - fetch = [cost, cur_map, accum_map] - for epoch in PASS_NUM: - map_evaluator.reset(exe) - for data in batches: - loss, cur_map_v, accum_map_v = exe.run(fetch_list=fetch) + exe = fluid.Executor(place) + map_evaluator = fluid.Evaluator.DetectionMAP(input, + gt_label, gt_box, gt_difficult) + cur_map, accum_map = map_evaluator.get_map_var() + fetch = [cost, cur_map, accum_map] + for epoch in PASS_NUM: + map_evaluator.reset(exe) + for data in batches: + loss, cur_map_v, accum_map_v = exe.run(fetch_list=fetch) In the above example: @@ -313,8 +356,8 @@ input, gt_label, gt_box, - gt_difficult, - class_num, + gt_difficult=None, + class_num=None, background_label=0, overlap_threshold=0.5, evaluate_difficult=True, @@ -322,8 +365,11 @@ super(DetectionMAP, self).__init__("map_eval") gt_label = layers.cast(x=gt_label, dtype=gt_box.dtype) - gt_difficult = layers.cast(x=gt_difficult, dtype=gt_box.dtype) - label = layers.concat([gt_label, gt_difficult, gt_box], axis=1) + if gt_difficult: + gt_difficult = layers.cast(x=gt_difficult, dtype=gt_box.dtype) + label = layers.concat([gt_label, gt_difficult, gt_box], axis=1) + else: + label = layers.concat([gt_label, gt_box], axis=1) # calculate mean average precision (mAP) of current mini-batch map = layers.detection_map( @@ -335,9 +381,10 @@ evaluate_difficult=evaluate_difficult, ap_version=ap_version) - self.create_state(dtype='int32', shape=None, suffix='accum_pos_count') - self.create_state(dtype='float32', shape=None, suffix='accum_true_pos') - self.create_state(dtype='float32', shape=None, suffix='accum_false_pos') + self._create_state(dtype='int32', shape=None, suffix='accum_pos_count') + self._create_state(dtype='float32', shape=None, suffix='accum_true_pos') + self._create_state( + dtype='float32', shape=None, suffix='accum_false_pos') self.has_state = None var = self.helper.create_variable( diff --git a/python/paddle/fluid/executor.py b/python/paddle/fluid/executor.py index 7ad028714d..e24b9faae2 100644 --- a/python/paddle/fluid/executor.py +++ b/python/paddle/fluid/executor.py @@ -14,21 +14,27 @@ import numpy as np import contextlib -from framework import Program, default_main_program, Variable +import six +from .framework import Program, default_main_program, Variable from . import core -__all__ = [ - 'Executor', 'global_scope', 'scope_guard', 'switch_scope', 'fetch_var' -] +__all__ = ['Executor', 'global_scope', 'scope_guard', '_switch_scope'] g_scope = core.Scope() def global_scope(): + """ + Get the global/default scope instance. There are a lot of APIs that use + :code:`global_scope` as their default value, e.g., :code:`Executor.run` + + Returns: + Scope: The global/default scope instance. + """ return g_scope -def switch_scope(scope): +def _switch_scope(scope): global g_scope ex = g_scope g_scope = scope @@ -37,12 +43,42 @@ @contextlib.contextmanager def scope_guard(scope): - ex = switch_scope(scope) + """ + Change the global/default scope instance by Python `with` statement. All + variables in runtime will be assigned to the new scope. + + Examples: + >>> import paddle.fluid as fluid + >>> new_scope = fluid.Scope() + >>> with fluid.scope_guard(new_scope): + >>> ... + + Args: + scope: The new global/default scope. 
+ """ + ex = _switch_scope(scope) yield - switch_scope(ex) + _switch_scope(ex) def as_numpy(tensor): + """ + Convert a Tensor to a numpy.ndarray, its only support Tensor without LoD information. + For higher dimensional sequence data, please use LoDTensor directly. + Examples: + >>> import paddle.fluid as fluid + >>> outs = executor.run(...) + >>> np_outs = map(lambda x: as_numpy(x), outs) + >>> ... + + Args: + tensor(Variable): a instance of Tensor + + Returns: + numpy.ndarray + """ + if isinstance(tensor, core.LoDTensorArray): + return [as_numpy(t) for t in tensor] if isinstance(tensor, list): return [as_numpy(t) for t in tensor] assert isinstance(tensor, core.LoDTensor) @@ -133,16 +169,20 @@ def has_fetch_operators(block, fetch_targets, fetch_holder_name): return fetch_count > 0 -def fetch_var(name, scope=None, return_numpy=True): +def _fetch_var(name, scope=None, return_numpy=True): """ - Fetch the value of the variable with the given name from the given scope + Fetch the value of the variable with the given name from the + given scope. + Args: name(str): name of the variable. Typically, only persistable variables can be found in the scope used for running the program. scope(core.Scope|None): scope object. It should be the scope where you pass to Executor.run() when running your program. - If None, global_scope() will be used. - return_numpy(bool): whether convert the tensor to numpy.ndarray + If None, global_scope() will be used. Default None. + return_numpy(bool): whether convert the tensor to numpy.ndarray. + Default True. + Returns: LodTensor|numpy.ndarray """ @@ -162,41 +202,82 @@ def fetch_var(name, scope=None, return_numpy=True): return tensor -def get_program_cache_key(feed, fetch_list): - feed_var_names = feed.keys() +def _get_program_cache_key(feed, fetch_list): + feed_var_names = list(feed.keys()) def to_name_str(var): if isinstance(var, Variable): return var.desc.name() elif isinstance(var, str): return var + elif isinstance(var, six.string_types): + return str(var) else: raise TypeError(str(var) + " should be Variable or str") - fetch_var_names = map(to_name_str, fetch_list) + fetch_var_names = list(map(to_name_str, fetch_list)) return str(feed_var_names + fetch_var_names) +def _as_lodtensor(data, place): + """ + Convert numpy.ndarray to Tensor, its only support Tensor without LoD information. + For higher dimensional sequence data, please use LoDTensor directly. + + Examples: + >>> import paddle.fluid as fluid + >>> place = fluid.CPUPlace() + >>> exe = fluid.executor(place) + >>> data = np.array(size=(100, 200, 300)) + >>> np_outs = map(lambda x: fluid.executor._as_lodtensor(x, place), data) + >>> ... + + Args: + data(numpy.ndarray): a instance of array + + Returns: + LoDTensor + """ + if isinstance(data, list): + raise RuntimeError("Some of your feed data hold LoD information. \ + They can not be completely cast from a list of Python \ + ndarray to LoDTensor. Please convert data to LoDTensor \ + directly before feeding the data.\ + ") + # single tensor case + tensor = core.LoDTensor() + tensor.set(data, place) + return tensor + + class Executor(object): + """ + An Executor in Python, only support the single-GPU running. For multi-cards, please refer to + ParallelExecutor. + Python executor takes a program, add feed operators and fetch operators to this program according + to feed map and fetch_list. Feed map provides input data for the program. fetch_list provides + the variables(or names) that user want to get after program run. 
class Executor(object): + """ + An Executor in Python; it only supports single-GPU running. For multi-cards, please refer to + ParallelExecutor. + The Python executor takes a program and adds feed operators and fetch operators to this program according + to the feed map and fetch_list. The feed map provides input data for the program. fetch_list provides + the variables (or names) that users want to get after the program runs. Note: the executor will run all + operators in the program, not only the operators that the fetch_list depends on. + It stores the global variables into the global scope, and creates a local scope for the temporary + variables. The local scope contents will be discarded after every minibatch forward/backward pass finishes. + But the global scope variables will be persistent through different runs. + All ops in the program will be run in sequence. + + Args: + place(core.CPUPlace|core.CUDAPlace(n)): indicates the device on which the executor runs + + Note: For debugging a complicated network on parallel GPUs, you can test it on the executor. + They have exactly the same arguments, and are expected to give the same results. + """ + + def __init__(self, place): self.place = place p = core.Place() p.set_place(place) self.executor = core.Executor(p) self.program_caches = dict() - - def as_lodtensor(self, data): - if isinstance(data, list): - raise RuntimeError("Some of your feed data hold LoD information. \ - They can not be completely cast from a list of Python \ - ndarray to LoDTensor. Please convert data to LoDTensor \ - directly before feeding the data.\ - ") - # single tensor case - tensor = core.LoDTensor() - tensor.set(data, self.place) - return tensor + self._closed = False def _get_program_cache(self, program_cache_key): return self.program_caches.get(program_cache_key, None) @@ -230,7 +311,7 @@ if not has_feed_operators(global_block, feed, feed_var_name): for i, name in enumerate(feed): out = global_block.var(name) - global_block.prepend_op( + global_block._prepend_op( type='feed', inputs={'X': [feed_var]}, outputs={'Out': [out]}, @@ -256,7 +337,7 @@ feed_target_name = op.desc.output('Out')[0] cur_feed = feed[feed_target_name] if not isinstance(cur_feed, core.LoDTensor): - cur_feed = self.as_lodtensor(cur_feed) + cur_feed = _as_lodtensor(cur_feed, self.place) idx = op.desc.attr('col') core.set_feed_variable(scope, cur_feed, feed_var_name, idx) else: @@ -265,10 +346,28 @@ def _fetch_data(self, fetch_list, fetch_var_name, scope): outs = [ core.get_fetch_variable(scope, fetch_var_name, i) - for i in xrange(len(fetch_list)) + for i in range(len(fetch_list)) ] return outs + def close(self): + """ + Close this executor. + + You can no longer use this executor after calling this method. + For distributed training, this method frees the resources on the PServers related to + the current Trainer. + + Example: + >>> cpu = core.CPUPlace() + >>> exe = Executor(cpu) + >>> ... + >>> exe.close() + """ + if not self._closed: + self.executor.close() + self._closed = True + def run(self, program=None, feed=None, @@ -278,40 +377,72 @@ scope=None, return_numpy=True, use_program_cache=False): - """ Run program by this Executor. Feed data by feed map, fetch result by fetch_list. - + """ + Run program by this Executor. Feed data by feed map, fetch result by fetch_list. Python executor takes a program, add feed operators and fetch operators to this program according to feed map and fetch_list. Feed map provides input data for the program. fetch_list provides - the variables(or names) that user want to get after program run. Note: the executor will run all + the variables (or names) that users want to get after the program runs. + + Note: the executor will run all operators in the program but not only the operators dependent by the fetch_list - :param program: the program that need to run, if not provied, then default_main_program will be used. 
- :param feed: feed variable map, e.g. {"image": ImageData, "label": LableData} - :param fetch_list: a list of variable or variable names that user want to get, run will return them according - to this list. - :param feed_var_name: the name for the input variable of feed Operator. - :param fetch_var_name: the name for the output variable of feed Operator. - :param scope: the scope used to run this program, you can switch it to different scope. default is global_scope - :param return_numpy: if convert the fetched tensor to numpy - :param use_program_cache: set use_program_cache to true if program not changed compare to the last step. - :return: result according to fetch_list. + Args: + program(Program): the program that needs to run; if not provided, then default_main_program will be used. + feed(dict): feed variable map, e.g. {"image": ImageData, "label": LabelData} + fetch_list(list): a list of variables or variable names that users want to get; run will return them according to this list. + feed_var_name(str): the name for the input variable of the feed Operator. + fetch_var_name(str): the name for the output variable of the fetch Operator. + scope(Scope): the scope used to run this program; you can switch it to a different scope. Default is global_scope + return_numpy(bool): whether to convert the fetched tensors to numpy + use_program_cache(bool): set use_program_cache to True if the program has not changed compared to the last step. + + Returns: + + list(numpy.array): fetch result according to fetch_list. + + + Examples: + + >>> data = layers.data(name='X', shape=[1], dtype='float32') + >>> hidden = layers.fc(input=data, size=10) + >>> loss = layers.mean(hidden) + >>> adam = fluid.optimizer.Adam() + >>> adam.minimize(loss) + + >>> cpu = core.CPUPlace() + >>> exe = Executor(cpu) + >>> exe.run(default_startup_program()) + + >>> x = numpy.random.random(size=(10, 1)).astype('float32') + >>> outs = exe.run( + >>> feed={'X': x}, + >>> fetch_list=[loss.name]) """ + + if self._closed: + raise RuntimeError("Attempted to use a closed Executor") + if feed is None: feed = {} if not isinstance(feed, dict): - raise TypeError("feed should be a map") + raise TypeError( + "feed requires dict as its Parameter. But you passed in %s" % + (type(feed))) if fetch_list is None: fetch_list = [] if program is None: program = default_main_program() if not isinstance(program, Program): - raise TypeError() + raise TypeError( + "Executor requires Program as its Parameter. But you passed in %s" + % (type(program))) if scope is None: scope = global_scope() - cache_key = get_program_cache_key(feed, fetch_list) + cache_key = _get_program_cache_key(feed, fetch_list) if use_program_cache: cached_program = self._get_program_cache(cache_key) if cached_program is None: diff --git a/python/paddle/fluid/framework.py b/python/paddle/fluid/framework.py index d7eda619c3..45b3abb88c 100644 --- a/python/paddle/fluid/framework.py +++ b/python/paddle/fluid/framework.py @@ -15,23 +15,30 @@ import collections import contextlib import re +import six import numpy as np -import proto.framework_pb2 as framework_pb2 -from . import core -import unique_name +from .proto import framework_pb2 +try: + from . import core +except ImportError as e: + raise ImportError( + """NOTE: You may need to run \"export LD_LIBRARY_PATH=/usr/local/lib:$LD_LIBRARY_PATH\" + if you encounter \"libmkldnn.so not found\" errors. If you have python + installed in another directory, replace \"/usr/local/lib\" with your own + directory. 
The original error is: \n""" + e.message) +except Exception as e: + raise e +from . import unique_name __all__ = [ - 'Block', - 'Variable', 'Program', 'Operator', + 'Parameter', 'default_startup_program', 'default_main_program', 'program_guard', - 'switch_startup_program', - 'switch_main_program', 'get_var', ] @@ -43,7 +50,8 @@ ZERO_VAR_SUFFIX = core.kZeroVarSuffix() def grad_var_name(var_name): """ - return gradient name for a certain var name + Returns: + str: gradient name for a certain var name """ return var_name + GRAD_VAR_SUFFIX @@ -51,10 +59,12 @@ def convert_np_dtype_to_dtype_(np_dtype): """ Convert the data type in numpy to the data type in Paddle + Args: - np_dtype(np.dtype): the data type in numpy + np_dtype(np.dtype): the data type in numpy. - Returns(core.VarDesc.VarType): the data type in Paddle + Returns: + core.VarDesc.VarType: the data type in Paddle. """ dtype = np.dtype(np_dtype) @@ -72,8 +82,12 @@ return core.VarDesc.VarType.INT64 elif dtype == np.bool: return core.VarDesc.VarType.BOOL + elif dtype == np.uint16: + return core.VarDesc.VarType.INT16 + elif dtype == np.uint8: + return core.VarDesc.VarType.UINT8 else: - raise ValueError("Not supported numpy dtype " + str(dtype)) + raise ValueError("Not supported numpy dtype " + six.binary_type(dtype)) def dtype_is_floating(dtype): @@ -116,37 +130,53 @@ def _debug_string_(proto, throw_on_error=True): class Variable(object): """ - Python variable. Every input and output of an operator is a variable. Every - variable belongs to a block. The variable has a name and two variables in - different blocks could have the same name. - - There are many kinds of variables. Please reference the framework.proto for - details. + In Fluid, every input and output of an operator is a variable. In most + cases, variables are used for holding different kinds of data or training + labels. A variable belongs to a block. Every variable has its own name, and + two variables in different blocks can have the same name. - Notes: The constructor of Variable should not be invoked directly. Please - use `Block.create_var` to create a variable. + There are many kinds of variables. Each kind has its own attributes + and usage. Please reference the framework.proto for details. - >>> cur_program = Program() - >>> cur_block = cur_program.current_block() - >>> new_variable = cur_block.create_var( - >>> name="X", shape=[-1, 23, 48], dtype='float32') + Most of a Variable's member variables can be set to None, which means + the member is not available or will be specified later. Args: - block(Block): The associated block. It will be passed by - `Block.create_var` automatically. + block(Block): The block that the variable belongs to. type(core.VarDesc.VarType): Variable type. Please reference the framework.proto for details. - shape(tuple|list|None): The shape of variable. -1 means the batch size. + name(str|None): The name of the variable. If set to None, it will be + generated automatically. Default: None + shape(tuple|list|None): The shape of the variable. -1 means the batch size. Some kinds of variable do not contain shape, just set it to None. - dtype(np.dtype|core.VarDesc.VarType|str): The data type of variable. - lod_level(int): The level of lod tensor. 0 means it is not a time + Default: None + dtype(np.dtype|core.VarDesc.VarType|str|None): The data type of variable. + Default: None + lod_level (int|None): The level of lod tensor. 0 means it is not a time series data. 
- capacity(int): The capacity of Channel variable. Ignored - for other types. - persistable(bool): True if the variable should be saved as check point. - Defaults to False. - stop_gradient(bool): True if the variable will stop to calculate - gradients when backward. Defaults to False. + Default: None + capacity (int|None): The capacity of Channel variable. Ignored for other + types. Default: None + persistable (bool|None): True if the variable is persistable. A persistable + variable will not be deleted after an iteration ends. Default: None. + error_clip (BaseErrorClipAttr|None): The error clip attributes of the + corresponding gradient variable. Default: None + stop_gradient (bool): True if the variable should stop calculating its + gradients during backward. Default: False. + is_data (bool): True if the variable is input data. Default: False + + Notes: + The constructor of Variable should not be invoked directly. Please + use `Block.create_var` to create a variable. + + Examples: + .. code-block:: python + + cur_program = Program() + cur_block = cur_program.current_block() + new_variable = cur_block.create_var(name="X", + shape=[-1, 23, 48], + dtype='float32') """ def __init__(self, @@ -160,6 +190,7 @@ persistable=None, error_clip=None, stop_gradient=False, + is_data=False, **kwargs): self.block = block self.error_clip = error_clip @@ -167,6 +198,7 @@ if name is None: name = unique_name.generate('_generated_var') is_new_var = False + name = name if isinstance(name, six.binary_type) else name.encode() self.desc = self.block.desc.find_var(name) if self.desc is None: @@ -238,6 +270,7 @@ self.block.vars[name] = self self.op = None self.stop_gradient = stop_gradient + self.is_data = is_data def __str__(self): return self.to_string(True) @@ -247,29 +280,39 @@ Get debug string. Args: - throw_on_error(bool): True if raise an exception when self is not - intialized. + throw_on_error(bool): True to raise an exception when self is + not initialized. with_details(bool): more details about variables and parameters - (e.g. trainable, optimize_attr, ...) will be printed when with_details is True - - Returns(str): The debug string. + (e.g. trainable, optimize_attr, ...) will be printed when + with_details is True. Default: False. + Returns: + str: The debug string. """ assert isinstance(throw_on_error, bool) and isinstance(with_details, bool) protostr = self.desc.serialize_to_string() - proto = framework_pb2.VarDesc.FromString(str(protostr)) + proto = framework_pb2.VarDesc.FromString(six.binary_type(protostr)) res_str = _debug_string_(proto, throw_on_error) if with_details: additional_attr = ("error_clip", "stop_gradient") for attr_name in additional_attr: - res_str += "%s: %s\n" % (attr_name, - str(getattr(self, attr_name))) + res_str += "%s: %s\n" % ( + attr_name, six.binary_type(getattr(self, attr_name))) return res_str __repr__ = __str__ - def set_desc(self, input): + def _set_desc(self, input): + """ + Set the variable description. + + Args: + input(core.VarDesc): The new VarDesc. + + Returns: + None + """ + self.desc = input @property @@ -305,7 +348,16 @@ def type(self): return self.desc.type() - def set_error_clip(self, error_clip): + def _set_error_clip(self, error_clip): + """ + Set the error_clip. + + Args: + error_clip(BaseErrorClipAttr): The new error_clip. 
+
+        Returns:
+            None
+        """
         self.error_clip = error_clip

@@ -313,13 +365,13 @@ def get_all_op_protos():
     """
     Get all registered op proto from PaddlePaddle C++ end.

-    Returns(list): list of OpProto
-
+    Returns:
+        list: list of OpProto.
     """
     protostrs = core.get_all_op_protos()
     ret_values = []
     for pbstr in protostrs:
-        op_proto = framework_pb2.OpProto.FromString(str(pbstr))
+        op_proto = framework_pb2.OpProto.FromString(six.binary_type(pbstr))
         ret_values.append(op_proto)
     return ret_values

@@ -357,13 +409,63 @@ class OpProtoHolder(object):
             raise ValueError("Operator \"%s\" has not been registered." % type)
         return self.op_proto_map[type]

+    @staticmethod
+    def generated_op_attr_names():
+        return {
+            core.op_proto_and_checker_maker.kOpRoleAttrName(),
+            core.op_proto_and_checker_maker.kOpRoleVarAttrName()
+        }
+

 class Operator(object):
     """
-    Python Operator class. The operator represents the build in instructions in a
-    Block. Users can use the build in instructions to describe their neural
-    network.
+    In Fluid, every operation is represented by an Operator, which can be
+    regarded as a built-in instruction of a Block. Users can use these
+    built-in instructions to describe their neural networks.
+
+    Args:
+        block(Block): The block that the operator belongs to.
+        desc(core.OpDesc): The protobuf description of the Operator.
+        type(str): The type of operator. Default None.
+        inputs(dict): The inputs of this Operator: a dictionary mapping each
+            input parameter name to a list of variables. Default None.
+        outputs(dict): The outputs of this Operator: a dictionary mapping each
+            output parameter name to a list of variables. Default None.
+        attrs(dict): The attributes of this Operator: a dictionary mapping each
+            attribute name to its value. The attribute type must be the same
+            as the type registered on the C++ side. Default None.
+
+    Returns:
+        Operator: The initialized Operator.
+
+    Raises:
+        ValueError: If the given inputs, outputs or attrs do not match those
+            registered for the operator type on the C++ side.
+
+    Notes:
+        The constructor of an operator should not be invoked directly. Use
+        Block.append_op or Block._prepend_op instead.
+
+    Examples:
+        .. code-block:: python

+            cur_program = Program()
+            cur_block = cur_program.current_block()
+            # var1 += var2 + var3
+            cur_block.append_op(type="sum",
+                                inputs={"X": [var1, var2, var3]},
+                                outputs={"Out": [var1]})
     """
+    OP_WITHOUT_KERNEL_SET = {
+        'feed', 'fetch', 'save', 'load', 'recurrent', 'go',
+        'rnn_memory_helper_grad', 'conditional_block', 'while', 'send', 'recv',
+        'listen_and_serv', 'parallel_do', 'save_combine', 'load_combine',
+        'ncclInit', 'channel_create', 'channel_close', 'channel_send',
+        'channel_recv', 'select', 'checkpoint_notify', 'gen_nccl_id'
+    }

     def __init__(self,
                  block,
@@ -372,34 +474,28 @@ class Operator(object):
                  inputs=None,
                  outputs=None,
                  attrs=None):
-        """
-        Constructor.
+        self.block = block
+        self.desc = desc
+        # note: not add self.attrs here:
+        # https://github.com/PaddlePaddle/Paddle/pull/12583#pullrequestreview-145093173
+        op_attrs = attrs
+        if op_attrs is None:
+            op_attrs = dict()
+        del attrs

-        Notes: The constructor of operator should not be invoked directly. Use
-        Block.append_op or Block.prepend_op instead.
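The docstring example above can be fleshed out as follows; this is a minimal, hedged sketch against the fluid API of this branch (the `scale` op and its `scale` attribute are chosen purely for illustration):

.. code-block:: python

    import paddle.fluid as fluid
    from paddle.fluid.framework import OpProtoHolder

    prog = fluid.Program()
    block = prog.current_block()
    x = block.create_var(name="x", shape=[-1, 4], dtype='float32')
    y = block.create_var(name="y", shape=[-1, 4], dtype='float32')
    # append_op validates inputs/outputs/attrs against the registered proto
    # and raises ValueError on a mismatch, as documented above.
    block.append_op(type="scale", inputs={"X": [x]},
                    outputs={"Out": [y]}, attrs={"scale": 2.0})
    # The proto that drives that validation:
    proto = OpProtoHolder.instance().get_op_proto("scale")
    print([inp.name for inp in proto.inputs])   # ['X']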
+ op_maker = core.op_proto_and_checker_maker - >>> cur_program = Program() - >>> cur_block = cur_program.current_block() - >>> # var1 += var2 + var3 - >>> cur_block.append_op(type="sum", - >>> inputs={"X": [var1, var2, var3]}, - >>> outputs={"Out": [var1]}) + if op_maker.kOpRoleAttrName() not in op_attrs: + op_attrs[op_maker.kOpRoleAttrName()] = self.block.program.op_role + + role_var_name = op_maker.kOpRoleVarAttrName() + if len(self.block.program. + op_role_var) != 0 and role_var_name not in op_attrs: + op_attrs[role_var_name] = self.block.program.op_role_var + + if role_var_name in op_attrs and len(op_attrs[role_var_name]) == 0: + del op_attrs[role_var_name] - Args: - block(Block): The block has the current operator. - desc(core.OpDesc): The protobuf description. - type(str): The type of operator. - inputs(dict): The input dictionary. Key is the input parameter name. - Value is a list of variables. - outputs(dict): The output dictionary which has the same format with - inputs. - attrs(dict): The attributes dictionary. Key is attribute name. Value - is the attribute value. The attribute type should be as same as - the type registered in C++ - """ - self.block = block - self.desc = desc - self.attrs = attrs if len(self.desc.type()) != 0: return if type is None: @@ -430,10 +526,19 @@ class Operator(object): % (in_proto.name, len(in_args))) in_arg_names = [] for arg in in_args: - if isinstance(arg, basestring): + if isinstance(arg, six.string_types): in_arg_names.append(arg) + elif isinstance(arg, six.binary_type): + in_arg_names.append(arg.decode()) else: - in_arg_names.append(arg.name) + if isinstance(arg.name, six.string_types): + in_arg_names.append(arg.name) + elif isinstance(arg.name, six.binary_type): + in_arg_names.append(arg.name.decode()) + else: + raise TypeError( + "arguments require unicode, str or bytes, but get %s instead." + % (type(arg.name))) self.desc.set_input(in_proto.name, in_arg_names) else: self.desc.set_input(in_proto.name, []) @@ -448,8 +553,9 @@ class Operator(object): if not given == need: raise ValueError(("Incorrect setting for output(s) of " "operator \"%s\". Need: [%s] Given: [%s]") % - (type, ", ".join(str(e) for e in need), - ", ".join(str(e) for e in given))) + (type, + ", ".join(six.binary_type(e) for e in need), + ", ".join(six.binary_type(e) for e in given))) for out_proto in proto.outputs: out_args = outputs[out_proto.name] @@ -461,50 +567,49 @@ class Operator(object): (out_proto.name, len(out_args))) out_arg_names = [] for arg in out_args: - out_arg_names.append(arg.name) + if isinstance(arg.name, six.string_types): + out_arg_names.append(arg.name) + elif isinstance(arg.name, six.binary_type): + out_arg_names.append(arg.name.decode()) + else: + raise TypeError( + "arguments require unicode, str or bytes, but get %s instead." 
+ % (type(arg.name))) arg.op = self self.desc.set_output(out_proto.name, out_arg_names) - if attrs is not None: - if not isinstance(attrs, dict): + if op_attrs is not None: + if not isinstance(op_attrs, dict): raise TypeError("'attrs' should be a dict.") for attr in proto.attrs: attr_name = attr.name - if (attr_name not in attrs) or (attrs[attr_name] is None): + if (attr_name not in op_attrs) or (op_attrs[attr_name] is None): continue - if isinstance(attrs[attr_name], Block): - self.desc.set_block_attr(attr_name, attrs[attr_name].desc) - elif isinstance(attrs[attr_name], core.BlockDesc) or \ - isinstance(attrs[attr_name], core.ProgramDesc): - self.desc.set_serialized_attr( - attr_name, attrs[attr_name].serialize_to_string()) - else: - self.desc.set_attr(attr_name, attrs[attr_name]) + attr_val = op_attrs[attr_name] + self._update_desc_attr(attr_name, attr_val) self.desc.check_attrs() - no_kernel_op_set = { - 'feed', 'fetch', 'save', 'load', 'recurrent', 'go', - 'rnn_memory_helper_grad', 'conditional_block', 'while', 'send', - 'recv', 'listen_and_serv', 'parallel_do', 'save_combine', - 'load_combine', 'ncclInit', 'channel_create', 'channel_close', - 'channel_send', 'channel_recv', 'select' - } - if type not in no_kernel_op_set: + if self.has_kernel(type): self.desc.infer_var_type(self.block.desc) self.desc.infer_shape(self.block.desc) + def has_kernel(self, op_type): + return op_type not in self.OP_WITHOUT_KERNEL_SET + def to_string(self, throw_on_error): """ - To debug string. + Get debug string. + Args: - throw_on_error(bool): raise exception when self is not initialized - when throw_on_error is True + throw_on_error(bool): Whether to raise exception if self is not + initialized. - Returns(str): The debug string. + Returns: + str: The debug string. """ protostr = self.desc.serialize_to_string() - proto = framework_pb2.OpDesc.FromString(str(protostr)) + proto = framework_pb2.OpDesc.FromString(six.binary_type(protostr)) return _debug_string_(proto, throw_on_error) def __str__(self): @@ -518,29 +623,45 @@ class Operator(object): def input(self, name): """ - Get input arguments by the input parameter name - Args: - name(str): The input parameter name + Get the input arguments according to the input parameter name. - Returns(list): return the list of argument names associated with the - specific parameter name. + Args: + name(str): The input parameter name. + Returns: + list: return the list of argument names that associated with \ + the specific parameter name. """ return self.desc.input(name) def rename_input(self, old_name, new_name): + """ + Rename the `old_name` to `new_name`. + + Args: + old_name(str): The old name of the Operator's input. + new_name(str): The new name of the Operator's input. + + Returns: + None + """ self.desc.rename_input(old_name, new_name) def rename_output(self, old_name, new_name): + """ + Rename the `old_name` to `new_name`. + + Args: + old_name(str): The old name of the Operator's output. + new_name(str): The new name of the Operator's output. + + Returns: + None + """ self.desc.rename_output(old_name, new_name) @property def input_names(self): - """ - Get all input parameter names - Returns(list): return a list of input parameter names - - """ return self.desc.input_names() @property @@ -553,33 +674,23 @@ class Operator(object): def output(self, name): """ - Get output arguments by the output parameter name - Args: - name(str): The output parameter name + Get output arguments by the output parameter name. 
- Returns(list): return the list of argument names associated with the - specific parameter name. + Args: + name(str): The output parameter name. + Returns: + list: return the list of argument names associated with \ + the specific parameter name. """ return self.desc.output(name) @property def output_names(self): - """ - Get all output parameter names - Returns(list): return a list of output parameter names - - """ return self.desc.output_names() @property def idx(self): - """ - Return the array index of current operator. - Returns(int): The array index in block.ops array - Raises: - ValueError: when the operator is not found. - """ for i, op in enumerate(self.block.ops): if op == self: return i @@ -588,74 +699,192 @@ class Operator(object): def has_attr(self, name): """ - operator has the attribute with name or not. + Whether this Operator has the attribute with name or not. + Args: - name(str): the attribute name + name(str): the attribute name. - Returns(bool): True if has this attribute. + Returns: + bool: True if has this attribute. """ return self.desc.has_attr(name) def attr_type(self, name): """ - Get the type of attribute by attribute name - Args: - name(str): the attribute name + Get the type of attribute by attribute's name. - Returns(core.AttrType): the attribute type + Args: + name(str): the attribute name. + Returns: + core.AttrType: the attribute type. """ return self.desc.attr_type(name) - @property - def attr_names(self): + def set_attr(self, name, val): """ - Get all attribute names - Returns(list): The list of attribute name + Set the value of attribute by attribute's name. + Args: + name(str): the attribute name. + val(bool|int|str|float|list): the value of the attribute. + + Raises: + ValueError: If the type of value doesn't match with desc.attr_type(name). + """ + self._update_desc_attr(name, val) + + def _update_desc_attr(self, name, val): + """ + Update the value of desc's attribute by attribute's name. + + Args: + name(str): the attribute name. + val(bool|int|str|float|list): the value of the attribute. + + Raises: + ValueError: If the type of value doesn't match with desc.attr_type(name). """ + if isinstance(val, Block): + self.desc.set_block_attr(name, val.desc) + elif isinstance(val, list) and val and all( + isinstance(v, Block) for v in val): + self.desc.set_blocks_attr(name, [v.desc for v in val]) + elif isinstance(val, core.BlockDesc) or \ + isinstance(val, core.ProgramDesc): + self.desc.set_serialized_attr(name, val.serialize_to_string()) + else: + self.desc.set_attr(name, val) + + @property + def attr_names(self): return self.desc.attr_names() def attr(self, name): """ - Get attribute by name + Get the attribute by name. + Args: - name(str): the attribute name + name(str): the attribute name. - Returns(bool|int|str|float|list): The attribute value. The return value + Returns: + bool|int|str|float|list: The attribute value. The return value can be any valid attribute type. - """ return self.desc.attr(name) + def block_attr_id(self, name): + """ + Get the block attribute's id by name. + + Args: + name(str): the attribute name. + + Returns: + int: the block index. + """ + return self.desc.block_attr_id(name) + def block_attr(self, name): """ - Get the block attribute by name + Get the block attribute by name. + + Args: + name(str): the attribute name. + + Returns: + block: the block attribute. 
+ """ + + id = self.block_attr_id(name) + assert (id >= 0 and id < len(self.block.program.blocks)) + return self.block.program.blocks[id] + + def blocks_attr(self, name): + """ + Get the blocks attribute by name. + Args: - name(str): the attribute name + name(str): the attribute name. + + Returns: + list: list of the blocks attribute. + """ + attrs = [] + for i in self.blocks_attr_ids(name): + assert (i >= 0 and i < len(self.block.program.blocks)) + attrs.append(self.block.program.blocks[i]) - Returns(int): the block index + return attrs + def blocks_attr_ids(self, name): """ - return self.desc.block_attr(name) + Get the blocks attribute's ids by name. + + Args: + name(str): the attribute name. + + Returns: + list: list of the blocks ids. + """ + + return self.desc.blocks_attr_ids(name) def all_attrs(self): """ - Get the attribute dict - Returns(dict): The Operator's attribute dict + Get the attribute dict. + + Returns: + dict: The Operator's attribute dict, name->attr. """ attr_names = self.attr_names attr_map = {} for n in attr_names: - if n == 'sub_block': + attr_type = self.desc.attr_type(n) + if attr_type == core.AttrType.BLOCK: attr_map[n] = self.block_attr(n) - else: - attr_map[n] = self.attr(n) + continue + + if attr_type == core.AttrType.BLOCKS: + attr_map[n] = self.blocks_attr(n) + continue + + attr_map[n] = self.attr(n) + return attr_map class Block(object): + """ + In Fluid, a Program is consistence of multi-Block, and Block stores + VarDesc and OpDesc. In a specific Block, a VarDesc have a unique name. + One block could have some child blocks, and child block's name scopes + should inherit the parent's so that OpDesc in child block can reference + a VarDesc that is stored in the parent block. + Please reference the framework.proto for details. + + Args: + program(Program): The Program that the Block belongs to. + idx(int): The block's id in the Program. + + Notes: + The constructor of Block should not be invoked directly. Please + use `Program.create_block()` to create a block. + + Examples: + .. code-block:: python + + cur_program = Program() + cur_block = cur_program.current_block() + var = cur_block.create_var(name="X", + shape=[-1, 23, 48], + dtype='float32') + cur_block.append_op(type="abs", + inputs={"X": [var]}, + outputs={"Out": [var]}) + """ + def __init__(self, program, idx): self.desc = program.desc.block(idx) self.vars = collections.OrderedDict() # var_name --> var @@ -668,15 +897,17 @@ class Block(object): def to_string(self, throw_on_error, with_details=False): """ - To debug string. + Get debug string. + Args: throw_on_error(bool): raise exception when self is not initialized - when throw_on_error is True + when throw_on_error is True. with_details(bool): more details about variables and parameters - (e.g. trainable, optimize_attr, ...) will be printed when with_details is True - - Returns(str): The debug string. + (e.g. trainable, optimize_attr, ...) will be printed when + with_details is True. Default False. + Returns: + str: The debug string. 
""" assert isinstance(throw_on_error, bool) and isinstance(with_details, bool) @@ -684,7 +915,7 @@ class Block(object): re_add_indent = re.compile(r"\n(.)") res_str = "blocks {\n idx: %d\n parent_idx: %d" % ( self.idx, self.parent_idx) - for var in self.vars.itervalues(): + for var in list(self.vars.values()): res_str += "\n vars {\n %s }" % re_add_indent.sub( r"\n \1", var.to_string(throw_on_error, with_details)) for op in self.ops: @@ -693,7 +924,8 @@ class Block(object): res_str += "\n}" else: protostr = self.desc.serialize_to_string() - proto = framework_pb2.BlockDesc.FromString(str(protostr)) + proto = framework_pb2.BlockDesc.FromString( + six.binary_type(protostr)) res_str = _debug_string_(proto, throw_on_error) return res_str @@ -707,22 +939,60 @@ class Block(object): def forward_block_idx(self): return self.desc.get_forward_block_idx() - def set_forward_block_idx(self, idx): - self.desc.set_forward_block_idx(idx) + def _set_forward_block_idx(self, idx): + """ + Set the forward block Idx. + + Args: + idx(int): the block index. + + Returns: + None + """ + self.desc._set_forward_block_idx(idx) @property def idx(self): return self.desc.id def var(self, name): - if not isinstance(name, basestring): - raise TypeError() + """ + Get a Variable by name from this block. + + Args: + name(str): the Variable's name. + + Raises: + ValueError: The If input's type is not str, or this block + doesn't have a Variable with the giving name. + + Returns: + Variable: the Variable with the giving name. + """ + if not isinstance(name, six.string_types): + if not isinstance(name, six.binary_type): + raise TypeError( + "var require string as parameter, but get %s instead." % + (type(name))) v = self.vars.get(name, None) if v is None: raise ValueError("var %s not in this block" % name) return v - def var_recursive(self, name): + def _var_recursive(self, name): + """ + Get a Variable by name from this block recursively. + + Args: + name(str): the Variable's name. + + Raises: + ValueError: this block and this parent block doesn't + have a Variable with the giving name. + + Returns: + Variable: the Variable with the giving name. + """ frontier = list() visited = set() @@ -754,7 +1024,7 @@ class Block(object): return list(self.iter_parameters()) def iter_parameters(self): - return (item[1] for item in self.vars.iteritems() + return (item[1] for item in list(self.vars.items()) if isinstance(item[1], Parameter)) def create_var(self, *args, **kwargs): @@ -766,12 +1036,24 @@ class Block(object): def has_var(self, name): return name in self.vars - def rename_var(self, name, new_name): + def _rename_var(self, name, new_name): """ Rename variable in vars and ops' inputs and outputs + + Args: + name(str): the name that need to be renamed. + new_name(str): the name that need to rename to. + + Raises: + ValueError: If this block doesn't have this the giving name, + or the type of the var with the giving name is not Parameter + or Variable. + + Returns: + Variable: the Variable with the giving name. """ if not self.has_var(name): - raise ValueError("var %s is not in current" % name) + raise ValueError("var %s is not in current block" % name) v = self.var(name) if type(v) == Parameter: var_type = "Parameter" @@ -788,8 +1070,8 @@ class Block(object): else: raise ValueError("unsupported var type: %s", type(v)) orig_var_type = v.type - self.desc.rename_var(name, new_name) - # NOTE: v is destroyed by C++ after calling rename_var. + self.desc._rename_var(name, new_name) + # NOTE: v is destroyed by C++ after calling _rename_var. 
         d = self.desc.find_var(new_name)
         if var_type == "Parameter":
             var = Parameter(
@@ -812,56 +1094,109 @@ class Block(object):
                 error_clip=error_clip,
                 stop_gradient=stop_gradient)

-        # rename the python side, sync_with_cpp will only add
+        # rename the python side, _sync_with_cpp will only add
         # new vars/ops to python side.
         self.vars[new_name] = var
         del self.vars[name]
-        self.sync_with_cpp()
+        self._sync_with_cpp()
+        return var

-    def remove_var(self, name):
-        self.sync_with_cpp()
-        self.desc.remove_var(name)
+    def _remove_var(self, name):
+        self._sync_with_cpp()
+        self.desc._remove_var(name)
         del self.vars[name]

     def create_parameter(self, *args, **kwargs):
         global_block = self.program.global_block()
         param = Parameter(global_block, *args, **kwargs)
         if 'initializer' in kwargs:
-            kwargs['initializer'](param, self)
+
+            def _is_inited_by(block, var):
+                init_ops = []
+                for op in block.ops:
+                    if var.name in op.output_arg_names:
+                        init_ops.append(op)
+                return init_ops
+
+            initializer = kwargs['initializer']
+            init_ops = _is_inited_by(global_block, param)
+            init_ops_len = len(init_ops)
+            if init_ops_len > 1:
+                raise RuntimeError("param " + param.name +
+                                   " is inited by multiple init ops " + str(
+                                       init_ops))
+            elif init_ops_len == 1:
+                #TODO already inited, do nothing, should log a warning
+                pass
+            else:
+                initializer(param, self)
         return param

     def append_op(self, *args, **kwargs):
+        """
+        Appends a new Operator built from the given arguments.
+
+        Returns:
+            Operator: the appended Operator.
+        """
         op_desc = self.desc.append_op()
         op = Operator(block=self, desc=op_desc, *args, **kwargs)
         self.ops.append(op)
         return op

-    def insert_op(self, index, *args, **kwargs):
-        self.sync_with_cpp()
-        op_desc = self.desc.insert_op(index)
+    def _insert_op(self, index, *args, **kwargs):
+        """
+        Insert an Operator at the given index.
+
+        Args:
+            index(int): the index at which to insert the operator.
+
+        Returns:
+            Operator: the inserted Operator.
+        """
+        self._sync_with_cpp()
+        op_desc = self.desc._insert_op(index)
         op = Operator(block=self, desc=op_desc, *args, **kwargs)
         self.ops.insert(index, op)
         return op

-    def remove_op(self, index):
-        self.sync_with_cpp()
-        self.desc.remove_op(index, index + 1)
+    def _remove_op(self, index):
+        """
+        Remove the operator at the given index.
+
+        Args:
+            index(int): the index of the operator to remove.
+
+        Returns:
+            None
+        """
+        self._sync_with_cpp()
+        self.desc._remove_op(index, index + 1)
         del self.ops[index]

-    def slice_ops(self, start, end):
+    def _slice_ops(self, start, end):
+        """
+        Return the Operators between start and end.
+
+        Args:
+            start(int): the start position.
+            end(int): the end position.
+
+        Returns:
+            list: the Operators between start and end.
+        """
         return self.ops[start:end]

-    def prepend_op(self, *args, **kwargs):
-        op_desc = self.desc.prepend_op()
+    def _prepend_op(self, *args, **kwargs):
+        op_desc = self.desc._prepend_op()
         op = Operator(self, op_desc, *args, **kwargs)
         self.ops.insert(0, op)
         return op

-    def sync_with_cpp(self):
+    def _sync_with_cpp(self):
         """
-        Sync from the desc on the c++ end.
-
-        This method is used to synchronize the c++ desc instance generated by backward.
+        Sync from the desc on the c++ end. This method is used to synchronize
+        the c++ desc instance generated by backward.
""" # sync variables from cpp for var in self.desc.all_vars(): @@ -869,7 +1204,7 @@ class Block(object): self.create_var(name=var.name(), desc=var, type=var.type()) # sync variables removed from c++ end - for var in self.vars.keys(): + for var in list(self.vars.keys()): if not self.desc.find_var(var): self.vars.pop(var) @@ -924,22 +1259,28 @@ class Block(object): for index in range(len(self.ops)): assert self.ops[index].desc == ops_in_cpp[index] - def copy_param_info_from(self, other): + def _copy_param_info_from(self, other): """ - Copy the information of parameters from the other block + Copy the information of parameters from the other block. + Args: - other(Block): the other block + other(Block): the other block. + + Raises: + ValueError: If type of input is not Block, or the `other` and this + block is not in the same topology. Returns: None """ if not isinstance(other, Block): - raise TypeError("copy_param_info_from should be invoked with Block") + raise TypeError( + "_copy_param_info_from should be invoked with Block") for p in other.iter_parameters(): assert isinstance(p, Parameter) v = self.vars.get(p.name, None) if v is None: - raise ValueError("copy_param_info_from should be invoked with " + raise ValueError("_copy_param_info_from should be invoked with " "same topology") assert isinstance(v, Variable) new_p = Parameter( @@ -957,14 +1298,15 @@ class Block(object): name=v.name) self.vars[new_p.name] = new_p - def clone_variable(self, var): + def _clone_variable(self, var): """ Clone a variable into current block. + Args: var: the variable to be cloned. Returns: - The new variable cloned from 'var' in current block. + Variable: the new variable cloned from 'var' in current block. """ assert isinstance(var, Variable) ret_var = None @@ -972,13 +1314,17 @@ class Block(object): if var.type == core.VarDesc.VarType.STEP_SCOPES: ret_var = self.create_var( name=var.name, persistable=var.persistable, type=var.type) + elif var.type == core.VarDesc.VarType.RAW: + ret_var = self.create_var( + name=var.name, persistable=var.persistable, type=var.type) elif var.type == core.VarDesc.VarType.SELECTED_ROWS: ret_var = self.create_var( name=var.name, shape=var.shape, dtype=var.dtype, type=var.type, - persistable=True) + persistable=True, + is_data=var.is_data) else: ret_var = self.create_var( name=var.name, @@ -986,30 +1332,139 @@ class Block(object): dtype=var.dtype, type=var.type, lod_level=var.lod_level, - persistable=True) + persistable=True, + is_data=var.is_data) return ret_var class Program(object): + """ + Python Program. Beneath it is a ProgramDesc, which is used for + create c++ Program. A program is a self-contained programing + language like container. It has at least one Block, when the + control flow op like conditional_block, while_op is included, + it will contains nested block. + Please reference the framework.proto for details. + + Notes: we have default_startup_program and default_main_program + by default, a pair of them will shared the parameters. + The default_startup_program only run once to initialize parameters, + default_main_program run in every mini batch and adjust the weights. + + Returns: + A empty program. 
+ + Examples: + >>> main_program = fluid.Program() + >>> startup_program = fluid.Program() + >>> with fluid.program_guard(main_program=main_program, startup_program=startup_program): + >>> fluid.layers.data(name="x", shape=[-1, 784], dtype='float32') + >>> fluid.layers.data(name="y", shape=[-1, 1], dtype='int32') + >>> fluid.layers.fc(name="fc", shape=[10], dtype='float32', act="relu") + + """ + def __init__(self): self.desc = core.ProgramDesc() self.blocks = [Block(self, 0)] self.current_block_idx = 0 self._seed = 0 + self._current_role = core.op_proto_and_checker_maker.OpRole.Forward + self._op_role_var = [] + + @property + def op_role(self): + """ + The operator role. In a enum {Forward, Backward, Optimize}. + + Notes: this is a low level API. It is used only for ParallelExecutor to + duplicate or schedule operator to devices. + + For example, the forward operator should be executed on every device. + The backward operator should be executed on every device and the + parameter gradient of backward (use :code:`op_role_var` to get this + variable) operator should be merged to one device. The optimization + operators should be executed on only one device and broadcast the + optimization result, i.e., the new parameter, to every other device. + """ + return self._current_role + + @op_role.setter + def set_op_role(self, role): + self._current_role = role + + @property + def op_role_var(self): + """ + The auxiliary variables for :code:`op_role` property. + + See Also: :code:`Program.op_role`'s documentation for details. + + Notes: This is a very low-level API. Users should not use it directly. + """ + return self._op_role_var + + @op_role_var.setter + def set_op_role_var(self, var_name): + self._op_role_var = [var_name] + + @contextlib.contextmanager + def optimized_guard(self, param_and_grads): + """ + A with guard to set :code:`Optimization` :code:`OpRole` and + :code:`OpRoleVar` automatically. + + Notes: This is a very low level API. Users should not use it directly. + + Args: + param_and_grads(list): The variables (names) to be optimized. + + Examples: + + >>> p, g = backward(...) + >>> with program.optimized_guard([p,g]): + >>> p = p - 0.001 * g + """ + OpRole = core.op_proto_and_checker_maker.OpRole + self._current_role = OpRole.Optimize + self._op_role_var = [ + var.name if isinstance(var, Variable) else var + for var in param_and_grads + ] + yield + self._op_role_var = [] + self._current_role = OpRole.Forward def __str__(self): + """ + Get the protobuf debug string of this Program. + + Returns: + (str): The protobuf debug string. + + Raises: + ValueError: If any of required fields is not set. + """ return self.to_string(True) def to_string(self, throw_on_error, with_details=False): """ To debug string. + Args: - throw_on_error(bool): raise exception when self is not initialized - when throw_on_error is True - with_details(bool): more details about variables and parameters - (e.g. trainable, optimize_attr, ...) will be printed when with_details is True + throw_on_error(bool): raise Value error when any of required fields + is not set. - Returns(str): The debug string. + with_details(bool): True if more details about variables and + parameters, e.g., :code:`trainable`, :code:`optimize_attr`, need + to print. + + Returns + (str): The debug string. + + Raises: + ValueError: If any of required fields is not set and throw_on_error is + True. 
""" assert isinstance(throw_on_error, bool) and isinstance(with_details, @@ -1020,40 +1475,134 @@ class Program(object): res_str += block.to_string(throw_on_error, with_details) else: protostr = self.desc.serialize_to_string() - proto = framework_pb2.ProgramDesc.FromString(str(protostr)) + proto = framework_pb2.ProgramDesc.FromString( + six.binary_type(protostr)) res_str = _debug_string_(proto, throw_on_error) return res_str def get_desc(self): + """ + Get the C++ side of `ProgramDesc` object pointer. The C++ object is + exposed by :code:`pybind`. + + Notes: This is a very low level API. Users should not use this API + directly. + """ return self.desc def clone(self, for_test=False): - """Clone the Program object + """ + Create a new, duplicated program. + + + Some operators, e.g., :code:`batch_norm`, behave differently between + training and testing. They have an attribute, :code:`is_test`, to + control this behaviour. This method will change the :code:`is_test` + attribute of them to :code:`True` when :code:`for_test=True`. + + * Set for_test to False when we want to clone the program for training. + * Set for_test to True when we want to clone the program for testing. + + Notes: This API DOES NOT prune any operator. Use + :code:`clone(for_test=True)` before backward and optimization please. e.g. - Set for_test to False when we want to clone the program for training. - Set for_test to True when we want to clone the program for testing. + >>> test_program = fluid.default_main_program().clone(for_test=True) + >>> optimizer = fluid.optimizer.Momentum(learning_rate=0.01, momentum=0.9) + >>> optimizer.minimize() Args: - for_test(bool): Some operators, such as batch_norm and drop_out ops, - behave differently in training and testing. If for_test is True, - the is_test attributes in these operators will be set to True for - testing purposes, otherwise, they remain unchanged. + for_test(bool): True if change the :code:`is_test` attribute of + operators to :code:`True`. - Returns(Program): - The cloned Program object. + Returns: + Program: The new, duplicated Program object. + + Examples: + + 1. To clone a test program, the sample code is: + + >>> import paddle.fluid as fluid + >>> train_program = fluid.Program() + >>> startup_program = fluid.Program() + >>> with fluid.program_guard(train_program, startup_program): + >>> img = fluid.layers.data(name='image', shape=[784]) + >>> hidden = fluid.layers.fc(input=img, size=200, act='relu') + >>> hidden = fluid.layers.dropout(hidden, dropout_prob=0.5) + >>> loss = fluid.layers.cross_entropy( + >>> input=fluid.layers.fc(hidden, size=10, act='softmax'), + >>> label=fluid.layers.data(name='label', shape=[1], dtype='int64')) + >>> + >>> test_program = train_program.clone(for_test=True) + >>> + >>> sgd = fluid.optimizer.SGD(learning_rate=1e-3) + >>> with fluid.program_guard(train_program, startup_program): + >>> sgd.minimize(loss) + + 2. The :code:`clone` method can be avoid if you create program for + training and program for testing individually. 
+ + >>> import paddle.fluid as fluid + >>> + >>> def network(is_test): + >>> img = fluid.layers.data(name='image', shape=[784]) + >>> hidden = fluid.layers.fc(input=img, size=200, act='relu') + >>> hidden = fluid.layers.dropout(hidden, dropout_prob=0.5, is_test=is_test) + >>> loss = fluid.layers.cross_entropy( + >>> input=fluid.layers.fc(hidden, size=10, act='softmax'), + >>> label=fluid.layers.data(name='label', shape=[1], dtype='int64')) + >>> return loss + >>> + >>> train_program = fluid.Program() + >>> startup_program = fluid.Program() + >>> test_program = fluid.Program() + >>> + >>> with fluid.program_guard(train_program, startup_program): + >>> with fluid.unique_name.guard(): + >>> loss = network(is_test=False) + >>> sgd = fluid.optimizer.SGD(learning_rate=1e-3) + >>> sgd.minimize(loss) + >>> + >>> # the test startup program is not used. + >>> with fluid.program_guard(test_program, fluid.Program()): + >>> with fluid.unique_name.guard(): + >>> loss = network(is_test=True) + + The two code snippets above will generate same programs. """ if for_test: - p = self.inference_optimize() + p = self.inference_optimize(export_for_deployment=False) else: p = Program() + p.current_block_idx = self.current_block_idx + p._seed = self._seed p.desc = core.ProgramDesc(self.desc) p.blocks = [Block(p, i) for i in xrange(self.desc.num_blocks())] - p.sync_with_cpp() - p.copy_param_info_from(self) + p._current_role = self._current_role + p._op_role_var = self._op_role_var + + p._sync_with_cpp() + + p._copy_param_info_from(self) + p.copy_data_info_from(self) return p def prune(self, targets): + """ + Prune operators and variables which are not needed to generate + :code:`targets`. + + Notes: This is a very low level API. Users should not use this API + directly. This API is in flux and not stable. + + Args: + targets(list|Variable|Operator): A list of variables or operators + need to be pruned + + Returns: + Program: A new, pruned program. + + """ if not isinstance(targets, list): targets = [targets] targets_idx = [] @@ -1083,39 +1632,97 @@ class Program(object): targets_idx.append([t.block.idx, t.idx]) res = Program() res.desc = core.prune(self.desc, targets_idx) - res.blocks = [Block(res, i) for i in xrange(res.desc.num_blocks())] - res.sync_with_cpp() + res.blocks = [Block(res, i) for i in range(res.desc.num_blocks())] + res._sync_with_cpp() return res - def inference_optimize(self): + def inference_optimize(self, export_for_deployment=True): + """ + This method will create a new program and do following adjustments on it: + 1. Remove all reader variables and their creator ops if exist. + + 2. Remove the :code:`read_op` if exists. + + 3. change the :code:`is_test` + attribute of operators to :code:`True`. All the :code:`Parameter` + information will be lost. + + Args: + export_for_deployment(bool): remove the read ops that are added by py_reader + for cpp inference library + + Notes: This API is a very low level API. Use + :code:`Program.clone(for_test=True)` instead. + + Returns: + Program: The new program. + """ # this is an alternative implement before # core.inference_optimize being fixed. 
res = Program() res.desc = core.ProgramDesc(self.desc) - for i in xrange(res.desc.num_blocks()): + + # remove all readers and the read_op if exist + read_op_idx = 0 + root_block = res.desc.block(0) + if export_for_deployment: + while True: + if read_op_idx >= root_block.op_size() or root_block.op( + read_op_idx).type() == 'read': + break + read_op_idx += 1 + if read_op_idx < root_block.op_size(): + root_block._remove_op(0, read_op_idx + 1) + for var in root_block.all_vars(): + if var.type() == core.VarDesc.VarType.READER: + root_block._remove_var(var.name()) + + # change all `is_test` attributes to True + for i in range(res.desc.num_blocks()): block = res.desc.block(i) - for j in xrange(block.op_size()): + for j in range(block.op_size()): op = block.op(j) if op.has_attr('is_test'): op.set_attr('is_test', True) - res.blocks = [Block(res, i) for i in xrange(res.desc.num_blocks())] - res.sync_with_cpp() + res.blocks = [Block(res, i) for i in range(res.desc.num_blocks())] + res._sync_with_cpp() return res @staticmethod def parse_from_string(binary_str): + """ + Deserialize a program desc from protobuf binary string. + + Notes: All information about parameters will be lost after serialization + and deserialization. + + Args: + binary_str_type(str): The binary prootbuf string. + + Returns: + Program: A deserialized program desc. + """ p = Program() p.desc = core.ProgramDesc(binary_str) - p.blocks = [Block(p, i) for i in xrange(p.desc.num_blocks())] - p.sync_with_cpp() + p.blocks = [Block(p, i) for i in range(p.desc.num_blocks())] + p._sync_with_cpp() return p @property def random_seed(self): + """ + The default random seed for random operators in Program. Zero means get + the random seed from random device. + + Notes: It must be set before the operators have been added. + """ return self._seed @property def num_blocks(self): + """ + The number of blocks in this program. + """ return self.desc.num_blocks() @random_seed.setter @@ -1125,18 +1732,43 @@ class Program(object): self._seed = seed def __repr__(self): - return str(self) + return self.__str__() def global_block(self): + """ + Get the first block of this program. + """ return self.blocks[0] def block(self, index): + """ + Get the :code:`index` block of this program + Args: + index(int): The index of block to get + + Returns: + Block: The :code:`index` block + """ return self.blocks[index] def current_block(self): + """ + Get the current block. The :code:`current` block is the block to append + operators. + """ return self.blocks[self.current_block_idx] def create_block(self, parent_idx=None): + """ + Create a new block with the :code:`parent_idx` and change the current block + to new block. + + Args: + parent_idx(int): The parent block index. + + Returns: + Block: The new block. + """ new_block_idx = len(self.blocks) parent = self.current_block() if parent_idx is None else self.block( parent_idx) @@ -1146,17 +1778,58 @@ class Program(object): return self.current_block() def rollback(self): + """ + Exit a code block, i.e., roll back to the parent block. + Returns: + None + """ self.current_block_idx = self.current_block().parent_idx - def sync_with_cpp(self): + def _sync_with_cpp(self): + """ + Synchronize Python instance to its binding C++ object instance. + If the program is modified in C++ space, this method should be invoked. + + Notes: This is a very low level API. Users should not invoke it + directly. 
+
+        Returns:
+            None
+        """
         for block_idx in range(len(self.blocks), self.desc.num_blocks()):
             self.blocks.append(Block(self, block_idx))
         for block in self.blocks:
-            block.sync_with_cpp()
+            block._sync_with_cpp()

-    def copy_param_info_from(self, other):
+    def _copy_param_info_from(self, other):
         """
         Copy the information of parameters from other program.
+
+        Notes: This is a very low level API. Users should not invoke it
+        directly.
+
+        Args:
+            other(Program): Other program
+
+        Returns:
+            None
+        """
+        if not isinstance(other, Program):
+            raise TypeError("_copy_param_info_from should be invoked with "
+                            "Program")
+
+        if len(self.blocks) != len(other.blocks):
+            raise ValueError("_copy_param_info_from should be invoked with two "
+                             "programs that represent the same topology")
+        self.global_block()._copy_param_info_from(other.global_block())
+
+    def copy_data_info_from(self, other):
+        """
+        Copy the information of data variables from other program.
+
+        Notes: This is a very low level API. Users should not invoke it
+        directly.
+
         Args:
             other(Program): Other program

@@ -1164,21 +1837,52 @@
             None
         """
         if not isinstance(other, Program):
-            raise TypeError("copy_param_info_from should be invoked with "
+            raise TypeError("copy_data_info_from should be invoked with "
                             "Program")

         if len(self.blocks) != len(other.blocks):
-            raise ValueError("copy_param_info_from should be invoked with two "
+            raise ValueError("copy_data_info_from should be invoked with two "
                              "program, with represent the same topology")
-        self.global_block().copy_param_info_from(other.global_block())
+        for var in list(other.global_block().vars.values()):
+            if var.is_data:
+                self.global_block().var(var.name).is_data = True

     def list_vars(self):
+        """
+        Get all variables from this Program. An iterable object is returned.
+
+        Returns:
+            iterable: The generator will yield every variable in this program.
+        """
         for each_block in self.blocks:
-            for each_var in each_block.vars.itervalues():
+            for each_var in list(each_block.vars.values()):
                 yield each_var


 class Parameter(Variable):
+    """
+    Parameter is derived from Variable. A parameter is a persistable
+    Variable, and will be updated by optimizers after each iteration.
+    The training of a neural network is essentially the updating of
+    its parameters.
+
+    Compared with a general Variable, a Parameter has several member
+    variables of its own:
+
+    Args:
+        trainable(bool): True if the parameter needs to be updated after
+            iterations.
+        optimize_attr(map): Parameter attributes related to optimization.
+            Currently, it only contains 'learning_rate'.
+            Default: {'learning_rate': 1.0}
+        regularizer(WeightDecayRegularizer): The Regularizer which will
+            be applied on the parameter. Default: None
+        gradient_clip_attr(BaseGradientClipAttr): The gradient clip strategy
+            which will be applied on the parameter. Default: None
+        do_model_average(bool): True if the model average strategy will
+            be applied on this parameter.
+    """
+
     def __init__(self, block, shape, dtype, **kwargs):
         if shape is None or dtype is None:
             raise ValueError("Parameter must set shape and dtype")
@@ -1208,6 +1912,7 @@ class Parameter(Variable):

     def to_string(self, throw_on_error, with_details=False):
         """
         To debug string.
+ Args: throw_on_error(bool): raise exception when self is not initialized when throw_on_error is True @@ -1224,8 +1929,8 @@ class Parameter(Variable): additional_attr = ("trainable", "optimize_attr", "regularizer", "gradient_clip_attr", "do_model_average") for attr_name in additional_attr: - res_str += "%s: %s\n" % (attr_name, - str(getattr(self, attr_name))) + res_str += "%s: %s\n" % ( + attr_name, six.binary_type(getattr(self, attr_name))) else: res_str = Variable.to_string(self, throw_on_error, False) return res_str @@ -1240,8 +1945,15 @@ _startup_program_ = Program() def default_startup_program(): """ - Get default startup program. In startup program, Paddle will initialize - parameters, initialize nccl handle, etc. + Get default/global startup program. + + The layer function in :code:`fluid.layers` will create parameters, readers, + NCCL handles as global variables. The :code:`startup_program` will + initialize them by the operators in startup program. The layer function will + append these initialization operators into startup program. + + This method will return the :code:`default` or the :code:`current` startup + program. Users can use :code:`fluid.program_guard` to switch program. Returns: Program: startup program @@ -1251,7 +1963,15 @@ def default_startup_program(): def default_main_program(): """ - Get default main program. The main program is used for training or testing. + Get default/global main program. The main program is used for training or + testing. + + All layer function in :code:`fluid.layers` will append operators and + variables to the :code:`default_main_program`. + + The :code:`default_main_program` is the default program in a lot of APIs. + For example, the :code:`Executor.run()` will execute the + :code:`default_main_program` when the program is not specified. Returns: Program: main program @@ -1293,20 +2013,34 @@ def switch_startup_program(program): @contextlib.contextmanager def program_guard(main_program, startup_program=None): """ - Switch program with `with` statement + Change the global main program and startup program with `with` statement. + Layer functions in the Python `with` block will append operators and + variables to the new main programs. Examples: - >>> with program_guard(Program()): - >>> data = fluid.layers.data(...) - >>> hidden = fluid.layers.fc(...) + + >>> import paddle.fluid as fluid + >>> main_program = fluid.Program() + >>> startup_program = fluid.Program() + >>> with fluid.program_guard(main_program, startup_program): + >>> data = fluid.layers.data(...) + >>> hidden = fluid.layers.fc(...) + + Notes: The temporary :code:`Program` can be used if the user does not need + to construct either of startup program or main program. + + Examples: + + >>> import paddle.fluid as fluid + >>> main_program = fluid.Program() + >>> # does not care about startup program. Just pass a temporary value. + >>> with fluid.program_guard(main_program, fluid.Program()): + >>> data = ... Args: - main_program(Program): New main program inside `with` statement + main_program(Program): New main program inside `with` statement. startup_program(Program): New startup program inside `with` statement. None means do not change startup program. 
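How the two default programs documented above cooperate can be sketched as follows (a minimal, hedged example against the fluid API of this era):

.. code-block:: python

    import paddle.fluid as fluid

    # Layer calls append ops to the default main program and record the
    # parameter initializers in the default startup program.
    x = fluid.layers.data(name='x', shape=[13], dtype='float32')
    y = fluid.layers.fc(input=x, size=1)

    exe = fluid.Executor(fluid.CPUPlace())
    exe.run(fluid.default_startup_program())  # run once to init parameters
    # fluid.default_main_program() is then run once per mini-batch.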
- - Returns: - None """ if not isinstance(main_program, Program): raise TypeError("main_program should be Program") @@ -1323,11 +2057,12 @@ def program_guard(main_program, startup_program=None): def get_var(name, program=None): """ - Get a variable by name from the global block of a program + Get a variable by name from the global block of a program. + Args: name(str): name of the variable program(Program|None): program object. - If None, default_global_program() will be used. + If None, default_global_program() will be used. Returns: Variable diff --git a/python/paddle/fluid/graphviz.py b/python/paddle/fluid/graphviz.py index 125b4efa9d..ba67bf5ae6 100644 --- a/python/paddle/fluid/graphviz.py +++ b/python/paddle/fluid/graphviz.py @@ -14,12 +14,13 @@ import os import random +import six import subprocess import logging def crepr(v): - if type(v) is str or type(v) is unicode: + if isinstance(v, six.string_types): return '"%s"' % v return str(v) @@ -104,7 +105,7 @@ class Graph(object): def _rank_repr(self): ranks = sorted( - self.rank_groups.items(), + list(self.rank_groups.items()), cmp=lambda a, b: a[1].priority > b[1].priority) repr = [] for x in ranks: @@ -148,7 +149,7 @@ class Node(object): name=self.name, label=self.label, extra=',' + ','.join("%s=%s" % (key, crepr(value)) - for key, value in self.attrs.items()) + for key, value in list(self.attrs.items())) if self.attrs else "") return reprs @@ -172,7 +173,7 @@ class Edge(object): target=self.target.name, extra="" if not self.attrs else "[" + ','.join("{}={}".format(attr[0], crepr(attr[1])) - for attr in self.attrs.items()) + "]") + for attr in list(self.attrs.items())) + "]") return repr diff --git a/python/paddle/fluid/inferencer.py b/python/paddle/fluid/inferencer.py index b38526bc57..ff382d8b83 100644 --- a/python/paddle/fluid/inferencer.py +++ b/python/paddle/fluid/inferencer.py @@ -12,35 +12,100 @@ # See the License for the specific language governing permissions and # limitations under the License. -import core -import framework -import executor -import io +import contextlib + +from . import core + +from . import executor +from . import framework +from . import io +from . import parallel_executor +from . import unique_name +from .trainer import check_and_get_place + __all__ = ['Inferencer', ] class Inferencer(object): - def __init__(self, network_func, param_path=None, place=None): - # 1. we need to generate a framework.Program by calling - # network_func. Reference: fluid.program_guard in test_word2vec.py + """ + Inferencer High Level API. + + Args: + infer_func (Python func): Infer function that will return predict Variable + param_path (str): The path where the inference model is saved by fluid.io.save_params + place (Place): place to do the inference + parallel (bool): use parallel_executor to run the inference, it will use multi CPU/GPU. - # 2. move the default_main_program to self.program. + Examples: + .. code-block:: python - # 3. run the default_startup program. + def inference_program(): + x = fluid.layers.data(name='x', shape=[13], dtype='float32') + y_predict = fluid.layers.fc(input=x, size=1, act=None) + return y_predict - # 4. 
load params from param_path into scope + place = fluid.CPUPlace() + inferencer = fluid.Inferencer( + infer_func=inference_program, param_path="/tmp/model", place=place) + + """ + + def __init__(self, infer_func, param_path, place=None, parallel=False): + self.param_path = param_path self.scope = core.Scope() - self.place = place - self.startup_program = framework.Program() - # TODO: generate the startup_program with network_func + self.parallel = parallel + self.place = check_and_get_place(place) - exe = executor.Executor(place) - exe.run(self.startup_program, scope=self.scope) + self.inference_program = framework.Program() + with framework.program_guard(self.inference_program): + with unique_name.guard(): + self.predict_var = infer_func() - if param_path: + with self._prog_and_scope_guard(): # load params from param_path into scope - io.load_persistables(exe, dirname=param_path) + io.load_params(executor.Executor(self.place), param_path) + + if parallel: + with self._prog_and_scope_guard(): + self.exe = parallel_executor.ParallelExecutor( + use_cuda=isinstance(self.place, core.CUDAPlace), + loss_name=self.predict_var.name) + else: + self.exe = executor.Executor(self.place) + + self.inference_program = self.inference_program.clone(for_test=True) + + def infer(self, inputs, return_numpy=True): + """ + Do Inference for Inputs + + Args: + inputs (map): a map of {"input_name": input_var} that will be feed into the inference program + return_numpy (bool): transform return value into numpy or not + + Returns: + Tensor or Numpy: the predict value of the inference model for the inputs + + Examples: + .. code-block:: python + + tensor_x = numpy.random.uniform(0, 10, [batch_size, 13]).astype("float32") + results = inferencer.infer({'x': tensor_x}) + """ + if not isinstance(inputs, dict): + raise ValueError( + "inputs should be a map of {'input_name': input_var}") + + with executor.scope_guard(self.scope): + results = self.exe.run(self.inference_program, + feed=inputs, + fetch_list=[self.predict_var], + return_numpy=return_numpy) + + return results - def infer(self, inputs): - # run self.program - pass + @contextlib.contextmanager + def _prog_and_scope_guard(self): + with framework.program_guard(main_program=self.inference_program): + with executor.scope_guard(self.scope): + yield diff --git a/python/paddle/fluid/initializer.py b/python/paddle/fluid/initializer.py index 4e132ed261..3f740dd7c5 100644 --- a/python/paddle/fluid/initializer.py +++ b/python/paddle/fluid/initializer.py @@ -12,31 +12,46 @@ # See the License for the specific language governing permissions and # limitations under the License. -import framework +from . import framework import numpy as np import contextlib +from .framework import convert_np_dtype_to_dtype_ +from .core import VarDesc __all__ = [ - 'Constant', 'Uniform', 'Normal', 'Xavier', 'force_init_on_cpu', - 'init_on_cpu', 'ConstantInitializer', 'UniformInitializer', - 'NormalInitializer', 'XavierInitializer' + 'Constant', 'Uniform', 'Normal', 'Xavier', 'Bilinear', 'MSRA', + 'force_init_on_cpu', 'init_on_cpu', 'ConstantInitializer', + 'UniformInitializer', 'NormalInitializer', 'XavierInitializer', + 'BilinearInitializer', 'MSRAInitializer' ] _force_init_on_cpu_ = False def force_init_on_cpu(): + """ + The flag of whether force to init variables on CPU. + + Examples: + .. 
code-block:: python + + if force_init_on_cpu(): + pass + + """ return _force_init_on_cpu_ @contextlib.contextmanager def init_on_cpu(): """ - Switch program with `with` statement + Force the variable to be inited on CPU. Examples: - >>> with init_on_cpu(): - >>> step = layers.create_global_var() + .. code-block:: python + + with init_on_cpu(): + step = layers.create_global_var() """ global _force_init_on_cpu_ @@ -102,14 +117,18 @@ class Initializer(object): class ConstantInitializer(Initializer): """Implements the constant initializer + + Args: + value (float): constant value to initialize the variable + + Examples: + .. code-block:: python + + fc = fluid.layers.fc(input=x, size=10, + param_attr=fluid.initializer.Constant(value=2.0)) """ def __init__(self, value=0.0, force_cpu=False): - """Constructor for ConstantInitializer - - Args: - value: constant value to initialize the variable - """ assert value is not None super(ConstantInitializer, self).__init__() self._value = value @@ -129,7 +148,7 @@ class ConstantInitializer(Initializer): assert isinstance(var, framework.Variable) assert isinstance(block, framework.Block) # Initialization Ops should be prepended and not appended - op = block.prepend_op( + op = block._prepend_op( type="fill_constant", outputs={"Out": var}, attrs={ @@ -144,16 +163,20 @@ class ConstantInitializer(Initializer): class UniformInitializer(Initializer): """Implements the random uniform distribution initializer + + Args: + low (float): lower boundary of the uniform distribution + high (float): upper boundary of the uniform distribution + seed (int): random seed + + Examples: + .. code-block:: python + + fc = fluid.layers.fc(input=x, size=10, + param_attr=fluid.initializer.Uniform(low=-0.5, high=0.5)) """ def __init__(self, low=-1.0, high=1.0, seed=0): - """Constructor for UniformInitializer - - Args: - low: lower boundary of the uniform distribution - high: upper boundary of the uniform distribution - seed: random seed - """ assert low is not None assert high is not None assert high >= low @@ -179,7 +202,7 @@ class UniformInitializer(Initializer): # Initialization Ops should be prepended and not appended if self._seed == 0: self._seed = block.program.random_seed - op = block.prepend_op( + op = block._prepend_op( type="uniform_random", outputs={"Out": var}, attrs={ @@ -194,17 +217,21 @@ class UniformInitializer(Initializer): class NormalInitializer(Initializer): - """Implements the random Normal(Gaussian) distribution initializer + """Implements the Random Normal(Gaussian) distribution initializer + + Args: + loc (float): mean of the normal distribution + scale (float): standard deviation of the normal distribution + seed (int): random seed + + Examples: + .. 
code-block:: python + + fc = fluid.layers.fc(input=x, size=10, + param_attr=fluid.initializer.Normal(loc=0.0, scale=2.0)) """ def __init__(self, loc=0.0, scale=1.0, seed=0): - """Constructor for NormalInitializer - - Args: - loc: mean of the normal distribution - scale: standard deviation of the normal distribution - seed: random seed - """ assert loc is not None assert scale is not None assert seed is not None @@ -229,7 +256,7 @@ class NormalInitializer(Initializer): # Initialization Ops should be prepended and not appended if self._seed == 0: self._seed = block.program.random_seed - op = block.prepend_op( + op = block._prepend_op( type="gaussian_random", outputs={"Out": var}, attrs={ @@ -237,46 +264,57 @@ class NormalInitializer(Initializer): "dtype": int(var.dtype), "mean": self._mean, "std": self._std_dev, - "seed": self._seed + "seed": self._seed, + "use_mkldnn": False }) var.op = op return op class XavierInitializer(Initializer): - """Implements the Xavier initializer - + """ This class implements the Xavier weight initializer from the paper - Understanding the difficulty of training deep feedforward neural - networks[1] by Xavier Glorot and Yoshua Bengio. + `Understanding the difficulty of training deep feedforward neural + networks `_ + by Xavier Glorot and Yoshua Bengio. This initializer is designed to keep the scale of the gradients approximately same in all the layers. In case of Uniform distribution, - the range is [-x, x], where x = sqrt(6 / (fan_in + fan_out)). + the range is [-x, x], where + + .. math:: + + x = \sqrt{\\frac{6.0}{fan\_in + fan\_out}} + In case of Normal distribution, the mean is 0 and the standard deviation - is sqrt(2/ (fan_in + fan_out)). + is + + .. math:: + + \sqrt{\\frac{2.0}{fan\_in + fan\_out}} + + + Args: + uniform (bool): whether to use uniform or normal distribution + fan_in (float): fan_in for Xavier initialization. If None, it is + inferred from the variable. + fan_out (float): fan_out for Xavier initialization. If None, it is + inferred from the variable. + seed (int): random seed + + Note: + It is recommended to set fan_in and fan_out to None for most cases. + + Examples: + .. code-block:: python + + fc = fluid.layers.fc( + input=queries, size=10, + param_attr=fluid.initializer.Xavier(uniform=False)) - References: - [1] Understanding the difficulty of training deep feedforward neural - networks. International conference on artificial intelligence and - statistics. - (http://proceedings.mlr.press/v9/glorot10a.html) """ def __init__(self, uniform=True, fan_in=None, fan_out=None, seed=0): - """Constructor for XavierInitializer - - Args: - uniform: whether to use uniform or normal distribution - fan_in: fan_in for Xavier initialization. If None, it is - inferred from the variable. - fan_out: fan_out for Xavier initialization. If None, it is - inferred from the variable. - seed: random seed - - Note: It is recommended to set fan_in and fan_out to None for - most cases. 
- """ assert uniform is not None assert seed is not None super(XavierInitializer, self).__init__() @@ -309,7 +347,7 @@ class XavierInitializer(Initializer): if self._uniform: limit = np.sqrt(6.0 / float(fan_in + fan_out)) - op = block.prepend_op( + op = block._prepend_op( type="uniform_random", outputs={"Out": var}, attrs={ @@ -322,7 +360,7 @@ class XavierInitializer(Initializer): else: std = np.sqrt(2.0 / float(fan_in + fan_out)) - op = block.prepend_op( + op = block._prepend_op( type="gaussian_random", outputs={"Out": var}, attrs={ @@ -340,30 +378,42 @@ class MSRAInitializer(Initializer): """Implements the MSRA initializer a.k.a. Kaiming Initializer This class implements the weight initialization from the paper - Delving Deep into Rectifiers: Surpassing Human-Level Performance on - ImageNet Classification[1] by Kaiming He, Xiangyu Zhang, Shaoqing Ren - and Jian Sun. This is a robust initialization method that particularly - considers the rectifier nonlinearities. In case of Uniform distribution, - the range is [-x, x], where x = sqrt(6 / fan_in). In case of Normal - distribution, the mean is 0 and the standard deviation - is sqrt(2/ fan_in). - - References: - [1] Delving Deep into Rectifiers: Surpassing Human-Level Performance - on ImageNet Classification - (https://arxiv.org/abs/1502.01852) + `Delving Deep into Rectifiers: Surpassing Human-Level Performance on + ImageNet Classification `_ + by Kaiming He, Xiangyu Zhang, Shaoqing Ren and Jian Sun. This is a + robust initialization method that particularly considers the rectifier + nonlinearities. In case of Uniform distribution, the range is [-x, x], where + + .. math:: + + x = \sqrt{\\frac{6.0}{fan\_in}} + + In case of Normal distribution, the mean is 0 and the standard deviation + is + + .. math:: + + \sqrt{\\frac{2.0}{fan\_in}} + + Args: + uniform (bool): whether to use uniform or normal distribution + fan_in (float): fan_in for MSRAInitializer. If None, it is\ + inferred from the variable. + seed (int): random seed + + Note: + It is recommended to set fan_in to None for most cases. + + Examples: + .. code-block:: python + + fc = fluid.layers.fc( + input=queries, size=10, + param_attr=fluid.initializer.MSRA(uniform=False)) """ def __init__(self, uniform=True, fan_in=None, seed=0): """Constructor for MSRAInitializer - - Args: - uniform: whether to use uniform or normal distribution - fan_in: fan_in for MSRAInitializer. If None, it is - inferred from the variable. - seed: random seed - - Note: It is recommended to set fan_in to None for most cases. """ assert uniform is not None assert seed is not None @@ -395,7 +445,7 @@ class MSRAInitializer(Initializer): if self._uniform: limit = np.sqrt(6.0 / float(fan_in)) - op = block.prepend_op( + op = block._prepend_op( type="uniform_random", outputs={"Out": var}, attrs={ @@ -408,7 +458,7 @@ class MSRAInitializer(Initializer): else: std = np.sqrt(2.0 / float(fan_in)) - op = block.prepend_op( + op = block._prepend_op( type="gaussian_random", outputs={"Out": var}, attrs={ @@ -422,6 +472,104 @@ class MSRAInitializer(Initializer): return op +class BilinearInitializer(Initializer): + """ + This initializer can be used in transposed convolution operator to + act as upsampling. Users can upsample a feature map with shape of + (B, C, H, W) by any integer factor. The usage is: + + Examples: + + .. 
code-block:: python + + factor = 2 + w_attr = ParamAttr(learning_rate=0., regularizer=L2Decay(0.), + initializer=Bilinear()) + conv_up = fluid.layers.conv2d_transpose( + input, + num_filters=C, + output_size=None, + filter_size=2 * factor - factor % 2, + padding=ceil((factor - 1) / 2.), + stride=factor, + groups=C, + param_attr=w_attr, + bias_attr=False) + + Here, `num_filters=C` and `groups=C` mean this is a channel-wise transposed + convolution. The filter shape will be (C, 1, K, K) where K is `filter_size`. + This initializer will set a (K, K) interpolation kernel for every channel + of the filter identically. The resulting shape of the output feature map + will be (B, C, factor * H, factor * W). Note that the learning rate and the + weight decay are set to 0 in order to keep coefficient values of bilinear + interpolation unchanged during training. + + """ + + def __init__(self): + """Constructor for BilinearInitializer. + """ + super(BilinearInitializer, self).__init__() + + def __call__(self, var, block): + """Add bilinear initialization ops for a variable + + Args: + var (Variable): Variable that needs to be initialized. + block (Block): The block in which initialization ops should + be added. + + Returns: + Operator: the initialization op + + Raises: + ValueError: If the type of `var` or `block` is not right, + if the length of the shape of `var` is not 4, or if + var.shape[2] != var.shape[3]. + """ + if not isinstance(var, framework.Variable): + raise ValueError("var must be framework.Variable.") + + if not isinstance(block, framework.Block): + raise ValueError("block must be framework.Block.") + + shape = var.shape + if len(shape) != 4: + raise ValueError("the length of shape must be 4.") + if shape[2] != shape[3]: + raise ValueError("shape[2] must be equal to shape[3].") + + weight = np.zeros(np.prod(var.shape), dtype='float32') + size = shape[3] + # factor + f = np.ceil(size / 2.) + # center + c = (2 * f - 1 - f % 2) / (2. * f) + for i in range(np.prod(shape)): + x = i % size + y = (i // size) % size # floor division keeps the index integral under Python 3 + weight[i] = (1 - abs(x / f - c)) * (1 - abs(y / f - c)) + weight = np.reshape(weight, shape) + + if var.dtype == VarDesc.VarType.FP32: + value_name = "fp32_values" + values = [float(v) for v in weight.flat] + else: + raise ValueError("Unsupported dtype %s" % var.dtype) + if np.prod(shape) > 1024 * 1024: + raise ValueError("The size of input is too big.") + op = block.append_op( + type='assign_value', + outputs={'Out': [var]}, + attrs={ + 'dtype': var.dtype, + 'shape': list(shape), + value_name: values + }) + var.op = op + return op + + # We short the class name, since users will use the initializer with the package # name. The sample code: # @@ -436,3 +584,4 @@ Uniform = UniformInitializer Normal = NormalInitializer Xavier = XavierInitializer MSRA = MSRAInitializer +Bilinear = BilinearInitializer diff --git a/python/paddle/fluid/io.py b/python/paddle/fluid/io.py index 08b8a878b6..af73421032 100644 --- a/python/paddle/fluid/io.py +++ b/python/paddle/fluid/io.py @@ -13,41 +13,62 @@ # limitations under the License. import os +import errno +import time +import shutil +import six from paddle.fluid.evaluator import Evaluator -from paddle.fluid.framework import Program, Parameter, default_main_program, Variable +from paddle.fluid.framework import Program, Parameter, default_main_program, default_startup_program, Variable from .
import core __all__ = [ - 'save_vars', - 'save_params', - 'save_persistables', - 'load_vars', - 'load_params', - 'load_persistables', - 'save_inference_model', - 'load_inference_model', - 'get_inference_program', + 'save_vars', 'save_params', 'save_persistables', 'load_vars', 'load_params', + 'load_persistables', 'save_inference_model', 'load_inference_model', + 'get_inference_program' ] def is_parameter(var): - """Check whether the variable is a Parameter. - - This function checks whether the input variable is a Parameter. + """ + Check whether the given variable is an instance of Parameter. Args: - var : The input variable. + var(Variable): The variable to be checked. Returns: - boolean result whether the variable is a Parameter. + bool: True if the given `var` is an instance of Parameter, + False if not. + + Examples: + .. code-block:: python + + param = fluid.default_main_program().global_block().var('fc.w') + res = fluid.io.is_parameter(param) """ return isinstance(var, Parameter) def is_persistable(var): + """ + Check whether the given variable is persistable. + + Args: + var(Variable): The variable to be checked. + + Returns: + bool: True if the given `var` is persistable, + False if not. + + Examples: + .. code-block:: python + + param = fluid.default_main_program().global_block().var('fc.w') + res = fluid.io.is_persistable(param) """ if var.desc.type() == core.VarDesc.VarType.FEED_MINIBATCH or \ - var.desc.type() == core.VarDesc.VarType.FETCH_LIST: + var.desc.type() == core.VarDesc.VarType.FETCH_LIST or \ + var.desc.type() == core.VarDesc.VarType.READER: return False return var.persistable @@ -70,20 +91,69 @@ def save_vars(executor, predicate=None, filename=None): """ - Save variables to directory by executor. - - :param executor: executor that save variable - :param dirname: directory path - :param main_program: program. If vars is None, then filter all variables in this - program which fit `predicate`. Default default_main_program. - :param predicate: The Predicate describes a callable that returns a variable - as a bool. If it returns true, the corresponding input variable will be saved. - :param vars: variables need to be saved. If vars is specified, program & predicate - will be ignored - :param filename: The name of a single file that all vars are saved to. - If it is None, save variables to separate files. - - :return: None + Save variables to the given directory by executor. + + There are two ways to specify variables to be saved: The first way, list + variables in a list and assign it to the `vars`. The second way, assign the + `main_program` with an existing program, then all variables in the program + will be saved. The first way has a higher priority. In other words, if `vars` + is assigned, the `main_program` and the `predicate` will be ignored. + + The `dirname` is used to specify the folder where the variables are to be + saved. If you prefer to save variables in separate files in the folder `dirname`, + set `filename` None; if you prefer to save all variables in a single file, + use `filename` to specify it. + + Args: + executor(Executor): The executor to run for saving variables. + dirname(str): The directory path. + main_program(Program|None): The program whose variables will be saved. + If it is None, the default main program will + be used automatically. + Default: None + vars(list[Variable]|None): The list that contains all variables to save. + It has a higher priority than the `main_program`.
+ Default: None + predicate(function|None): If it is not None, only variables in the + `main_program` that make predicate(variable)==True + will be saved. It only works when we are using the + `main_program` to specify variables (in other words, + `vars` is None). + Default: None + filename(str|None): The file to save all variables in. If you prefer to save + variables separately, set it to None. + Default: None + + Returns: + None + + Raises: + TypeError: If `main_program` is not an instance of Program nor None. + + Examples: + .. code-block:: python + + exe = fluid.Executor(fluid.CPUPlace()) + param_path = "./my_paddle_model" + + # The first usage: using `main_program` to specify variables + def name_has_fc(var): + res = "fc" in var.name + return res + + prog = fluid.default_main_program() + fluid.io.save_vars(executor=exe, dirname=param_path, main_program=prog, + vars=None) + # All variables in `main_program` whose name includes "fc" will be saved. + # And variables are going to be saved separately. + + + # The second usage: using `vars` to specify variables + var_list = [var_a, var_b, var_c] + fluid.io.save_vars(executor=exe, dirname=param_path, vars=var_list, + filename="vars_file") + # var_a, var_b and var_c will be saved. And they are going to be + # saved in the same file named 'vars_file' in the path "./my_paddle_model". """ if vars is None: if main_program is None: @@ -94,7 +164,7 @@ def save_vars(executor, save_vars( executor, dirname=dirname, - vars=filter(predicate, main_program.list_vars()), + vars=list(filter(predicate, main_program.list_vars())), filename=filename) else: save_program = Program() @@ -131,7 +201,42 @@ def save_vars(executor, def save_params(executor, dirname, main_program=None, filename=None): """ - Save all parameters to directory with executor. + This function filters out all parameters from the given `main_program` + and then saves them to the folder `dirname` or the file `filename`. + + Use the `dirname` to specify the saving folder. If you would like to + save parameters in separate files, set `filename` None; if you would + like to save all parameters in a single file, use `filename` to specify + the file name. + + NOTICE: Some variables are not Parameters, but they are necessary for + training. So you can NOT save and continue your training just by + `save_params()` and `load_params()`. Please use `save_persistables()` + and `load_persistables()` instead. + + Args: + executor(Executor): The executor to run for saving parameters. + dirname(str): The saving directory path. + main_program(Program|None): The program whose parameters will be + saved. If it is None, the default + main program will be used automatically. + Default: None + filename(str|None): The file to save all parameters. If you prefer + to save parameters in different files, set it + to None. + Default: None + + Returns: + None + + Examples: + .. code-block:: python + + exe = fluid.Executor(fluid.CPUPlace()) + param_path = "./my_paddle_model" + prog = fluid.default_main_program() + fluid.io.save_params(executor=exe, dirname=param_path, + main_program=None) """ save_vars( executor, @@ -144,7 +249,37 @@ def save_params(executor, dirname, main_program=None, filename=None): def save_persistables(executor, dirname, main_program=None, filename=None): """ - Save all persistables to directory with executor. + This function filters out all variables with `persistable==True` from the + given `main_program` and then saves these variables to the folder `dirname` + or file `filename`.
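As the NOTICE above implies, a resumable-training checkpoint should go through the persistable variants rather than `save_params()`. A minimal sketch, assuming a network and optimizer have already been built and `exe` has run the startup program (the directory name is hypothetical):

.. code-block:: python

    import paddle.fluid as fluid

    exe = fluid.Executor(fluid.CPUPlace())
    exe.run(fluid.default_startup_program())
    ckpt_dir = "./my_checkpoint"

    # ... train for a while, then persist parameters *and* the other
    # persistable state (e.g. optimizer accumulators) in one call:
    fluid.io.save_persistables(executor=exe, dirname=ckpt_dir)

    # In a fresh process that has rebuilt the same program, restore
    # everything and resume training:
    fluid.io.load_persistables(executor=exe, dirname=ckpt_dir)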
+ + The `dirname` is used to specify the folder where persistable variables + are going to be saved. If you would like to save variables in separate + files, set `filename` None; if you would like to save all variables in a + single file, use `filename` to specify the file name. + + Args: + executor(Executor): The executor to run for saving persistable variables. + dirname(str): The directory path. + main_program(Program|None): The program whose persistable variables will + be saved. If it is None, the default main + program will be used automatically. + Default: None + filename(str|None): The file to save all variables. If you prefer to + save variables in different files, set it to None. + Default: None + + Returns: + None + + Examples: + .. code-block:: python + + exe = fluid.Executor(fluid.CPUPlace()) + param_path = "./my_paddle_model" + prog = fluid.default_main_program() + fluid.io.save_persistables(executor=exe, dirname=param_path, + main_program=None) """ save_vars( executor, @@ -162,20 +297,69 @@ def load_vars(executor, predicate=None, filename=None): """ - Load variables from directory by executor. - - :param executor: executor that load variable - :param dirname: directory path - :param main_program: program. If vars is None, then filter all variables in this - program which fit `predicate`. Default default_main_program(). - :param predicate: The Predicate describes a callable that returns a variable - as a bool. If it returns true, the corresponding input variable will be loaded. - :param vars: variables need to be loaded. If vars is specified, program & - predicate will be ignored - :param filename: The name of the single file that all vars are loaded from. - If it is None, load variables from separate files. - - :return: None + Load variables from the given directory by executor. + + There are two ways to specify variables to be loaded: The first way, list + variables in a list and assign it to the `vars`. The second way, assign the + `main_program` with an existing program, then all variables in the program + will be loaded. The first way has a higher priority. In other words, if `vars` + is assigned, the `main_program` and the `predicate` will be ignored. + + The `dirname` is used to specify the folder from which to load variables. + If variables were saved in separate files in the folder `dirname`, + set `filename` None; if all variables were saved in a single file, + use `filename` to specify it. + + Args: + executor(Executor): The executor to run for loading variables. + dirname(str): The directory path. + main_program(Program|None): The program whose variables will be loaded. + If it is None, the default main program will + be used automatically. + Default: None + vars(list[Variable]|None): The list that contains all variables to load. + It has a higher priority than the `main_program`. + Default: None + predicate(function|None): If it is not None, only variables in the + `main_program` that make predicate(variable)==True + will be loaded. It only works when we are using the + `main_program` to specify variables (in other words, + `vars` is None). + Default: None + filename(str|None): The file which saved all required variables. If variables + were saved in different files, set it to None. + Default: None + + Returns: + None + + Raises: + TypeError: If `main_program` is not an instance of Program nor None. + + Examples: + ..
code-block:: python + + exe = fluid.Executor(fluid.CPUPlace()) + param_path = "./my_paddle_model" + + # The first usage: using `main_program` to specify variables + def name_has_fc(var): + res = "fc" in var.name + return res + + prog = fluid.default_main_program() + fluid.io.load_vars(executor=exe, dirname=param_path, main_program=prog, + vars=None) + # All variables in `main_program` whose name includes "fc" will be loaded. + # And all the variables are supposed to have been saved in different files. + + + # The second usage: using `vars` to specify variables + var_list = [var_a, var_b, var_c] + fluid.io.load_vars(executor=exe, dirname=param_path, vars=var_list, + filename="vars_file") + # var_a, var_b and var_c will be loaded. And they are supposed to have + # been saved in the same file named 'vars_file' in the path "./my_paddle_model". """ if vars is None: if main_program is None: @@ -186,7 +370,7 @@ def load_vars(executor, load_vars( executor, dirname=dirname, - vars=filter(predicate, main_program.list_vars()), + vars=list(filter(predicate, main_program.list_vars())), filename=filename) else: load_prog = Program() @@ -195,6 +379,8 @@ def load_vars(executor, load_var_map = {} for each_var in vars: assert isinstance(each_var, Variable) + if each_var.type == core.VarDesc.VarType.RAW: + continue new_var = _clone_var_in_block_(load_block, each_var) if filename is None: load_block.append_op( @@ -221,7 +407,42 @@ def load_vars(executor, def load_params(executor, dirname, main_program=None, filename=None): """ - load all parameters from directory by executor. + This function filters out all parameters from the given `main_program` + and then tries to load these parameters from the folder `dirname` or + the file `filename`. + + Use the `dirname` to specify the folder where parameters were saved. If + parameters were saved in separate files in the folder `dirname`, set + `filename` None; if all parameters were saved in a single file, use + `filename` to specify the file name. + + NOTICE: Some variables are not Parameters, but they are necessary for + training. So you can NOT save and continue your training just by + `save_params()` and `load_params()`. Please use `save_persistables()` + and `load_persistables()` instead. + + Args: + executor(Executor): The executor to run for loading parameters. + dirname(str): The directory path. + main_program(Program|None): The program whose parameters will be + loaded. If it is None, the default + main program will be used automatically. + Default: None + filename(str|None): The file which saved all parameters. If parameters + were saved in different files, set it to None. + Default: None + + Returns: + None + + Examples: + .. code-block:: python + + exe = fluid.Executor(fluid.CPUPlace()) + param_path = "./my_paddle_model" + prog = fluid.default_main_program() + fluid.io.load_params(executor=exe, dirname=param_path, + main_program=None) """ load_vars( executor, @@ -233,7 +454,37 @@ def load_params(executor, dirname, main_program=None, filename=None): def load_persistables(executor, dirname, main_program=None, filename=None): """ - load all persistables from directory by executor. + This function filters out all variables with `persistable==True` from the + given `main_program` and then tries to load these variables from the folder + `dirname` or the file `filename`. + + Use the `dirname` to specify the folder where persistable variables were + saved. If variables were saved in separate files, set `filename` None; + if all variables were saved in a single file, use `filename` to specify + the file name.
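Since `save_persistables()`/`load_persistables()` are essentially `save_vars()`/`load_vars()` specialized with a persistability predicate, a custom predicate can be passed directly. A sketch under the same assumptions as the surrounding examples (`exe` built, default main program in use; `fc_persistables` is a hypothetical helper):

.. code-block:: python

    import paddle.fluid as fluid

    exe = fluid.Executor(fluid.CPUPlace())

    # Save only persistable variables whose name mentions "fc":
    def fc_persistables(var):
        return fluid.io.is_persistable(var) and "fc" in var.name

    fluid.io.save_vars(executor=exe, dirname="./fc_vars",
                       main_program=fluid.default_main_program(),
                       predicate=fc_persistables)

    # Loading back uses the same predicate:
    fluid.io.load_vars(executor=exe, dirname="./fc_vars",
                       main_program=fluid.default_main_program(),
                       predicate=fc_persistables)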
+ + Args: + executor(Executor): The executor to run for loading persistable variables. + dirname(str): The directory path. + main_program(Program|None): The program whose persistable variables will + be loaded. If it is None, the default main + program will be used automatically. + Default: None + filename(str|None): The file which saved all variables. If variables were + saved in different files, set it to None. + Default: None + + Returns: + None + + Examples: + .. code-block:: python + + exe = fluid.Executor(fluid.CPUPlace()) + param_path = "./my_paddle_model" + prog = fluid.default_main_program() + fluid.io.load_persistables(executor=exe, dirname=param_path, + main_program=None) """ load_vars( executor, @@ -263,6 +514,9 @@ def get_inference_program(target_vars, main_program=None): def prepend_feed_ops(inference_program, feed_target_names, feed_holder_name='feed'): + if len(feed_target_names) == 0: + return + global_block = inference_program.global_block() feed_var = global_block.create_var( name=feed_holder_name, @@ -271,7 +525,7 @@ def prepend_feed_ops(inference_program, for i, name in enumerate(feed_target_names): out = global_block.var(name) - global_block.prepend_op( + global_block._prepend_op( type='feed', inputs={'X': [feed_var]}, outputs={'Out': [out]}, @@ -301,31 +555,73 @@ def save_inference_model(dirname, executor, main_program=None, model_filename=None, - params_filename=None): + params_filename=None, + export_for_deployment=True): """ - Build a model especially for inference, - and save it to directory by the executor. - - :param dirname: directory path - :param feeded_var_names: Names of variables that need to be feeded data during inference - :param target_vars: Variables from which we can get inference results. - :param executor: executor that save inference model - :param main_program: original program, which will be pruned to build the inference model. - Default default_main_program(). - :param model_filename: The name of file to save inference program. - If not specified, default filename `__model__` will be used. - :param params_filename: The name of file to save parameters. - It is used for the case that all parameters are saved in a single binary file. - If not specified, parameters are considered saved in separate files. - - :return: None + Prune the given `main_program` to build a new program especially for inference, + and then save it and all related parameters to given `dirname` by the `executor`. + + Args: + dirname(str): The directory path to save the inference model. + feeded_var_names(list[str]): Names of variables that need to be fed data + during inference. + target_vars(list[Variable]): Variables from which we can get inference + results. + executor(Executor): The executor that saves the inference model. + main_program(Program|None): The original program, which will be pruned to + build the inference model. If it is set to None, + the default main program will be used. + Default: None. + model_filename(str|None): The name of file to save the inference program + itself. If it is set to None, a default filename + `__model__` will be used. + params_filename(str|None): The name of file to save all related parameters. + If it is set to None, parameters will be saved + in separate files. + export_for_deployment(bool): Remove the read ops that are added by py_reader + for cpp inference lib.
+ Default: True + + Returns: + None + + Raises: + ValueError: If `feed_var_names` is not a list of basestring. + ValueError: If `target_vars` is not a list of Variable. + + Examples: + .. code-block:: python + + exe = fluid.Executor(fluid.CPUPlace()) + path = "./infer_model" + fluid.io.save_inference_model(dirname=path, feeded_var_names=['img'], + target_vars=[predict_var], executor=exe) + + # In this example, the function will prune the default main program + # to make it suitable for inferring the `predict_var`. The pruned + # inference program is going to be saved in the "./infer_model/__model__" + # and parameters are going to be saved in separate files under folder + # "./infer_model". + """ - if isinstance(feeded_var_names, basestring): + if isinstance(feeded_var_names, six.binary_type): feeded_var_names = [feeded_var_names] + elif isinstance(feeded_var_names, six.text_type): + feeded_var_names = [feeded_var_names.encode()] else: - if not (bool(feeded_var_names) and all( - isinstance(name, basestring) for name in feeded_var_names)): - raise ValueError("'feed_var_names' should be a list of str.") + if len(feeded_var_names) > 0: + # TODO(paddle-dev): polish these code blocks + if not (bool(feeded_var_names) and all( + isinstance(name, six.binary_type) + for name in feeded_var_names)): + if not (all( + isinstance(name, six.text_type) + for name in feeded_var_names)): + raise ValueError( + "'feed_var_names' should be a list of str.") + else: + feeded_var_names = [ + name.encode() for name in feeded_var_names + ] if isinstance(target_vars, Variable): target_vars = [target_vars] @@ -346,11 +642,12 @@ def save_inference_model(dirname, for i, op in enumerate(global_block.ops): op.desc.set_is_target(False) if op.type == "feed" or op.type == "fetch": - global_block.remove_op(i) + global_block._remove_op(i) copy_program.desc.flush() pruned_program = copy_program.prune(targets=target_vars) - inference_program = pruned_program.inference_optimize() + inference_program = pruned_program.inference_optimize( + export_for_deployment=export_for_deployment) fetch_var_names = [v.name for v in target_vars] prepend_feed_ops(inference_program, feeded_var_names) @@ -378,18 +675,49 @@ def load_inference_model(dirname, """ Load inference model from a directory - :param dirname: directory path - :param executor: executor that load inference model - :param model_filename: The name of file to load inference program. - If not specified, default filename `__model__` will be used. - :param params_filename: The name of file to load parameters. - It is used for the case that all parameters are saved in a single binary file. - If not specified, parameters are considered saved in separate files. - - :return: [program, feed_target_names, fetch_targets] - program: program especially for inference. - feed_target_names: Names of variables that need to feed data - fetch_targets: Variables from which we can get inference results. + Args: + dirname(str): The directory path + executor(Executor): The executor to run for loading inference model. + model_filename(str|None): The name of file to load inference program. + If it is None, the default filename + '__model__' will be used. + Default: None + params_filename(str|None): The name of file to load all parameters. + It is only used for the case that all + parameters were saved in a single binary + file. If parameters were saved in separate + files, set it as 'None'.
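The `model_filename`/`params_filename` pair given to `save_inference_model` must be passed back to `load_inference_model` unchanged. A sketch of the single-file layout, assuming `predict_var` and the 'img' input come from the user's network as in the example above (the file names 'model' and 'params' are made up):

.. code-block:: python

    import paddle.fluid as fluid

    exe = fluid.Executor(fluid.CPUPlace())
    path = "./infer_model"

    # Pack the pruned program into 'model' and all parameters into 'params'.
    fluid.io.save_inference_model(dirname=path, feeded_var_names=['img'],
                                  target_vars=[predict_var], executor=exe,
                                  model_filename='model',
                                  params_filename='params')

    # The same two file names must be given back when loading.
    program, feed_names, fetch_targets = fluid.io.load_inference_model(
        dirname=path, executor=exe,
        model_filename='model', params_filename='params')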
+ + Returns: + tuple: The return of this function is a tuple with three elements: + (program, feed_target_names, fetch_targets). The `program` is a + Program, and it is the program for inference. The `feed_target_names` is + a list of str, and it contains the names of variables that need to feed + data in the inference program. The `fetch_targets` is a list of + Variable. It contains variables from which we can get inference + results. + + Raises: + ValueError: If `dirname` is not an existing directory. + + Examples: + .. code-block:: python + + exe = fluid.Executor(fluid.CPUPlace()) + path = "./infer_model" + [inference_program, feed_target_names, fetch_targets] = \ + fluid.io.load_inference_model(dirname=path, executor=exe) + results = exe.run(inference_program, + feed={feed_target_names[0]: tensor_img}, + fetch_list=fetch_targets) + + # In this example, the inference program was saved in the + # "./infer_model/__model__" and parameters were saved in + # separate files in "./infer_model". + # After getting inference program, feed target names and + # fetch targets, we can use an Executor to run the inference + # program to get the inference result. + """ if not os.path.isdir(dirname): raise ValueError("There is no directory named '%s'", dirname) @@ -420,12 +748,25 @@ def load_inference_model(dirname, def get_parameter_value(para, executor): """ - Get the LoDTensor for the parameter + Get the LoDTensor value of the given parameter. + + Args: + para(Parameter): The parameter to get value from. + executor(Executor): The executor to run for retrieving the value. + + Returns: + numpy.array: The given parameter's values. + + Raises: + AssertionError: If the `para` is not an instance of Parameter. - :param executor: executor for retrieving the value - :param para: the given parameter + Examples: + .. code-block:: python + + exe = fluid.Executor(fluid.CPUPlace()) + param = fluid.default_main_program().global_block().var('fc.w') + p = fluid.io.get_parameter_value(param, exe) - :return: the LoDTensor for the parameter """ assert is_parameter(para) @@ -437,14 +778,30 @@ def get_parameter_value(para, executor): def get_parameter_value_by_name(name, executor, program=None): """ - Get the LoDTensor for paramter with the given name + Get the LoDTensor value of a certain parameter by its name. + + Args: + name(str): The parameter's name. + executor(Executor): The executor to run for retrieving the value. + program(Program | None): The program in which to find the parameter. + If it's set to be None, the function will + try to find the parameter in the default + main program. + + Returns: + numpy.array: The parameter's values. + + Raises: + TypeError: If given `name` is not an instance of basestring. + TypeError: If the parameter with the given name doesn't exist. + AssertionError: If there is a variable named `name` in the + given program but it is not a Parameter. - :param executor: executor for retrieving the value - :param name: the name of the parameter - :param program: the program where the variable is found - Default default_main_program(). + Examples: + ..
code-block:: python - :return: the LoDTensor for the variable + exe = fluid.Executor(fluid.CPUPlace()) + p = fluid.io.get_parameter_value_by_name('fc.w', exe) """ if program is None: program = default_main_program() diff --git a/python/paddle/fluid/layer_helper.py b/python/paddle/fluid/layer_helper.py index 86efd1ff51..0c2b1eb795 100644 --- a/python/paddle/fluid/layer_helper.py +++ b/python/paddle/fluid/layer_helper.py @@ -14,12 +14,14 @@ import copy import itertools +import six -from framework import Variable, Parameter, default_main_program, default_startup_program, dtype_is_floating -import unique_name +from .framework import Variable, Parameter, default_main_program, default_startup_program, dtype_is_floating +from . import unique_name from paddle.fluid.initializer import Constant, Xavier -from param_attr import ParamAttr, WeightNormParamAttr -import core +from .param_attr import ParamAttr, WeightNormParamAttr +from . import core +from six.moves import zip class LayerHelper(object): @@ -68,11 +70,11 @@ class LayerHelper(object): @property def param_attr(self): - return ParamAttr.to_attr(self.kwargs.get('param_attr', None)) + return ParamAttr._to_attr(self.kwargs.get('param_attr', None)) @property def bias_attr(self): - return ParamAttr.to_attr(self.kwargs.get('bias_attr', None)) + return ParamAttr._to_attr(self.kwargs.get('bias_attr', None)) def multiple_param_attr(self, length): param_attr = self.param_attr @@ -83,7 +85,7 @@ class LayerHelper(object): raise ValueError("parameter number mismatch") elif len(param_attr) == 1 and length != 1: tmp = [None] * length - for i in xrange(length): + for i in range(length): tmp[i] = copy.deepcopy(param_attr[0]) param_attr = tmp return param_attr @@ -91,7 +93,7 @@ class LayerHelper(object): def iter_inputs_and_params(self, input_param_name='input'): inputs = self.multiple_input(input_param_name) param_attrs = self.multiple_param_attr(len(inputs)) - for ipt, param_attr in itertools.izip(inputs, param_attrs): + for ipt, param_attr in zip(inputs, param_attrs): yield ipt, param_attr def input_dtype(self, input_param_name='input'): @@ -218,7 +220,7 @@ class LayerHelper(object): norm = __norm_op(reshape, dim=0, block=block) __reshape_op(norm, out=out, shape=out_shape, block=block) else: - perm = range(len(x.shape)) + perm = list(range(len(x.shape))) perm[0], perm[dim] = dim, 0 transpose = __transpose_op(x, perm, block=block) norm = __norm_op(transpose, dim=0, block=block) @@ -262,11 +264,11 @@ class LayerHelper(object): g_param = self.startup_program.global_block().create_parameter( dtype=dtype, shape=g_param_shape, - **g_param_attr.to_kwargs(with_initializer=False)) + **g_param_attr._to_kwargs(with_initializer=False)) v_param = self.startup_program.global_block().create_parameter( dtype=dtype, shape=v_param_shape, - **v_param_attr.to_kwargs(with_initializer=True)) + **v_param_attr._to_kwargs(with_initializer=True)) __norm_except_dim( x=v_param, out=g_param, @@ -275,9 +277,9 @@ class LayerHelper(object): # Add weight normalization to main_program g_param = self.main_program.global_block().create_parameter( - dtype=dtype, shape=g_param_shape, **g_param_attr.to_kwargs()) + dtype=dtype, shape=g_param_shape, **g_param_attr._to_kwargs()) v_param = self.main_program.global_block().create_parameter( - dtype=dtype, shape=v_param_shape, **v_param_attr.to_kwargs()) + dtype=dtype, shape=v_param_shape, **v_param_attr._to_kwargs()) w_param = __weight_normalize(g_param, v_param, dim=attr.dim) return w_param @@ -296,11 +298,11 @@ class LayerHelper(object): if
default_initializer is None and attr.initializer is None: if is_bias: - attr.set_default_bias_initializer() + attr._set_default_bias_initializer() else: - attr.set_default_param_initializer() + attr._set_default_param_initializer() else: - attr.set_default_initializer(default_initializer) + attr._set_default_initializer(default_initializer) # If weight normalization is set, insert extra parameters and ops. # Refer to https://arxiv.org/pdf/1602.07868.pdf @@ -310,9 +312,9 @@ class LayerHelper(object): return param self.startup_program.global_block().create_parameter( - dtype=dtype, shape=shape, **attr.to_kwargs(with_initializer=True)) + dtype=dtype, shape=shape, **attr._to_kwargs(with_initializer=True)) return self.main_program.global_block().create_parameter( - dtype=dtype, shape=shape, **attr.to_kwargs()) + dtype=dtype, shape=shape, **attr._to_kwargs()) def get_parameter(self, name): param = self.main_program.global_block().var(name) @@ -397,8 +399,10 @@ class LayerHelper(object): act = self.kwargs.get('act', None) if act is None: return input_var - if isinstance(act, basestring): + if isinstance(act, six.string_types): act = {'type': act} + else: + raise TypeError(str(act) + " should be unicode or str") if 'use_cudnn' in self.kwargs and self.kwargs.get('use_cudnn'): act['use_cudnn'] = self.kwargs.get('use_cudnn') diff --git a/python/paddle/fluid/layers/__init__.py b/python/paddle/fluid/layers/__init__.py index a568f61dcb..a48e360463 100644 --- a/python/paddle/fluid/layers/__init__.py +++ b/python/paddle/fluid/layers/__init__.py @@ -12,28 +12,27 @@ # See the License for the specific language governing permissions and # limitations under the License. -import ops -from ops import * -import nn -from nn import * -import io -from io import * -import tensor -from tensor import * -import control_flow -from control_flow import * -import device -from device import * -import math_op_patch -from math_op_patch import * -import detection -from detection import * -import metric -from metric import * -from learning_rate_scheduler import * +from . import ops +from .ops import * +from . import nn +from .nn import * +from . import io +from .io import * +from . import tensor +from .tensor import * +from . import control_flow +from .control_flow import * +from . import device +from .device import * +from . import math_op_patch +from .math_op_patch import * +from . import detection +from .detection import * +from . import metric_op +from .metric_op import * +from .learning_rate_scheduler import * __all__ = [] -__all__ += math_op_patch.__all__ __all__ += nn.__all__ __all__ += io.__all__ __all__ += tensor.__all__ @@ -41,5 +40,5 @@ __all__ += control_flow.__all__ __all__ += ops.__all__ __all__ += device.__all__ __all__ += detection.__all__ -__all__ += metric.__all__ +__all__ += metric_op.__all__ __all__ += learning_rate_scheduler.__all__ diff --git a/python/paddle/fluid/layers/control_flow.py b/python/paddle/fluid/layers/control_flow.py index 4b707973e2..9fb7b4d0ca 100644 --- a/python/paddle/fluid/layers/control_flow.py +++ b/python/paddle/fluid/layers/control_flow.py @@ -13,75 +13,69 @@ # limitations under the License. import contextlib -from layer_function_generator import autodoc -from tensor import assign, fill_constant +from .layer_function_generator import autodoc, templatedoc +from .tensor import assign, fill_constant from .. 
import core from ..framework import Program, Variable, Operator from ..layer_helper import LayerHelper, unique_name from ..initializer import force_init_on_cpu -from ops import logical_and, logical_not, logical_or +from .ops import logical_and, logical_not, logical_or +import numpy +import warnings +from functools import reduce __all__ = [ - 'split_lod_tensor', - 'merge_lod_tensor', - 'BlockGuard', - 'BlockGuardWithCompletion', - 'StaticRNNMemoryLink', - 'WhileGuard', 'While', 'Switch', - 'lod_rank_table', - 'max_sequence_len', - 'lod_tensor_to_array', - 'array_to_lod_tensor', 'increment', 'array_write', 'create_array', 'less_than', 'equal', 'array_read', - 'shrink_memory', 'array_length', 'IfElse', 'DynamicRNN', - 'ConditionalBlock', 'StaticRNN', 'reorder_lod_tensor_by_rank', 'ParallelDo', 'Print', + 'is_empty', ] def split_lod_tensor(input, mask, level=0): """ - **split_lod_tensor** - This function takes in an input that contains the complete lod information, and takes in a mask which is used to mask certain parts of the input. The output is the true branch and the false branch with the mask applied to - the input at a certain level in the tensor. + the input at a certain level in the tensor. Mainly used in IfElse to split + data into two parts. Args: input(tuple|list|None): The input tensor that contains complete lod information needed to construct the output. mask(list): A bool column vector which masks the input. - level(int): The specific lod level to rank. + level(int): The specific lod level to split. Returns: - Variable: The true branch of tensor as per the mask applied to input. - Variable: The false branch of tensor as per the mask applied to input. + tuple(Variable, Variable): + The true branch of tensor as per the mask applied to input. + + The false branch of tensor as per the mask applied to input. Examples: .. code-block:: python - x = layers.data(name='x', shape=[1]) + x = fluid.layers.data(name='x', shape=[1]) x.persistable = True - y = layers.data(name='y', shape=[1]) + y = fluid.layers.data(name='y', shape=[1]) y.persistable = True - out_true, out_false = layers.split_lod_tensor( + out_true, out_false = fluid.layers.split_lod_tensor( input=x, mask=y, level=level) + """ helper = LayerHelper('split_lod_tensor', **locals()) out_true = helper.create_tmp_variable(dtype=input.dtype) @@ -104,8 +98,9 @@ def merge_lod_tensor(in_true, in_false, x, mask, level=0): This function takes in an input :math:`x`, the True branch, the False branch and a binary :math:`mask`. Using this information, this function - merges the True and False branches of the tensor into a single Output - at a certain lod level indiacted by :math:`level`. + merges the True and False branches of the tensor into a single tensor as + output at a certain lod level indicated by :math:`level`. Used in IfElse + to merge the outputs of the True block and the False block. Args: in_true(tuple|list|None): The True branch to be merged. @@ -113,7 +108,7 @@ def merge_lod_tensor(in_true, in_false, x, mask, level=0): x(tuple|list|None): The input tensor that contains complete lod information needed to construct the output. mask(list): A bool column vector which masks the input. - level(int): The specific lod level to rank. + level(int): The specific lod level to merge. Returns: Variable: The merged output tensor. @@ -181,12 +176,14 @@ def Print(input, Returns: Variable: Output tensor, same data with input tensor. + Examples: + .. code-block:: python - value = some_layer(...)
- Print(value, summarize=10, - message="The content of some_layer: ") + value = some_layer(...) + Print(value, summarize=10, + message="The content of some_layer: ") ''' helper = LayerHelper('print', **locals()) out = helper.create_tmp_variable(dtype=helper.input_dtype()) @@ -232,12 +229,62 @@ class BlockGuard(object): class ParallelDo(object): """ - ParallelDo class. + ParallelDo is used to represent multi-thread data parallel processing. + + Its vanilla implementation can be shown as the following (:math:`|` means + single thread and :math:`||||` means multiple threads) + + .. code-block:: text + + In the forward pass + | Split input onto different devices + | Copy parameter onto different devices + |||| Compute forward pass in parallel + | Merge output from different devices + + In the backward pass + | Split output@grad onto different devices + |||| Compute backward pass in parallel + | Accumulate param@grad from different devices to the first device + | Merge input@grad from different devices + | Copy param@grad to the place of parallel_do_op + + Examples: + + .. code-block:: python + + images = fluid.layers.data(name='pixel', shape=[1, 28, 28], dtype=DTYPE) + label = fluid.layers.data(name='label', shape=[1], dtype='int64') + + # ParallelDo version & Single-thread version + if thread_num > 1: + places = fluid.layers.get_places(thread_num) + pd = fluid.layers.ParallelDo(places) + with pd.do(): + images = pd.read_input(images) + label = pd.read_input(label) + predict = cnn_model(images) + cost = fluid.layers.cross_entropy(input=predict, label=label) + + avg_cost = fluid.layers.mean(x=cost) + pd.write_output(avg_cost) + + avg_cost = pd() + avg_cost = fluid.layers.mean(avg_cost) + else: + predict = cnn_model(images) + cost = fluid.layers.cross_entropy(input=predict, label=label) + avg_cost = fluid.layers.mean(x=cost) - ParallelDo class is used to create a ParallelDo. + .. warning:: + + It will soon be deprecated, please use ParallelExecutor instead. """ def __init__(self, places, use_nccl=False, name=None): + warnings.warn( + "API ParallelDo is deprecated since 0.15.0. Please use ParallelExecutor instead.", + Warning) self.helper = LayerHelper("parallel_do", name=name) self.inputs = [] self.places = places @@ -296,7 +343,7 @@ class ParallelDo(object): return [parent_block.var(name) for name in params] - def complete_op(self): + def _complete_op(self): main_program = self.helper.main_program current_block = main_program.current_block() parent_block = self.parent_block() @@ -352,7 +399,7 @@ class BlockGuardWithCompletion(BlockGuard): if exc_type is not None: return False self.rnn.status = StaticRNN.AFTER_RNN_BLOCK - self.rnn.complete_op() + self.rnn._complete_op() return super(BlockGuardWithCompletion, self).__exit__(exc_type, exc_val, exc_tb) @@ -361,16 +408,17 @@ class StaticRNNMemoryLink(object): """ StaticRNNMemoryLink class. - Args: - init: the initial variable for Memory - init: Variable - pre_mem: the memory variable in previous time step - pre_mem: Variable - mem: the memory variable in current time step - mem: Variable - StaticRNNMemoryLink class is used to create a link between two memory cells of a StaticRNN. + + + NOTE: This is an internal data structure of a very low-level API. + Please use StaticRNN instead. + + Args: + init(Variable): the initial variable for Memory. + pre_mem(Variable): the memory variable in previous time step. + mem(Variable): the memory variable in current time step.
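Since StaticRNNMemoryLink is internal, the user-facing pattern is the StaticRNN that owns these links. A rough sketch of that usage, assuming `x_emb` is a time-major input already created by the user; this mirrors how memory/update_memory are wired below and is an illustration rather than part of the patch:

.. code-block:: python

    import paddle.fluid as fluid

    rnn = fluid.layers.StaticRNN()
    with rnn.step():
        word = rnn.step_input(x_emb)          # one time step per iteration
        # boot memory: init -> pre_mem -> mem, re-linked every step
        prev = rnn.memory(shape=[-1, 200], batch_ref=word)
        hidden = fluid.layers.fc(input=[word, prev], size=200, act='relu')
        rnn.update_memory(prev, hidden)       # link prev to the new state
        rnn.step_output(hidden)
    out = rnn()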
""" def __init__(self, init, pre_mem, mem=None): @@ -427,7 +475,7 @@ class StaticRNN(object): if shape is None or batch_ref is None: raise ValueError( "if init is None, memory at least need shape and batch_ref") - parent_block = self.parent_block() + parent_block = self._parent_block() var_name = unique_name.generate("@".join( [self.helper.name, "memory_boot"])) boot_var = parent_block.create_var( @@ -484,7 +532,7 @@ class StaticRNN(object): outputs={'Out': tmp_o}, attrs={'dtype': o.dtype}) - out_var = self.parent_block().create_var( + out_var = self._parent_block().create_var( name=tmp_o.name, shape=[self.seq_len] + list(tmp_o.shape), dtype=tmp_o.dtype) @@ -500,7 +548,7 @@ class StaticRNN(object): raise TypeError("update memory should take variables") self.memories[mem.name].mem = var - def parent_block(self): + def _parent_block(self): prog = self.helper.main_program parent_idx = prog.current_block().parent_idx assert parent_idx >= 0 @@ -517,10 +565,10 @@ class StaticRNN(object): else: return self.outputs - def complete_op(self): + def _complete_op(self): main_program = self.helper.main_program rnn_block = main_program.current_block() - parent_block = self.parent_block() + parent_block = self._parent_block() local_inputs = set() @@ -554,7 +602,7 @@ class StaticRNN(object): boot_memories = [] pre_memories = [] memories = [] - for _, mem in self.memories.iteritems(): + for _, mem in list(self.memories.items()): boot_memories.append(mem.init) pre_memories.append(mem.pre_mem.name) mem_var = rnn_block.var(mem.mem.name) @@ -600,11 +648,34 @@ class WhileGuard(BlockGuard): if exc_type is not None: return False self.while_op.status = While.AFTER_WHILE_BLOCK - self.while_op.complete() + self.while_op._complete() return super(WhileGuard, self).__exit__(exc_type, exc_val, exc_tb) class While(object): + """ + while loop control flow. + + Args: + cond (Variable): condition used to compare. + name (str): The name of this layer. + + Examples: + .. code-block:: python + + d0 = layers.data("d0", shape=[10], dtype='float32') + data_array = layers.array_write(x=d0, i=i) + array_len = layers.fill_constant(shape=[1],dtype='int64', value=3) + + cond = layers.less_than(x=i, y=array_len) + while_op = layers.While(cond=cond) + with while_op.block(): + d = layers.array_read(array=data_array, i=i) + i = layers.increment(x=i, in_place=True) + layers.array_write(result, i=i, array=d) + layers.less_than(x=i, y=array_len, cond=cond) + """ + BEFORE_WHILE_BLOCK = 0 IN_WHILE_BLOCK = 1 AFTER_WHILE_BLOCK = 2 @@ -624,7 +695,7 @@ class While(object): def block(self): return WhileGuard(self) - def complete(self): + def _complete(self): main_program = self.helper.main_program while_block = main_program.current_block() parent_block = main_program.block(main_program.current_block() @@ -653,8 +724,10 @@ class While(object): parent_block.append_op( type='while', inputs={ - 'X': - [parent_block.var_recursive(x_name) for x_name in x_name_list], + 'X': [ + parent_block._var_recursive(x_name) + for x_name in x_name_list + ], 'Condition': [self.cond_var] }, outputs={'Out': out_vars, @@ -674,8 +747,8 @@ def lod_rank_table(x, level=0): .. code-block:: text x is a LoDTensor: - x.lod = [[0, 2, 3], - [0, 5, 6, 7]] + x.lod = [[2, 1], + [5, 1, 1]] x.data = [a, b, c, d, e, f, g] 1. set level to 0: @@ -705,7 +778,7 @@ def lod_rank_table(x, level=0): .. 
code-block:: python x = fluid.layers.data(name='x', shape=[10], - dtype='float32', lod_level=1) + dtype='float32', lod_level=1) out = layers.lod_rank_table(x=x, level=0) """ helper = LayerHelper("lod_rank_table", **locals()) @@ -720,26 +793,22 @@ def lod_rank_table(x, level=0): return table +@templatedoc() def max_sequence_len(rank_table): - """Max Sequence Len Operator. Given a LoDRankTable object, this layer - returns the max length of a batch of sequences. In fact, a LoDRankTable - object contains a list of tuples() and - the list is already sorted by sequence length in descending order, so the - operator just returns the sequence length of the first tuple element. + """ + ${comment} + + >>> import paddle.fluid as fluid + >>> x = fluid.layers.data(name='x', shape=[10], dtype='float32', + >>> lod_level=1) + >>> rank_table = layers.lod_rank_table(x=x, level=0) + >>> max_seq_len = layers.max_sequence_len(rank_table) Args: - rank_table (Variable): Input variable which is a LoDRankTable object. + rank_table(${rank_table_type}): ${rank_table_comment}. Returns: - Variable: The max length of sequence. - - Examples: - .. code-block:: python - - x = fluid.layers.data(name='x', shape=[10], - dtype='float32', lod_level=1) - rank_table = layers.lod_rank_table(x=x, level=0) - max_seq_len = layers.max_sequence_len(rank_table) + ${out_comment}. """ helper = LayerHelper("max_seqence_len", **locals()) res = helper.create_tmp_variable(dtype="int64") @@ -751,17 +820,25 @@ def max_sequence_len(rank_table): def lod_tensor_to_array(x, table): - """ Convert a LOD_TENSOR to an LOD_TENSOR_ARRAY. + """ + Convert a LoDTensor to a LoDTensorArray. + + This function splits a LoDTensor into a LoDTensorArray according to its LoD + information. LoDTensorArray is an alias of C++ std::vector<LoDTensor> in + PaddlePaddle. The LoDTensorArray generated by this function can be further read + or written by `read_from_array()` and `write_to_array()` operators. However, + this function is generally an internal component of PaddlePaddle `DynamicRNN`. + Users should not use it directly. Args: - x (Variable|list): The LOD tensor to be converted to a LOD tensor array. + x (Variable|list): The LoDTensor to be converted to a LoDTensorArray. table (ParamAttr|list): The variable that stores the level of lod which is ordered by sequence length in - descending order. + descending order. It is generally generated + by `layers.lod_rank_table()` API. Returns: - Variable: The variable of type array that has been converted from a - tensor. + Variable: The LoDTensorArray that has been converted from the input tensor. Examples: .. code-block:: python @@ -826,8 +903,7 @@ def increment(x, value=1.0, in_place=True): in_place (bool): If the increment should be performed in-place. Returns: - Variable: The tensor variable storing the transformation of - element-wise increment of each value in the input. + Variable: The elementwise-incremented object. Examples: .. code-block:: python @@ -869,7 +945,7 @@ def array_write(x, i, array=None): Variable: The output LOD_TENSOR_ARRAY where the input tensor is written. Examples: - .. code-block::python + .. code-block:: python tmp = fluid.layers.zeros(shape=[10], dtype='int32') i = fluid.layers.fill_constant(shape=[1], dtype='int64', value=10) @@ -890,14 +966,17 @@ def array_write(x, i, array=None): def create_array(dtype): - """This function creates an array of type :math:`LOD_TENSOR_ARRAY` using the - LayerHelper. + """ + **Create LoDTensorArray** + + This function creates an array of LOD_TENSOR_ARRAY.
It is mainly used to + implement RNN with array_write, array_read and While. Args: - dtype (int|float): The data type of the elements in the array. + dtype (int|float): The data type of the elements in the lod_tensor_array. Returns: - Variable: The tensor variable storing the elements of data type. + Variable: The lod_tensor_array variable storing the elements of data type. Examples: .. code-block:: python @@ -912,37 +991,40 @@ dtype=dtype) -def less_than(x, y, force_cpu=True, cond=None, **ignored): +@templatedoc() +def less_than(x, y, force_cpu=None, cond=None, **ignored): """ - **Less than** + ${comment} - This layer returns the truth value of :math:`x < y` elementwise. + >>> import paddle.fluid as fluid + >>> less = fluid.layers.less_than(x=label, y=limit) Args: - x(Variable): First operand of *less_than* - y(Variable): Second operand of *less_than* - force_cpu(Bool|True): The output data will be on CPU if set true. + x(${x_type}): ${x_comment}. + y(${y_type}): ${y_comment}. + force_cpu(${force_cpu_type}): ${force_cpu_comment}. cond(Variable|None): Optional output variable to store the result of *less_than* Returns: - Variable: The tensor variable storing the output of *less_than*. - - Examples: - .. code-block:: python - - less = fluid.layers.less_than(x=label, y=limit) + ${out_comment}. """ helper = LayerHelper("less_than", **locals()) if cond is None: cond = helper.create_tmp_variable(dtype='bool') cond.stop_gradient = True + attrs = dict() + if force_cpu is not None: + attrs['force_cpu'] = force_cpu + elif force_init_on_cpu(): + attrs['force_cpu'] = force_init_on_cpu() + helper.append_op( type='less_than', inputs={'X': [x], 'Y': [y]}, outputs={'Out': [cond]}, - attrs={'force_cpu': force_cpu or force_init_on_cpu()}) + attrs=attrs) return cond @@ -977,16 +1059,34 @@ def equal(x, y, cond=None, **ignored): def array_read(array, i): - """This function performs the operation to read the data in as an + """ + This function performs the operation to read the data in as an LOD_TENSOR_ARRAY. + + .. code-block:: text + + Given: + + array = [0.6, 0.1, 0.3, 0.1] + + And: + + i = 2 + + Then: + + output = 0.3 + Args: - array (Variable|list): The input tensor that will be written to an array. - i (Variable|list): The subscript index in tensor array, that points the - place where data will be written to. + array (Variable|list): The input tensor that stores the data to be read. + i (Variable|list): The index of the data to be read from input array. + Returns: Variable: The tensor type variable that has the data written to it. + Examples: - .. code-block::python + .. code-block:: python + tmp = fluid.layers.zeros(shape=[10], dtype='int32') i = fluid.layers.fill_constant(shape=[1], dtype='int64', value=10) arr = layers.array_read(tmp, i=i) @@ -1007,8 +1107,28 @@ def array_read(array, i): def shrink_memory(x, i, table): """ - This function creates an operator to shrink_rnn_memory using the RankTable + This function creates an operator to shrink rnn memory using the RankTable as mentioned in the input parameter. + + NOTE: This is a very low-level API. It is used by DynamicRNN only. + + Since the dynamic RNN uses a no-padding way to implement RNN, the sequences + are sorted by length, and the length of the valid memory will be shrunk after + each time step. + + Args: + x(Variable): The memory object in the previous time step. + i(Variable): The step count variable. An int scalar as LoDTensor. + table(Variable): The RNNRankTable object. + + Returns: + The memory variable after shrinking.
+ + Examples: + + Since this is a very low-level API, no example is provided here. + Please refer to the implementation of class DynamicRNN for detailed + usage. """ helper = LayerHelper('shrink_memory', **locals()) out = helper.create_tmp_variable(dtype=x.dtype) @@ -1023,9 +1143,14 @@ def shrink_memory(x, i, table): def array_length(array): - """This function performs the operation to find the length of the input + """ + **Get the Length of Input LoDTensorArray** + + This function performs the operation to find the length of the input LOD_TENSOR_ARRAY. + Related API: array_read, array_write, While. + Args: array (LOD_TENSOR_ARRAY): The input array that will be used to compute the length. @@ -1034,12 +1159,13 @@ Returns: Variable: The length of the input LoDTensorArray. Examples: - .. code-block::python + .. code-block:: python tmp = fluid.layers.zeros(shape=[10], dtype='int32') i = fluid.layers.fill_constant(shape=[1], dtype='int64', value=10) arr = fluid.layers.array_write(tmp, i=i) arr_len = fluid.layers.array_length(arr) + """ helper = LayerHelper('array_length', **locals()) tmp = helper.create_tmp_variable(dtype='int64') @@ -1050,6 +1176,13 @@ class ConditionalBlockGuard(BlockGuard): + """ + ConditionalBlockGuard is derived from BlockGuard. It is dedicated for + holding a ConditionalBlock, and helping users enter and exit the + ConditionalBlock via Python's 'with' keyword. However, ConditionalBlockGuard + is generally an internal component of IfElse; users should not use it directly. + """ + def __init__(self, block): if not isinstance(block, ConditionalBlock): raise TypeError("block should be conditional block") @@ -1066,6 +1199,31 @@ class ConditionalBlockGuard(BlockGuard): class ConditionalBlock(object): + ''' + **ConditionalBlock** + + ConditionalBlock is an operator that binds a block to a specific condition. + If the condition matches, the corresponding block will be executed. + + Args: + inputs (Variable): bool conditions. + is_scalar_condition (bool): whether the branch is controlled by a scalar. + name(str): name of this ConditionalBlock. + + Examples: + .. code-block:: python + + cond = layers.less_than(x=label, y=limit) + true_image, false_image = layers.split_lod_tensor( + input=image, mask=cond) + true_cond = layers.ConditionalBlock([true_image]) + false_cond = layers.ConditionalBlock([false_image]) + + with true_cond.block(): + ... + with false_cond.block(): + ... + ''' + def __init__(self, inputs, is_scalar_condition=False, name=None): for each_input in inputs: if not isinstance(each_input, Variable): @@ -1097,7 +1255,7 @@ class ConditionalBlock(object): input_set = set([ipt.name for ipt in self.inputs]) param_list = [ - parent_block.var(each_name) for each_name in params + parent_block._var_recursive(each_name) for each_name in params if each_name not in input_set ] @@ -1123,6 +1281,42 @@ class ConditionalBlock(object): class Switch(object): + """ + The Switch class works just like an `if-elif-else`. It can be used in a learning + rate scheduler to modify the learning rate. + + The Semantics: + + 1. A `switch` control-flow checks cases one-by-one. + + 2. The condition of each case is a boolean value, which is a scalar Variable. + + 3. It runs the first matched case, or the default case if there is one. + + 4. Once it matches a case, it runs the corresponding branch and only that branch. + + Examples: + ..
code-block:: python + + lr = fluid.layers.tensor.create_global_var( + shape=[1], + value=0.0, + dtype='float32', + persistable=True, + name="learning_rate") + one_var = tensor.fill_constant( + shape=[1], dtype='float32', value=1.0) + two_var = tensor.fill_constant( + shape=[1], dtype='float32', value=2.0) + + with fluid.layers.control_flow.Switch() as switch: + with switch.case(global_step == zero_var): + fluid.layers.tensor.assign(input=one_var, output=lr) + with switch.default(): + fluid.layers.tensor.assign(input=two_var, output=lr) + + """ + def __init__(self, name=None): self.helper = LayerHelper('switch', name=name) self.inside_scope = False @@ -1152,7 +1346,8 @@ class Switch(object): return ConditionalBlockGuard(cond_block) def default(self): - """create a default case for this switch + """ + create a default case for this switch """ pre_cond_num = len(self.pre_not_conditions) if pre_cond_num == 0: @@ -1212,6 +1407,34 @@ class IfElseBlockGuard(object): class IfElse(object): + """ + if-else control flow. + + Args: + cond (Variable): condition used to compare. + name (str, default None): The name of this layer. + + Examples: + .. code-block:: python + + limit = fluid.layers.fill_constant_batch_size_like( + input=label, dtype='int64', shape=[1], value=5.0) + cond = fluid.layers.less_than(x=label, y=limit) + ie = fluid.layers.IfElse(cond) + with ie.true_block(): + true_image = ie.input(image) + hidden = fluid.layers.fc(input=true_image, size=100, act='tanh') + prob = fluid.layers.fc(input=hidden, size=10, act='softmax') + ie.output(prob) + + with ie.false_block(): + false_image = ie.input(image) + hidden = fluid.layers.fc( + input=false_image, size=200, act='tanh') + prob = fluid.layers.fc(input=hidden, size=10, act='softmax') + ie.output(prob) + prob = ie() + """ OUT_IF_ELSE_BLOCKS = 0 IN_IF_ELSE_TRUE_BLOCKS = 1 IN_IF_ELSE_FALSE_BLOCKS = 2 @@ -1231,7 +1454,7 @@ class IfElse(object): if self.status == IfElse.OUT_IF_ELSE_BLOCKS: raise ValueError("input must in true/false blocks") if id(x) not in self.input_table: - parent_block = self.parent_block() + parent_block = self._parent_block() out_true = parent_block.create_var( name=unique_name.generate('ifelse_input' + self.helper.name), dtype=x.dtype) @@ -1257,7 +1480,7 @@ class IfElse(object): else: return out_false - def parent_block(self): + def _parent_block(self): current_block = self.helper.main_program.current_block() return self.helper.main_program.block(current_block.parent_idx) @@ -1273,7 +1496,7 @@ class IfElse(object): out_table = self.output_table[1 if self.status == self.IN_IF_ELSE_TRUE_BLOCKS else 0] - parent_block = self.parent_block() + parent_block = self._parent_block() for each_out in outs: if not isinstance(each_out, Variable): raise TypeError("Each output should be a variable") @@ -1290,7 +1513,7 @@ class IfElse(object): def __call__(self): if self.status != self.OUT_IF_ELSE_BLOCKS: raise ValueError("IfElse::__call__ must be out of sub-block") - false_len, true_len = map(len, self.output_table) + false_len, true_len = list(map(len, self.output_table)) if false_len == 0 and true_len == 0: raise ValueError("Must invoke true_block/false_block before " "__call__") @@ -1314,6 +1537,38 @@ class IfElse(object): class DynamicRNN(object): + """ + The dynamic RNN can process a batch of sequence data. The length of each + sample sequence can be different. This API automatically process them in + batch. + + The input lod must be set. 
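The note above says the input lod must be set; concretely, every input sequence has to carry LoD information when it is fed. A minimal sketch of what that looks like at feed time, assuming the `fluid.create_lod_tensor` helper with length-based lod and illustrative shapes (neither is part of the original docstring):

```python
import numpy as np
import paddle.fluid as fluid

# lod_level=1 declares one level of sequence information on the input.
sentence = fluid.layers.data(
    name='sentence', shape=[1], dtype='int64', lod_level=1)

# A batch of 3 sequences with lengths 3, 1 and 2 (6 words in total); the
# length-based lod [[3, 1, 2]] is attached when building the LoDTensor.
words = np.random.randint(0, 65535, size=(6, 1)).astype('int64')
feed_tensor = fluid.create_lod_tensor(words, [[3, 1, 2]], fluid.CPUPlace())
# feed_tensor is then passed via exe.run(feed={'sentence': feed_tensor}, ...)
```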
Please reference `lod_tensor` + + >>> import paddle.fluid as fluid + >>> data = fluid.layers.data(name='sentence', dtype='int64', lod_level=1) + >>> embedding = fluid.layers.embedding(input=data, size=[65535, 32], + >>> is_sparse=True) + >>> + >>> drnn = fluid.layers.DynamicRNN() + >>> with drnn.block(): + >>> word = drnn.step_input(embedding) + >>> prev = drnn.memory(shape=[200]) + >>> hidden = fluid.layers.fc(input=[word, prev], size=200, act='relu') + >>> drnn.update_memory(prev, hidden) # set prev to hidden + >>> drnn.output(hidden) + >>> + >>> # last is the last time step of rnn. It is the encoding result. + >>> last = fluid.layers.sequence_last_step(drnn()) + + The dynamic RNN will unfold sequence into timesteps. Users need to define + how to process each time step during the :code:`with` block. + + The `memory` is used staging data cross time step. The initial value of + memory can be zero or another variable. + + The dynamic RNN can mark multiple variables as its output. Use `drnn()` to + get the output sequence. + """ BEFORE_RNN = 0 IN_RNN = 1 AFTER_RNN = 2 @@ -1336,6 +1591,15 @@ class DynamicRNN(object): self.mem_link = [] def step_input(self, x): + """ + Mark a sequence as a dynamic RNN input. + Args: + x(Variable): The input sequence. + + Returns: + The current timestep in the input sequence. + + """ self._assert_in_rnn_block_("step_input") if not isinstance(x, Variable): raise TypeError( @@ -1379,6 +1643,15 @@ class DynamicRNN(object): return array_read(array=input_array, i=self.step_idx) def static_input(self, x): + """ + Mark a variable as a RNN input. The input will not be scattered into + time steps. + Args: + x(Variable): The input variable. + + Returns: + The input variable that can access in RNN. + """ self._assert_in_rnn_block_("static_input") if not isinstance(x, Variable): raise TypeError( @@ -1400,6 +1673,10 @@ class DynamicRNN(object): @contextlib.contextmanager def block(self): + """ + The block for user to define operators in RNN. See the class docstring + for more details. + """ if self.status != DynamicRNN.BEFORE_RNN: raise ValueError("rnn.block() can only be invoke once") self.step_idx = fill_constant( @@ -1426,6 +1703,9 @@ class DynamicRNN(object): x=each_array, table=self.lod_rank_table)) def __call__(self, *args, **kwargs): + """ + Get the output of RNN. This API should only be invoked after RNN.block() + """ if self.status != DynamicRNN.AFTER_RNN: raise ValueError(("Output of the dynamic RNN can only be visited " "outside the rnn block.")) @@ -1440,6 +1720,70 @@ class DynamicRNN(object): value=0.0, need_reorder=False, dtype='float32'): + """ + Create a memory variable for dynamic rnn. + + If the :code:`init` is not None, :code:`memory` will be initialized by + this variable. The :code:`need_reorder` is used to reorder the memory as + the input variable. It should be set to true when the initialized memory + depends on the input sample. 
+ + For example, + + >>> import paddle.fluid as fluid + >>> sentence = fluid.layers.data( + >>> name='sentence', dtype='float32', shape=[32]) + >>> boot_memory = fluid.layers.data( + >>> name='boot', dtype='float32', shape=[10]) + >>> + >>> drnn = fluid.layers.DynamicRNN() + >>> with drnn.block(): + >>> word = drnn.step_input(sentence) + >>> memory = drnn.memory(init=boot_memory, need_reorder=True) + >>> hidden = fluid.layers.fc( + >>> input=[word, memory], size=10, act='tanh') + >>> drnn.update_memory(ex_mem=memory, new_mem=hidden) + >>> drnn.output(hidden) + >>> rnn_output = drnn() + + + Otherwise, if :code:`shape`, :code:`value`, :code:`dtype` are set, the + :code:`memory` will be initialized by this :code:`value`. + + For example, + + >>> import paddle.fluid as fluid + >>> sentence = fluid.layers.data( + >>> name='sentence', dtype='float32', shape=[32]) + >>> + >>> drnn = fluid.layers.DynamicRNN() + >>> with drnn.block(): + >>> word = drnn.step_input(sentence) + >>> memory = drnn.memory(shape=[10], dtype='float32', value=0) + >>> hidden = fluid.layers.fc( + >>> input=[word, memory], size=10, act='tanh') + >>> drnn.update_memory(ex_mem=memory, new_mem=hidden) + >>> drnn.output(hidden) + >>> rnn_output = drnn() + + + Args: + init(Variable|None): The initialized variable. + + shape(list|tuple): The memory shape. NOTE the shape does not contain + batch_size. + + value(float): the initalized value. + + need_reorder(bool): True if the initialized memory depends on the + input sample. + + dtype(str|numpy.dtype): The data type of the initialized memory. + + Returns: + the memory variable. + + """ self._assert_in_rnn_block_('memory') if init is not None: if not isinstance(init, Variable): @@ -1507,6 +1851,16 @@ class DynamicRNN(object): return self.memory(init=init) def update_memory(self, ex_mem, new_mem): + """ + Update the memory from ex_mem to new_mem. NOTE that the shape and data + type of :code:`ex_mem` and :code:`new_mem` must be same. + Args: + ex_mem(Variable): the memory variable. + new_mem(Variable): the plain variable generated in RNN block. + + Returns: + None + """ self._assert_in_rnn_block_('update_memory') if not isinstance(ex_mem, Variable): raise TypeError("The input arg `ex_mem` of update_memory() must " @@ -1524,6 +1878,15 @@ class DynamicRNN(object): self.mem_link.append((new_mem, mem_array)) def output(self, *outputs): + """ + mark the RNN output variables. + + Args: + outputs: The output variables. + + Returns: + None + """ self._assert_in_rnn_block_('output') parent_block = self._parent_block_() for each in outputs: @@ -1562,3 +1925,40 @@ def reorder_lod_tensor_by_rank(x, rank_table): 'RankTable': [rank_table]}, outputs={'Out': [out]}) return out + + +def is_empty(x, cond=None, **ignored): + """ + Test whether a Variable is empty. + + Args: + x (Variable): The Variable to be tested. + cond (Variable|None): Output parameter. Returns the test result + of given 'x'. Default: None + + Returns: + Variable: A bool scalar. True if 'x' is an empty Variable. + + Raises: + TypeError: If input cond is not a variable, or cond's dtype is + not bool. + + Examples: + .. 
code-block:: python
+
+          res = fluid.layers.is_empty(x=input)
+          # or:
+          fluid.layers.is_empty(x=input, cond=res)
+    """
+    helper = LayerHelper("is_empty", **locals())
+    if cond is None:
+        cond = helper.create_tmp_variable(dtype='bool')
+        cond.stop_gradient = True
+    elif not isinstance(cond, Variable):
+        raise TypeError("cond takes a variable")
+    elif cond.dtype != 'bool':
+        raise TypeError("The data type of cond must be bool")
+
+    helper.append_op(
+        type='is_empty', inputs={'X': [x]}, outputs={'Out': [cond]})
+    return cond
diff --git a/python/paddle/fluid/layers/detection.py b/python/paddle/fluid/layers/detection.py
index a5938fe494..0800c02d9e 100644
--- a/python/paddle/fluid/layers/detection.py
+++ b/python/paddle/fluid/layers/detection.py
@@ -15,25 +15,30 @@
 All layers just related to the detection neural network.
 """

-from layer_function_generator import generate_layer_fn
-from layer_function_generator import autodoc
+from .layer_function_generator import generate_layer_fn
+from .layer_function_generator import autodoc, templatedoc
 from ..layer_helper import LayerHelper
-import tensor
-import nn
+from . import tensor
+from . import nn
 import math
+from functools import reduce

 __all__ = [
+    'prior_box',
     'multi_box_head',
     'bipartite_match',
     'target_assign',
     'detection_output',
     'ssd_loss',
     'detection_map',
+    'rpn_target_assign',
+    'anchor_generator',
 ]

 __auto__ = [
     'iou_similarity',
     'box_coder',
+    'polygon_box_transform',
 ]

 __all__ += __auto__
@@ -42,6 +47,135 @@
 for _OP in set(__auto__):
     globals()[_OP] = generate_layer_fn(_OP)


+def rpn_target_assign(loc,
+                      scores,
+                      anchor_box,
+                      gt_box,
+                      rpn_batch_size_per_im=256,
+                      fg_fraction=0.25,
+                      rpn_positive_overlap=0.7,
+                      rpn_negative_overlap=0.3):
+    """
+    **Target Assign Layer for region proposal network (RPN) in Faster-RCNN detection**
+
+    Given the Intersection-over-Union (IoU) overlap between anchors and
+    ground-truth boxes, this layer assigns classification and regression
+    targets to each anchor; these target labels are used to train the RPN.
+    The classification target is a binary class label (of being an object or
+    not). Following the Faster-RCNN paper, positive labels are assigned to
+    two kinds of anchors: (i) the anchor/anchors with the highest IoU
+    overlap with a ground-truth box, or (ii) an anchor that has an IoU overlap
+    higher than rpn_positive_overlap(0.7) with any ground-truth box. Note
+    that a single ground-truth box may assign positive labels to multiple
+    anchors. An anchor is labeled negative when its IoU ratio is lower than
+    rpn_negative_overlap (0.3) for all ground-truth boxes. Anchors that are
+    neither positive nor negative do not contribute to the training objective.
+    The regression targets are the encoded ground-truth boxes associated with
+    the positive anchors.
+
+    Args:
+        loc(Variable): A 3-D Tensor with shape [N, M, 4] represents the
+            predicted locations of M bounding boxes. N is the batch size,
+            and each bounding box has four coordinate values and the layout
+            is [xmin, ymin, xmax, ymax].
+        scores(Variable): A 3-D Tensor with shape [N, M, C] represents the
+            predicted confidence predictions. N is the batch size, C is the
+            class number, M is the number of bounding boxes. For each category
+            there are M scores in total, corresponding to the M bounding boxes.
+        anchor_box(Variable): A 2-D Tensor with shape [M, 4] holds M boxes,
+            each box is represented as [xmin, ymin, xmax, ymax].
+            [xmin, ymin] is the left top coordinate of the anchor box;
+            if the input is an image feature map, it is close to the origin
+            of the coordinate system. [xmax, ymax] is the right bottom
+            coordinate of the anchor box.
+        gt_box (Variable): The ground-truth bounding boxes (bboxes) are a 2D
+            LoDTensor with shape [Ng, 4], Ng is the total number of ground-truth
+            bboxes of mini-batch input.
+        rpn_batch_size_per_im(int): Total number of RPN examples per image.
+        fg_fraction(float): Target fraction of RoI minibatch that is labeled
+            foreground (i.e. class > 0), 0-th class is background.
+        rpn_positive_overlap(float): Minimum overlap required between an anchor
+            and ground-truth box for the (anchor, gt box) pair to be a positive
+            example.
+        rpn_negative_overlap(float): Maximum overlap allowed between an anchor
+            and ground-truth box for the (anchor, gt box) pair to be a negative
+            example.
+
+    Returns:
+        tuple:
+            A tuple(predicted_scores, predicted_location, target_label,
+            target_bbox) is returned. The predicted_scores and
+            predicted_location are the predicted results of the RPN.
+            The target_label and target_bbox are the ground truth,
+            respectively. The predicted_location is a 2D Tensor with shape
+            [F, 4], and the shape of target_bbox is the same as the shape of
+            the predicted_location, where F is the number of foreground
+            anchors. The predicted_scores is a 2D Tensor with shape
+            [F + B, 1], and the shape of target_label is the same as the shape
+            of the predicted_scores, where B is the number of background
+            anchors; F and B depend on the input of this operator.
+
+    Examples:
+        .. code-block:: python
+
+            loc = layers.data(name='location', shape=[2, 80],
+                              append_batch_size=False, dtype='float32')
+            scores = layers.data(name='scores', shape=[2, 40],
+                                 append_batch_size=False, dtype='float32')
+            anchor_box = layers.data(name='anchor_box', shape=[20, 4],
+                                     append_batch_size=False, dtype='float32')
+            gt_box = layers.data(name='gt_box', shape=[10, 4],
+                                 append_batch_size=False, dtype='float32')
+            loc_pred, score_pred, loc_target, score_target = \
+                fluid.layers.rpn_target_assign(loc=loc,
+                                               scores=scores,
+                                               anchor_box=anchor_box,
+                                               gt_box=gt_box)
+    """
+
+    helper = LayerHelper('rpn_target_assign', **locals())
+    # 1. Compute the regression target bboxes
+    target_bbox = box_coder(
+        prior_box=anchor_box,
+        target_box=gt_box,
+        code_type='encode_center_size',
+        box_normalized=False)
+
+    # 2. Compute overlaps between the anchor boxes and the gt boxes
+    iou = iou_similarity(x=gt_box, y=anchor_box)
+
+    # 3. Assign target label to anchors
+    loc_index = helper.create_tmp_variable(dtype=anchor_box.dtype)
+    score_index = helper.create_tmp_variable(dtype=anchor_box.dtype)
+    target_label = helper.create_tmp_variable(dtype=anchor_box.dtype)
+    helper.append_op(
+        type="rpn_target_assign",
+        inputs={'Overlap': iou, },
+        outputs={
+            'LocationIndex': loc_index,
+            'ScoreIndex': score_index,
+            'TargetLabel': target_label,
+        },
+        attrs={
+            'rpn_batch_size_per_im': rpn_batch_size_per_im,
+            'rpn_positive_overlap': rpn_positive_overlap,
+            'rpn_negative_overlap': rpn_negative_overlap,
+            'fg_fraction': fg_fraction,
+        })
+
+    # 4. Reshape and gather the target entry
+    scores = nn.reshape(x=scores, shape=(-1, 2))
+    loc = nn.reshape(x=loc, shape=(-1, 4))
+    target_label = nn.reshape(x=target_label, shape=(-1, 1))
+    target_bbox = nn.reshape(x=target_bbox, shape=(-1, 4))
+
+    predicted_scores = nn.gather(scores, score_index)
+    predicted_location = nn.gather(loc, loc_index)
+    target_label = nn.gather(target_label, score_index)
+    target_bbox = nn.gather(target_bbox, loc_index)
+    return predicted_scores, predicted_location, target_label, target_bbox
+
+
 def detection_output(loc,
                      scores,
                      prior_box,
@@ -96,7 +230,9 @@ def detection_output(loc,
         nms_eta(float): The parameter for adaptive NMS.

     Returns:
-        Variable: The detection outputs is a LoDTensor with shape [No, 6].
+        Variable:
+
+            The detection output is a LoDTensor with shape [No, 6].
             Each row has six values: [label, confidence, xmin, ymin, xmax, ymax].
             `No` is the total number of detections in this mini-batch. For each
             instance, the offsets in first dimension are called LoD, the offset
@@ -109,15 +245,15 @@ def detection_output(loc,
     Examples:
         .. code-block:: python

-            pb = layers.data(name='prior_box', shape=[10, 4], 
+            pb = layers.data(name='prior_box', shape=[10, 4],
                          append_batch_size=False, dtype='float32')
-            pbv = layers.data(name='prior_box_var', shape=[10, 4], 
+            pbv = layers.data(name='prior_box_var', shape=[10, 4],
                           append_batch_size=False, dtype='float32')
-            loc = layers.data(name='target_box', shape=[2, 21, 4], 
+            loc = layers.data(name='target_box', shape=[2, 21, 4],
                           append_batch_size=False, dtype='float32')
-            scores = layers.data(name='scores', shape=[2, 21, 10], 
+            scores = layers.data(name='scores', shape=[2, 21, 10],
                           append_batch_size=False, dtype='float32')
-            nmsed_outs = fluid.layers.detection_output(scores=scores, 
+            nmsed_outs = fluid.layers.detection_output(scores=scores,
                                        loc=loc,
                                        prior_box=pb,
                                        prior_box_var=pbv)
@@ -152,7 +288,7 @@ def detection_output(loc,
     return nmsed_outs


-@autodoc()
+@templatedoc()
 def detection_map(detect_res,
                   label,
                   class_num,
@@ -163,6 +299,47 @@ def detection_map(detect_res,
                   input_states=None,
                   out_states=None,
                   ap_version='integral'):
+    """
+    ${comment}
+
+    Args:
+        detect_res: ${detect_res_comment}
+        label: ${label_comment}
+        class_num: ${class_num_comment}
+        background_label: ${background_label_comment}
+        overlap_threshold: ${overlap_threshold_comment}
+        evaluate_difficult: ${evaluate_difficult_comment}
+        has_state: ${has_state_comment}
+        input_states: If not None, it contains 3 elements:
+            1. pos_count ${pos_count_comment}.
+            2. true_pos ${true_pos_comment}.
+            3. false_pos ${false_pos_comment}.
+        out_states: If not None, it contains 3 elements:
+            1. accum_pos_count ${accum_pos_count_comment}.
+            2. accum_true_pos ${accum_true_pos_comment}.
+            3. accum_false_pos ${accum_false_pos_comment}.
+        ap_version: ${ap_type_comment}
+
+    Returns:
+        ${map_comment}
+
+
+    Examples:
+        ..
code-block:: python + + detect_res = fluid.layers.data( + name='detect_res', + shape=[10, 6], + append_batch_size=False, + dtype='float32') + label = fluid.layers.data( + name='label', + shape=[10, 6], + append_batch_size=False, + dtype='float32') + + map_out = fluid.layers.detection_map(detect_res, label, 21) + """ helper = LayerHelper("detection_map", **locals()) def __create_var(type): @@ -209,53 +386,68 @@ def bipartite_match(dist_matrix, dist_threshold=None, name=None): """ - **Bipartite matchint operator** - - This operator is a greedy bipartite matching algorithm, which is used to - obtain the matching with the maximum distance based on the input + This operator implements a greedy bipartite matching algorithm, which is + used to obtain the matching with the maximum distance based on the input distance matrix. For input 2D matrix, the bipartite matching algorithm can - find the matched column for each row, also can find the matched row for - each column. And this operator only calculate matched indices from column - to row. For each instance, the number of matched indices is the number of - of columns of the input ditance matrix. - - There are two outputs to save matched indices and distance. - A simple description, this algothrim matched the best (maximum distance) + find the matched column for each row (matched means the largest distance), + also can find the matched row for each column. And this operator only + calculate matched indices from column to row. For each instance, + the number of matched indices is the column number of the input distance + matrix. + + There are two outputs, matched indices and distance. + A simple description, this algorithm matched the best (maximum distance) row entity to the column entity and the matched indices are not duplicated in each row of ColToRowMatchIndices. If the column entity is not matched any row entity, set -1 in ColToRowMatchIndices. - Please note that the input DistMat can be LoDTensor (with LoD) or Tensor. + NOTE: the input DistMat can be LoDTensor (with LoD) or Tensor. If LoDTensor with LoD, the height of ColToRowMatchIndices is batch size. If Tensor, the height of ColToRowMatchIndices is 1. + NOTE: This API is a very low level API. It is used by :code:`ssd_loss` + layer. Please consider to use :code:`ssd_loss` instead. + Args: dist_matrix(Variable): This input is a 2-D LoDTensor with shape [K, M]. It is pair-wise distance matrix between the entities represented by each row and each column. For example, assumed one entity is A with shape [K], another entity is B with shape [M]. The - dist_matirx[i][j] is the distance between A[i] and B[j]. The bigger - the distance is, the better macthing the pairs are. Please note, - This tensor can contain LoD information to represent a batch of - inputs. One instance of this batch can contain different numbers of - entities. + dist_matrix[i][j] is the distance between A[i] and B[j]. The bigger + the distance is, the better matching the pairs are. + + NOTE: This tensor can contain LoD information to represent a batch + of inputs. One instance of this batch can contain different numbers + of entities. match_type(string|None): The type of matching method, should be - 'bipartite' or 'per_prediction', 'bipartite' by defalut. + 'bipartite' or 'per_prediction'. [default 'bipartite']. dist_threshold(float|None): If `match_type` is 'per_prediction', this threshold is to determine the extra matching bboxes based - on the maximum distance, 0.5 by defalut. + on the maximum distance, 0.5 by default. 
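Since both matching modes funnel through `match_type` and `dist_threshold`, a short sketch may make the distinction concrete; the shapes below are illustrative only:

```python
import paddle.fluid as fluid

x = fluid.layers.data(name='x', shape=[4], dtype='float32')
y = fluid.layers.data(name='y', shape=[4], dtype='float32')
iou = fluid.layers.iou_similarity(x=x, y=y)

# Default greedy bipartite matching: each column gets at most one row.
matched_indices, matched_dist = fluid.layers.bipartite_match(iou)

# 'per_prediction' additionally matches any column whose best overlap
# exceeds dist_threshold, on top of the bipartite matches.
matched_indices2, matched_dist2 = fluid.layers.bipartite_match(
    iou, match_type='per_prediction', dist_threshold=0.5)
```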
Returns: - match_indices(Variable): A 2-D Tensor with shape [N, M] in int type. - N is the batch size. If match_indices[i][j] is -1, it - means B[j] does not match any entity in i-th instance. - Otherwise, it means B[j] is matched to row - match_indices[i][j] in i-th instance. The row number of - i-th instance is saved in match_indices[i][j]. - match_distance(Variable): A 2-D Tensor with shape [N, M] in float type. - N is batch size. If match_indices[i][j] is -1, - match_distance[i][j] is also -1.0. Otherwise, assumed - match_distance[i][j] = d, and the row offsets of each instance - are called LoD. Then match_distance[i][j] = dist_matrix[d+LoD[i]][j]. + tuple: a tuple with two elements is returned. The first is + matched_indices, the second is matched_distance. + + The matched_indices is a 2-D Tensor with shape [N, M] in int type. + N is the batch size. If match_indices[i][j] is -1, it + means B[j] does not match any entity in i-th instance. + Otherwise, it means B[j] is matched to row + match_indices[i][j] in i-th instance. The row number of + i-th instance is saved in match_indices[i][j]. + + The matched_distance is a 2-D Tensor with shape [N, M] in float type + . N is batch size. If match_indices[i][j] is -1, + match_distance[i][j] is also -1.0. Otherwise, assumed + match_distance[i][j] = d, and the row offsets of each instance + are called LoD. Then match_distance[i][j] = + dist_matrix[d+LoD[i]][j]. + + Examples: + + >>> x = fluid.layers.data(name='x', shape=[4], dtype='float32') + >>> y = fluid.layers.data(name='y', shape=[4], dtype='float32') + >>> iou = fluid.layers.iou_similarity(x=x, y=y) + >>> matched_indices, matched_dist = fluid.layers.bipartite_match(iou) """ helper = LayerHelper('bipartite_match', **locals()) match_indices = helper.create_tmp_variable(dtype='int32') @@ -280,8 +472,6 @@ def target_assign(input, mismatch_value=None, name=None): """ - **Target assigner operator** - This operator can be, for given the target bounding boxes or labels, to assign classification and regression targets to each prediction as well as weights to prediction. The weights is used to specify which prediction would @@ -295,20 +485,24 @@ def target_assign(input, 1. Assigning all outpts based on `match_indices`: - If id = match_indices[i][j] > 0, + .. code-block:: text - out[i][j][0 : K] = X[lod[i] + id][j % P][0 : K] - out_weight[i][j] = 1. + If id = match_indices[i][j] > 0, - Otherwise, + out[i][j][0 : K] = X[lod[i] + id][j % P][0 : K] + out_weight[i][j] = 1. - out[j][j][0 : K] = {mismatch_value, mismatch_value, ...} - out_weight[i][j] = 0. + Otherwise, + + out[j][j][0 : K] = {mismatch_value, mismatch_value, ...} + out_weight[i][j] = 0. 2. Assigning out_weight based on `neg_indices` if `neg_indices` is provided: Assumed that the row offset for each instance in `neg_indices` is called neg_lod, for i-th instance and each `id` of neg_indices in this instance: + + .. code-block:: text out[i][id][0 : K] = {mismatch_value, mismatch_value, ...} out_weight[i][id] = 1.0 @@ -325,10 +519,22 @@ def target_assign(input, mismatch_value (float32): Fill this value to the mismatched location. Returns: - out (Variable): The output is a 3D Tensor with shape [N, P, K], - N and P is the same as they are in `neg_indices`, K is the - same as it in input of X. If `match_indices[i][j]`. - out_weight (Variable): The weight for output with the shape of [N, P, 1]. + tuple: + A tuple(out, out_weight) is returned. 
out is a 3D Tensor with + shape [N, P, K], N and P is the same as they are in + `neg_indices`, K is the same as it in input of X. If + `match_indices[i][j]`. out_weight is the weight for output with + the shape of [N, P, 1]. + + Examples: + + .. code-block:: python + + matched_indices, matched_dist = fluid.layers.bipartite_match(iou) + gt = layers.data( + name='gt', shape=[1, 1], dtype='int32', lod_level=1) + trg, trg_weight = layers.target_assign( + gt, matched_indices, mismatch_value=0) """ helper = LayerHelper('target_assign', **locals()) out = helper.create_tmp_variable(dtype=input.dtype) @@ -363,7 +569,7 @@ def ssd_loss(location, normalize=True, sample_size=None): """ - **Multi-box loss layer for object dection algorithm of SSD** + **Multi-box loss layer for object detection algorithm of SSD** This layer is to compute dection loss for SSD given the location offset predictions, confidence predictions, prior boxes and ground-truth boudding @@ -371,21 +577,35 @@ def ssd_loss(location, is a weighted sum of the localization loss (or regression loss) and confidence loss (or classification loss) by performing the following steps: - 1. Find matched boundding box by bipartite matching algorithm. + 1. Find matched bounding box by bipartite matching algorithm. + 1.1 Compute IOU similarity between ground-truth boxes and prior boxes. + 1.2 Compute matched boundding box by bipartite matching algorithm. + 2. Compute confidence for mining hard examples + 2.1. Get the target label based on matched indices. + 2.2. Compute confidence loss. + 3. Apply hard example mining to get the negative example indices and update the matched indices. + 4. Assign classification and regression targets + 4.1. Encoded bbox according to the prior boxes. + 4.2. Assign regression targets. + 4.3. Assign classification targets. + 5. Compute the overall objective loss. + 5.1 Compute confidence loss. + 5.1 Compute localization loss. + 5.3 Compute the overall weighted loss. Args: @@ -420,39 +640,36 @@ def ssd_loss(location, mining_type (str): The hard example mining type, should be 'hard_example' or 'max_negative', now only support `max_negative`. normalize (bool): Whether to normalize the SSD loss by the total number - of output locations, True by defalut. + of output locations, True by default. sample_size (int): The max sample size of negative box, used only when mining_type is 'hard_example'. Returns: - Variable: The weighted sum of the localization loss and confidence loss, - with shape [N * Np, 1], N and Np are the same as they are - in `location`. + The weighted sum of the localization loss and confidence loss, with \ + shape [N * Np, 1], N and Np are the same as they are in `location`. Raises: - ValueError: If mining_type is 'hard_example', now only support - mining type of `max_negative`. + ValueError: If mining_type is 'hard_example', now only support mining \ + type of `max_negative`. Examples: - .. 
code-block:: python
-
-            pb = layers.data(
-                     name='prior_box',
-                     shape=[10, 4],
-                     append_batch_size=False,
-                     dtype='float32')
-            pbv = layers.data(
-                     name='prior_box_var',
-                     shape=[10, 4],
-                     append_batch_size=False,
-                     dtype='float32')
-            loc = layers.data(name='target_box', shape=[10, 4], dtype='float32')
-            scores = layers.data(name='scores', shape=[10, 21], dtype='float32')
-            gt_box = layers.data(
-                     name='gt_box', shape=[4], lod_level=1, dtype='float32')
-            gt_label = layers.data(
-                     name='gt_label', shape=[1], lod_level=1, dtype='float32')
-            loss = layers.ssd_loss(loc, scores, gt_box, gt_label, pb, pbv)
+        >>> pb = fluid.layers.data(
+        >>>                   name='prior_box',
+        >>>                   shape=[10, 4],
+        >>>                   append_batch_size=False,
+        >>>                   dtype='float32')
+        >>> pbv = fluid.layers.data(
+        >>>                   name='prior_box_var',
+        >>>                   shape=[10, 4],
+        >>>                   append_batch_size=False,
+        >>>                   dtype='float32')
+        >>> loc = fluid.layers.data(name='target_box', shape=[10, 4], dtype='float32')
+        >>> scores = fluid.layers.data(name='scores', shape=[10, 21], dtype='float32')
+        >>> gt_box = fluid.layers.data(
+        >>>         name='gt_box', shape=[4], lod_level=1, dtype='float32')
+        >>> gt_label = fluid.layers.data(
+        >>>         name='gt_label', shape=[1], lod_level=1, dtype='float32')
+        >>> loss = fluid.layers.ssd_loss(loc, scores, gt_box, gt_label, pb, pbv)
     """
     helper = LayerHelper('ssd_loss', **locals())
@@ -505,7 +722,7 @@ def ssd_loss(location,
         },
         attrs={
             'neg_pos_ratio': neg_pos_ratio,
-            'neg_dist_threshold': neg_pos_ratio,
+            'neg_dist_threshold': neg_overlap,
             'mining_type': mining_type,
             'sample_size': sample_size,
         })
@@ -564,6 +781,126 @@ def ssd_loss(location,
     return loss


+def prior_box(input,
+              image,
+              min_sizes,
+              max_sizes=None,
+              aspect_ratios=[1.],
+              variance=[0.1, 0.1, 0.2, 0.2],
+              flip=False,
+              clip=False,
+              steps=[0.0, 0.0],
+              offset=0.5,
+              name=None,
+              min_max_aspect_ratios_order=False):
+    """
+    **Prior Box Operator**
+
+    Generate prior boxes for SSD(Single Shot MultiBox Detector) algorithm.
+    Each position of the input produces N prior boxes, where N is determined
+    by the count of min_sizes, max_sizes and aspect_ratios. The size of each
+    box is in the interval (min_size, max_size), and the boxes are generated
+    in sequence according to the aspect_ratios.
+
+    Args:
+       input(Variable): The input Variable, the format is NCHW.
+       image(Variable): The input image data of PriorBoxOp,
+            the layout is NCHW.
+       min_sizes(list|tuple|float value): min sizes of generated prior boxes.
+       max_sizes(list|tuple|None): max sizes of generated prior boxes.
+            Default: None.
+       aspect_ratios(list|tuple|float value): the aspect ratios of generated
+            prior boxes. Default: [1.].
+       variance(list|tuple): the variances to be encoded in prior boxes.
+            Default:[0.1, 0.1, 0.2, 0.2].
+       flip(bool): Whether to flip aspect ratios. Default:False.
+       clip(bool): Whether to clip out-of-boundary boxes. Default: False.
+       steps(list|tuple): Prior box steps across width and height. If
+            steps[0] == 0.0 or steps[1] == 0.0, the prior box steps across the
+            height/width of the input will be automatically calculated.
+            Default: [0., 0.]
+       offset(float): Prior boxes center offset. Default: 0.5
+       name(str): Name of the prior box op. Default: None.
+       min_max_aspect_ratios_order(bool): If set True, the output prior box is
+            in order of [min, max, aspect_ratios], which is consistent with
+            Caffe. Please note, this order affects the weight order of the
+            convolution layer that follows, but does not affect the final
+            detection results. Default: False.
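With `prior_box` now exposed directly (rather than only through `multi_box_head`), a standalone sketch may be useful; `images`, `conv1` and all sizes below are illustrative placeholders:

```python
import paddle.fluid as fluid

images = fluid.layers.data(name='images', shape=[3, 300, 300], dtype='float32')
conv1 = fluid.layers.conv2d(input=images, num_filters=64, filter_size=3)

box, var = fluid.layers.prior_box(
    input=conv1,
    image=images,
    min_sizes=[100.],
    max_sizes=[200.],
    aspect_ratios=[1., 2.],
    flip=True,
    clip=True,
    steps=[8., 8.])
# Both outputs have layout [H, W, num_priors, 4], where H and W come from
# conv1's feature map and num_priors follows from the size/ratio lists.
```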
+ + Returns: + tuple: A tuple with two Variable (boxes, variances) + + boxes: the output prior boxes of PriorBox. + The layout is [H, W, num_priors, 4]. + H is the height of input, W is the width of input, + num_priors is the total + box count of each position of input. + + variances: the expanded variances of PriorBox. + The layout is [H, W, num_priors, 4]. + H is the height of input, W is the width of input + num_priors is the total + box count of each position of input + + + Examples: + .. code-block:: python + + box, var = fluid.layers.prior_box( + input=conv1, + image=images, + min_sizes=[100.], + flip=True, + clip=True) + """ + helper = LayerHelper("prior_box", **locals()) + dtype = helper.input_dtype() + + def _is_list_or_tuple_(data): + return (isinstance(data, list) or isinstance(data, tuple)) + + if not _is_list_or_tuple_(min_sizes): + min_sizes = [min_sizes] + if not _is_list_or_tuple_(aspect_ratios): + aspect_ratios = [aspect_ratios] + if not (_is_list_or_tuple_(steps) and len(steps) == 2): + raise ValueError('steps should be a list or tuple ', + 'with length 2, (step_width, step_height).') + + min_sizes = list(map(float, min_sizes)) + aspect_ratios = list(map(float, aspect_ratios)) + steps = list(map(float, steps)) + + attrs = { + 'min_sizes': min_sizes, + 'aspect_ratios': aspect_ratios, + 'variances': variance, + 'flip': flip, + 'clip': clip, + 'step_w': steps[0], + 'step_h': steps[1], + 'offset': offset, + 'min_max_aspect_ratios_order': min_max_aspect_ratios_order + } + if max_sizes is not None and len(max_sizes) > 0 and max_sizes[0] > 0: + if not _is_list_or_tuple_(max_sizes): + max_sizes = [max_sizes] + attrs['max_sizes'] = max_sizes + + box = helper.create_tmp_variable(dtype) + var = helper.create_tmp_variable(dtype) + helper.append_op( + type="prior_box", + inputs={"Input": input, + "Image": image}, + outputs={"Boxes": box, + "Variances": var}, + attrs=attrs, ) + box.stop_gradient = True + var.stop_gradient = True + return box, var + + def multi_box_head(inputs, image, base_size, @@ -583,13 +920,12 @@ def multi_box_head(inputs, kernel_size=1, pad=0, stride=1, - name=None): + name=None, + min_max_aspect_ratios_order=False): """ - **Prior_boxes** - Generate prior boxes for SSD(Single Shot MultiBox Detector) algorithm. The details of this algorithm, please refer the - section 2.2 of SSD paper (SSD: Single Shot MultiBox Detector) + section 2.2 of SSD paper `SSD: Single Shot MultiBox Detector `_ . Args: @@ -628,26 +964,34 @@ def multi_box_head(inputs, pad(int|list|tuple): The padding of conv2d. Default:0. stride(int|list|tuple): The stride of conv2d. Default:1, name(str): Name of the prior box layer. Default: None. + min_max_aspect_ratios_order(bool): If set True, the output prior box is + in order of [min, max, aspect_ratios], which is consistent with + Caffe. Please note, this order affects the weights order of + convolution layer followed by and does not affect the fininal + detection results. Default: False. Returns: - mbox_loc(Variable): The predicted boxes' location of the inputs. - The layout is [N, H*W*Priors, 4]. where Priors - is the number of predicted boxes each position of each input. - mbox_conf(Variable): The predicted boxes' confidence of the inputs. - The layout is [N, H*W*Priors, C]. where Priors - is the number of predicted boxes each position of each input - and C is the number of Classes. - boxes(Variable): the output prior boxes of PriorBox. - The layout is [num_priors, 4]. num_priors is the total - box count of each position of inputs. 
- Variances(Variable): the expanded variances of PriorBox. - The layout is [num_priors, 4]. num_priors is the total - box count of each position of inputs + tuple: A tuple with four Variables. (mbox_loc, mbox_conf, boxes, variances) + + mbox_loc: The predicted boxes' location of the inputs. The layout + is [N, H*W*Priors, 4]. where Priors is the number of predicted + boxes each position of each input. + + mbox_conf: The predicted boxes' confidence of the inputs. The layout + is [N, H*W*Priors, C]. where Priors is the number of predicted boxes + each position of each input and C is the number of Classes. + + boxes: the output prior boxes of PriorBox. The layout is [num_priors, 4]. + num_priors is the total box count of each position of inputs. + + variances: the expanded variances of PriorBox. The layout is + [num_priors, 4]. num_priors is the total box count of each position of inputs Examples: .. code-block:: python - mbox_locs, mbox_confs, box, var = layers.multi_box_head( + + mbox_locs, mbox_confs, box, var = fluid.layers.multi_box_head( inputs=[conv1, conv2, conv3, conv4, conv5, conv5], image=images, num_classes=21, @@ -660,47 +1004,6 @@ def multi_box_head(inputs, clip=True) """ - def _prior_box_(input, - image, - min_sizes, - max_sizes, - aspect_ratios, - variance, - flip=False, - clip=False, - step_w=0.0, - step_h=0.0, - offset=0.5, - name=None): - helper = LayerHelper("prior_box", **locals()) - dtype = helper.input_dtype() - - attrs = { - 'min_sizes': min_sizes, - 'aspect_ratios': aspect_ratios, - 'variances': variance, - 'flip': flip, - 'clip': clip, - 'step_w': step_w, - 'step_h': step_h, - 'offset': offset - } - if len(max_sizes) > 0 and max_sizes[0] > 0: - attrs['max_sizes'] = max_sizes - - box = helper.create_tmp_variable(dtype) - var = helper.create_tmp_variable(dtype) - helper.append_op( - type="prior_box", - inputs={"Input": input, - "Image": image}, - outputs={"Boxes": box, - "Variances": var}, - attrs=attrs, ) - box.stop_gradient = True - var.stop_gradient = True - return box, var - def _reshape_with_axis_(input, axis=1): if not (axis > 0 and axis < len(input.shape)): raise ValueError("The axis should be smaller than " @@ -730,7 +1033,7 @@ def multi_box_head(inputs, min_sizes = [] max_sizes = [] step = int(math.floor(((max_ratio - min_ratio)) / (num_layer - 2))) - for ratio in xrange(min_ratio, max_ratio + 1, step): + for ratio in range(min_ratio, max_ratio + 1, step): min_sizes.append(base_size * ratio / 100.) max_sizes.append(base_size * (ratio + step) / 100.) min_sizes = [base_size * .10] + min_sizes @@ -777,11 +1080,11 @@ def multi_box_head(inputs, aspect_ratio = aspect_ratios[i] if not _is_list_or_tuple_(aspect_ratio): aspect_ratio = [aspect_ratio] + step = [step_w[i] if step_w else 0.0, step_h[i] if step_w else 0.0] - box, var = _prior_box_(input, image, min_size, max_size, aspect_ratio, - variance, flip, clip, step_w[i] - if step_w else 0.0, step_h[i] - if step_w else 0.0, offset) + box, var = prior_box(input, image, min_size, max_size, aspect_ratio, + variance, flip, clip, step, offset, None, + min_max_aspect_ratios_order) box_results.append(box) var_results.append(var) @@ -841,3 +1144,95 @@ def multi_box_head(inputs, box.stop_gradient = True var.stop_gradient = True return mbox_locs_concat, mbox_confs_concat, box, var + + +def anchor_generator(input, + anchor_sizes=None, + aspect_ratios=None, + variance=[0.1, 0.1, 0.2, 0.2], + stride=None, + offset=0.5, + name=None): + """ + **Anchor generator operator** + + Generate anchors for Faster RCNN algorithm. 
+ Each position of the input produce N anchors, N = + size(anchor_sizes) * size(aspect_ratios). The order of generated anchors + is firstly aspect_ratios loop then anchor_sizes loop. + + Args: + input(Variable): The input feature map, the format is NCHW. + anchor_sizes(list|tuple|float): The anchor sizes of generated anchors, + given in absolute pixels e.g. [64., 128., 256., 512.]. + For instance, the anchor size of 64 means the area of this anchor equals to 64**2. + aspect_ratios(list|tuple|float): The height / width ratios of generated + anchors, e.g. [0.5, 1.0, 2.0]. + variance(list|tuple): The variances to be used in box regression deltas. + Default:[0.1, 0.1, 0.2, 0.2]. + stride(list|turple): The anchors stride across width and height, + e.g. [16.0, 16.0] + offset(float): Prior boxes center offset. Default: 0.5 + name(str): Name of the prior box op. Default: None. + + Returns: + Anchors(Variable): The output anchors with a layout of [H, W, num_anchors, 4]. + H is the height of input, W is the width of input, + num_anchors is the box count of each position. + Each anchor is in (xmin, ymin, xmax, ymax) format an unnormalized. + Variances(Variable): The expanded variances of anchors + with a layout of [H, W, num_priors, 4]. + H is the height of input, W is the width of input + num_anchors is the box count of each position. + Each variance is in (xcenter, ycenter, w, h) format. + + + Examples: + + .. code-block:: python + + anchor, var = anchor_generator( + input=conv1, + anchor_sizes=[64, 128, 256, 512], + aspect_ratios=[0.5, 1.0, 2.0], + variance=[0.1, 0.1, 0.2, 0.2], + stride=[16.0, 16.0], + offset=0.5) + """ + helper = LayerHelper("anchor_generator", **locals()) + dtype = helper.input_dtype() + + def _is_list_or_tuple_(data): + return (isinstance(data, list) or isinstance(data, tuple)) + + if not _is_list_or_tuple_(anchor_sizes): + anchor_sizes = [anchor_sizes] + if not _is_list_or_tuple_(aspect_ratios): + aspect_ratios = [aspect_ratios] + if not (_is_list_or_tuple_(stride) and len(stride) == 2): + raise ValueError('stride should be a list or tuple ', + 'with length 2, (stride_width, stride_height).') + + anchor_sizes = list(map(float, anchor_sizes)) + aspect_ratios = list(map(float, aspect_ratios)) + stride = list(map(float, stride)) + + attrs = { + 'anchor_sizes': anchor_sizes, + 'aspect_ratios': aspect_ratios, + 'variances': variance, + 'stride': stride, + 'offset': offset + } + + anchor = helper.create_tmp_variable(dtype) + var = helper.create_tmp_variable(dtype) + helper.append_op( + type="anchor_generator", + inputs={"Input": input}, + outputs={"Anchors": anchor, + "Variances": var}, + attrs=attrs, ) + anchor.stop_gradient = True + var.stop_gradient = True + return anchor, var diff --git a/python/paddle/fluid/layers/device.py b/python/paddle/fluid/layers/device.py index e0c1aab230..bb1fb7fd57 100644 --- a/python/paddle/fluid/layers/device.py +++ b/python/paddle/fluid/layers/device.py @@ -15,13 +15,15 @@ All util layers. 
""" -from layer_function_generator import autodoc +from .layer_function_generator import autodoc from ..framework import unique_name from ..layer_helper import LayerHelper +from ..annotations import deprecated -__all__ = ['get_places'] +__all__ = [] +@deprecated(since='0.15.0', instead="ParallelExecutor") @autodoc() def get_places(device_count=None, device_type=None): helper = LayerHelper('get_places', **locals()) diff --git a/python/paddle/fluid/layers/io.py b/python/paddle/fluid/layers/io.py index 0a6befd148..327ae30981 100644 --- a/python/paddle/fluid/layers/io.py +++ b/python/paddle/fluid/layers/io.py @@ -11,17 +11,24 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +import contextlib +import multiprocessing +import threading +from ..data_feeder import DataFeeder +from .control_flow import BlockGuard +from .layer_function_generator import templatedoc from .. import core -from ..framework import convert_np_dtype_to_dtype_, default_main_program, default_startup_program, Program -from ..unique_name import generate as unique_name -from control_flow import BlockGuard -from ..layer_helper import LayerHelper from ..executor import global_scope +from ..framework import convert_np_dtype_to_dtype_, default_main_program, \ + default_startup_program, program_guard, Program +from ..layer_helper import LayerHelper +from ..unique_name import generate as unique_name __all__ = [ - 'data', 'BlockGuardServ', 'ListenAndServ', 'Send', 'open_recordio_file', - 'open_files', 'read_file', 'shuffle', 'batch', 'double_buffer' + 'data', 'open_recordio_file', 'open_files', 'read_file', 'shuffle', 'batch', + 'double_buffer', 'random_data_generator', 'py_reader', 'Preprocessor', + 'load' ] @@ -62,7 +69,7 @@ def data(name, """ helper = LayerHelper('data', **locals()) shape = list(shape) - for i in xrange(len(shape)): + for i in range(len(shape)): if shape[i] is None: shape[i] = -1 append_batch_size = False @@ -78,8 +85,8 @@ def data(name, dtype=dtype, type=type, stop_gradient=stop_gradient, - lod_level=lod_level) - data_var.is_data = True + lod_level=lod_level, + is_data=True) return data_var @@ -106,10 +113,35 @@ class BlockGuardServ(BlockGuard): class ListenAndServ(object): """ - ListenAndServ class. + **ListenAndServ Layer** + + ListenAndServ is used to create a rpc server bind and listen + on specific TCP port, this server will run the sub-block when + received variables from clients. - ListenAndServ class is used to wrap listen_and_serv op to create a server - which can receive variables from clients and run a block. + Args: + endpoint(string): IP:port string which the server will listen on. + inputs(list): a list of variables that the server will get from clients. + fan_in(int): how many client are expected to report to this server, default: 1. + optimizer_mode(bool): whether to run the server as a parameter server, default: True. + + Examples: + .. 
code-block:: python + + with fluid.program_guard(main): + serv = layers.ListenAndServ( + "127.0.0.1:6170", ["X"], optimizer_mode=False) + with serv.do(): + x = layers.data( + shape=[32, 32], + dtype='float32', + name="X", + append_batch_size=False) + fluid.initializer.Constant(value=1.0)(x, main.global_block()) + layers.scale(x=x, scale=10.0, out=out_var) + + exe = fluid.Executor(place) + exe.run(main) """ def __init__(self, endpoint, inputs, fan_in=1, optimizer_mode=True): @@ -158,7 +190,6 @@ class ListenAndServ(object): main_program = self.helper.main_program current_block = main_program.current_block() parent_block = self.parent_block() - empty_block = Program().global_block() parent_block.append_op( type='listen_and_serv', @@ -167,25 +198,25 @@ class ListenAndServ(object): attrs={ 'endpoint': self.endpoint, 'Fanin': self.fan_in, - 'OptimizeBlock': current_block, - 'PrefetchBlock': empty_block, + 'optimize_blocks': [ + current_block + ], # did not support multiple optimize blocks in layers 'sync_mode': True, # did not support async now in layers 'grad_to_block_id': [""] }) -def Send(endpoints, send_vars, get_vars=None): +def Send(endpoints, send_vars, sync=True): """ - Send layer + Send variables to the server side, and get vars from server + side when server have finished running server side program. Args: - endpoints: comma seperated IP:PORT pairs in the order + endpoints (str): comma seperated IP:PORT pairs in the order of send_vars to send - send_vars: vars to send - get_vars: vars to get from server after send completes. + send_vars (list): variables to send to server + sync (bool): whether to wait the request finish - Send variables to the server side, and get vars from server - side when server have finished running server side program. """ assert (type(send_vars) == list) @@ -193,38 +224,33 @@ def Send(endpoints, send_vars, get_vars=None): endpoints = list(set(epmap)) helper = LayerHelper("Send", **locals()) - rpc_client_var = default_main_program().global_block().create_var( - name="RPC_CLIENT_VAR", persistable=True, type=core.VarDesc.VarType.RAW) - if not get_vars: - get_vars = [] - for s in send_vars: - v = helper.create_tmp_variable(dtype=s.dtype, stop_gradient=True) - get_vars.append(v) + rpc_op_role_name = core.op_proto_and_checker_maker.kOpRoleAttrName() helper.append_op( type="send", inputs={"X": send_vars}, - outputs={"Out": get_vars, - "RPCClient": rpc_client_var}, - attrs={"endpoints": endpoints, - "epmap": epmap}) - return get_vars + attrs={ + "endpoints": endpoints, + "epmap": epmap, + rpc_op_role_name: core.op_proto_and_checker_maker.OpRole.RPC + }) + if sync: + helper.append_op(type="send_barrier", attrs={"endpoints": endpoints}) -def Recv(endpoints, get_vars): +def Recv(endpoints, get_vars, sync=True): """ - Recv layer + Receive variables from server side Args: - endpoints: comma seperated IP:PORT pairs in the order + endpoints (str): comma seperated IP:PORT pairs in the order of send_vars to send - send_vars: vars to send - get_vars: vars to get from server after send completes. + get_vars (list): vars to get from server after send completes. + sync (bool): whether to wait the request finish - Send variables to the server side, and get vars from server - side when server have finished running server side program. 
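To make the reworked semantics concrete: `Send` no longer returns fetched variables, and with `sync=True` it appends a `send_barrier` op, while `Recv` appends a `fetch_barrier` and returns `get_vars`. A rough sketch follows; the endpoint is a placeholder, and the snippet only makes sense inside a distributed (transpiled) program with a parameter server actually listening:

```python
import paddle.fluid as fluid

x = fluid.layers.data(name='x', shape=[32, 32], dtype='float32',
                      append_batch_size=False)

# Blocks until the send completes, because sync=True adds a send_barrier.
fluid.layers.io.Send("127.0.0.1:6170", [x])

# Pulls the (possibly updated) values back; a fetch_barrier is added likewise.
received = fluid.layers.io.Recv("127.0.0.1:6170", [x])
```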
+ Returns: + list: list of received variables """ - assert (type(send_vars) == list) assert (type(get_vars) == list) epmap = endpoints.split(",") @@ -237,6 +263,9 @@ def Recv(endpoints, get_vars): outputs={"Out": get_vars}, attrs={"endpoints": endpoints, "epmap": epmap}) + if sync: + helper.append_op(type="fetch_barrier", attrs={"endpoints": endpoints}) + return get_vars def monkey_patch_reader_methods(reader): @@ -287,6 +316,7 @@ def _copy_reader_create_op_(block, op): return new_op +@templatedoc(op_type='create_recordio_file_reader') def open_recordio_file(filename, shapes, lod_levels, @@ -294,34 +324,30 @@ def open_recordio_file(filename, pass_num=1, for_parallel=True): """ - Open a RecordIO file - - This layer takes a RecordIO file to read from and returns a Reader Variable. - Via the Reader Variable, we can get data from the given RecordIO file. + ${comment} Args: - filename(str): The RecordIO file's name. + filename(${filename_type}): ${filename_comment}. shapes(list): List of tuples which declaring data shapes. - lod_levels(list): List of ints which declaring data lod_level. + lod_levels(${lod_levels_type}): ${lod_levels_comment}. dtypes(list): List of strs which declaring data type. pass_num(int): Number of passes to run. for_parallel(Bool): Set it as True if you are going to run subsequent operators in parallel. Returns: - Variable: A Reader Variable via which we can get RecordIO file data. + ${out_comment}. Examples: - .. code-block:: python - - reader = fluid.layers.io.open_recordio_file( - filename='./data.recordio', - shapes=[(3,224,224), (1)], - lod_levels=[0, 0], - dtypes=['float32', 'int64']) - # Via the reader, we can use 'read_file' layer to get data: - image, label = fluid.layers.read_file(reader) + >>> import paddle.fluid as fluid + >>> reader = fluid.layers.io.open_recordio_file( + >>> filename='./data.recordio', + >>> shapes=[(3,224,224), (1)], + >>> lod_levels=[0, 0], + >>> dtypes=['float32', 'int64']) + >>> # Via the reader, we can use 'read_file' layer to get data: + >>> image, label = fluid.layers.io.read_file(reader) """ dtypes = [convert_np_dtype_to_dtype_(dt) for dt in dtypes] shape_concat = [] @@ -353,37 +379,351 @@ def open_recordio_file(filename, if pass_num > 1: main_prog_var = multi_pass(reader=main_prog_var, pass_num=pass_num) - if for_parallel: - main_prog_var = parallel(reader=main_prog_var) + return monkey_patch_reader_methods(main_prog_var) + + +def random_data_generator(low, high, shapes, lod_levels, for_parallel=True): + """ + Create a uniform random data generator + + This layer returns a Reader Variable. + Instead of opening a file and reading data from it, this + Reader Variable generates float uniform random data by itself. + It can be used as a dummy reader to test a network without + opening a real file. + + Args: + low(float): The lower bound of data's uniform distribution. + high(float): The upper bound of data's uniform distribution. + shapes(list): List of tuples which declaring data shapes. + lod_levels(list): List of ints which declaring data lod_level. + for_parallel(Bool): Set it as True if you are going to run + subsequent operators in parallel. + + Returns: + Variable: A Reader Variable from which we can get random data. + + Examples: + + .. 
code-block:: python + + reader = fluid.layers.random_data_generator( + low=0.0, + high=1.0, + shapes=[[3,224,224], [1]], + lod_levels=[0, 0]) + # Via the reader, we can use 'read_file' layer to get data: + image, label = fluid.layers.read_file(reader) + """ + dtypes = [core.VarDesc.VarType.FP32] * len(shapes) + shape_concat = [] + ranks = [] + + for shape in shapes: + shape_concat.extend(shape) + ranks.append(len(shape)) + + var_name = unique_name('random_data_generator') + + startup_blk = default_startup_program().current_block() + startup_var = startup_blk.create_var(name=var_name) + startup_blk.append_op( + type='create_random_data_generator', + outputs={'Out': [startup_var]}, + attrs={ + 'low': low, + 'high': high, + 'shape_concat': shape_concat, + 'lod_levels': lod_levels, + 'ranks': ranks + }) + + startup_var.desc.set_dtypes(dtypes) + startup_var.persistable = True + main_prog_var = _copy_reader_var_(default_main_program().current_block(), + startup_var) return monkey_patch_reader_methods(main_prog_var) +def py_reader(capacity, + shapes, + dtypes, + lod_levels=None, + name=None, + use_double_buffer=True): + """ + Create a Python reader for data feeding in Python + + This layer returns a Reader Variable. + The Reader provides :code:`decorate_paddle_reader()` and + :code:`decorate_tensor_provider()` to set a Python generator as the data + source in Python side. When :code:`Executor::Run()` is invoked in C++ + side, the data from the generator would be read automatically. Unlike + :code:`DataFeeder.feed()`, the data reading process and + :code:`Executor::Run()` process can run in parallel using + :code:`py_reader`. The :code:`start()` method of the Reader should be + called when each pass begins, while the :code:`reset()` method should be + called when the pass ends and :code:`fluid.core.EOFException` raises. + Note that :code:`Program.clone()` method cannot clone :code:`py_reader`. + + Args: + capacity(int): The buffer capacity maintained by :code:`py_reader`. + shapes(list|tuple): List of tuples which declaring data shapes. + dtypes(list|tuple): List of strs which declaring data type. + lod_levels(list|tuple): List of ints which declaring data lod_level. + name(basestring): The prefix Python queue name and Reader name. None will + be generated automatically. + use_double_buffer(bool): Whether use double buffer or not. + + Returns: + Variable: A Reader from which we can get feeding data. + + Examples: + + 1. The basic usage of :code:`py_reader` is as follows: + + >>> import paddle.v2 + >>> import paddle.fluid as fluid + >>> import paddle.dataset.mnist as mnist + >>> + >>> reader = fluid.layers.py_reader(capacity=64, + >>> shapes=[(-1,3,224,224), (-1,1)], + >>> dtypes=['float32', 'int64']) + >>> reader.decorate_paddle_reader( + >>> paddle.v2.reader.shuffle(paddle.batch(mnist.train()) + >>> + >>> img, label = fluid.layers.read_file(reader) + >>> loss = network(img, label) # some network definition + >>> + >>> fluid.Executor(fluid.CUDAPlace(0)).run(fluid.default_startup_program()) + >>> + >>> exe = fluid.ParallelExecutor(use_cuda=True, loss_name=loss.name) + >>> for epoch_id in range(10): + >>> reader.start() + >>> try: + >>> while True: + >>> exe.run(fetch_list=[loss.name]) + >>> except fluid.core.EOFException: + >>> reader.reset() + + 2. 
When training and testing are both performed, two different + :code:`py_reader` should be created with different names, e.g.: + + >>> import paddle.v2 + >>> import paddle.fluid as fluid + >>> import paddle.dataset.mnist as mnist + >>> + >>> def network(reader): + >>> img, label = fluid.layers.read_file(reader) + >>> # Here, we omitted the network definition + >>> return loss + >>> + >>> train_reader = fluid.layers.py_reader(capacity=64, + >>> shapes=[(-1,3,224,224), (-1,1)], + >>> dtypes=['float32', 'int64'], + >>> name='train_reader') + >>> train_reader.decorate_paddle_reader( + >>> paddle.v2.reader.shuffle(paddle.batch(mnist.train()) + >>> + >>> test_reader = fluid.layers.py_reader(capacity=32, + >>> shapes=[(-1,3,224,224), (-1,1)], + >>> dtypes=['float32', 'int64'], + >>> name='test_reader') + >>> test_reader.decorate_paddle_reader(paddle.batch(mnist.test(), 512)) + >>> + >>> # Create train_main_prog and train_startup_prog + >>> train_main_prog = fluid.Program() + >>> train_startup_prog = fluid.Program() + >>> with fluid.program_guard(train_main_prog, train_startup_prog): + >>> # Use fluid.unique_name.guard() to share parameters with test program + >>> with fluid.unique_name.guard(): + >>> train_loss = network(train_reader) # some network definition + >>> adam = fluid.optimizer.Adam(learning_rate=0.01) + >>> adam.minimize(loss) + >>> + >>> # Create test_main_prog and test_startup_prog + >>> test_main_prog = fluid.Program() + >>> test_startup_prog = fluid.Program() + >>> with fluid.program_guard(test_main_prog, test_startup_prog): + >>> # Use fluid.unique_name.guard() to share parameters with train program + >>> with fluid.unique_name.guard(): + >>> test_loss = network(test_reader) + >>> + >>> fluid.Executor(fluid.CUDAPlace(0)).run(train_startup_prog) + >>> fluid.Executor(fluid.CUDAPlace(0)).run(test_startup_prog) + >>> + >>> train_exe = fluid.ParallelExecutor(use_cuda=True, + >>> loss_name=train_loss.name, main_program=train_main_prog) + >>> test_exe = fluid.ParallelExecutor(use_cuda=True, + >>> loss_name=test_loss.name, main_program=test_main_prog) + >>> for epoch_id in range(10): + >>> train_reader.start() + >>> try: + >>> while True: + >>> train_exe.run(fetch_list=[train_loss.name]) + >>> except fluid.core.EOFException: + >>> train_reader.reset() + >>> + >>> test_reader.start() + >>> try: + >>> while True: + >>> test_exe.run(fetch_list=[test_loss.name]) + >>> except fluid.core.EOFException: + >>> test_reader.reset() + """ + dtypes = [convert_np_dtype_to_dtype_(dt) for dt in dtypes] + shape_concat = [] + ranks = [] + + for shape in shapes: + shape_concat.extend(shape) + ranks.append(len(shape)) + + if lod_levels is None: + lod_levels = [0] * len(shapes) + + if name is None: + queue_name = unique_name('lod_tensor_blocking_queue') + reader_name = unique_name('create_py_reader') + double_buffer_name = unique_name('double_buffer') + else: + queue_name = "_".join([name, "queue"]) + reader_name = "_".join([name, "reader"]) + double_buffer_name = "_".join([name, "double_buffer"]) + + var = global_scope().var(queue_name) + feed_queue = core.init_lod_tensor_blocking_queue(var, capacity, shapes) + + startup_blk = default_startup_program().current_block() + startup_var = startup_blk.create_var(name=reader_name) + startup_blk.append_op( + type='create_py_reader', + inputs={'blocking_queue': [queue_name]}, + outputs={'Out': [startup_var]}, + attrs={ + 'shape_concat': shape_concat, + 'lod_levels': lod_levels, + 'ranks': ranks + }) + + startup_var.desc.set_dtypes(dtypes) + startup_var.persistable = 
+
+    main_prog_var = _copy_reader_var_(default_main_program().current_block(),
+                                      startup_var)
+
+    reader = monkey_patch_reader_methods(main_prog_var)
+    if use_double_buffer:
+        double_buffer_reader = double_buffer(reader, name=double_buffer_name)
+        # we return a double buffer reader. However, the reset method comes
+        # from py_reader.
+        double_buffer_reader.reset = reader.reset
+        reader = double_buffer_reader
+
+    # monkey patch py_reader special methods
+    reader.queue = feed_queue
+    current_reset_method = reader.reset
+    reader.thread = None
+    reader.tensor_provider = None
+    reader.exited = False
+
+    def start_provide_thread(func):
+        def __provider_thread__():
+            for tensors in func():
+                array = core.LoDTensorArray()
+                for item in tensors:
+                    if not isinstance(item, core.LoDTensor):
+                        tmp = core.LoDTensor()
+                        tmp.set(item, core.CPUPlace())
+                        item = tmp
+
+                    array.append(item)
+
+                if reader.exited:
+                    break
+                feed_queue.push(array)
+                if reader.exited:
+                    break
+            feed_queue.close()
+
+        reader.thread = threading.Thread(target=__provider_thread__)
+        reader.thread.daemon = True
+        reader.thread.start()
+
+    def __set_tensor_provider__(func):
+        reader.tensor_provider = func
+
+    def __set_paddle_reader__(paddle_reader):
+        with program_guard(Program(), Program()):
+            feed_list = []
+            counter = 0
+            for dtype, shape, lod_level in zip(dtypes, shapes, lod_levels):
+                name = str(counter)
+                feed_list.append(
+                    data(
+                        name=name,
+                        dtype=dtype,
+                        shape=shape,
+                        lod_level=lod_level))
+                counter += 1
+
+            feeder = DataFeeder(feed_list=feed_list, place=core.CPUPlace())
+            paddle_reader = feeder.decorate_reader(
+                paddle_reader, multi_devices=False)
+
+        def __tensor_provider__():
+            for slots in paddle_reader():
+                yield [slots[str(idx)] for idx in xrange(counter)]
+
+        __set_tensor_provider__(__tensor_provider__)
+
+    def __reset__():
+        current_reset_method()
+        if reader.thread is not None and reader.tensor_provider is not None:
+            reader.exited = True
+            reader.thread.join()
+            reader.exited = False
+
+    def __start__():
+        start_provide_thread(reader.tensor_provider)
+
+    reader.reset = __reset__
+    reader.decorate_tensor_provider = __set_tensor_provider__
+    reader.decorate_paddle_reader = __set_paddle_reader__
+    reader.start = __start__
+
+    return reader
+
+
 def open_files(filenames,
                shapes,
                lod_levels,
                dtypes,
-               thread_num,
+               thread_num=None,
                buffer_size=None,
                pass_num=1,
-               for_parallel=True):
+               is_test=None):
    """
    Open files
 
-    This layer takes a list of files to read from and returns a Reader Variable. 
-    Via the Reader Variable, we can get data from given files. All files must 
-    have name suffixs to indicate their formats, e.g., '*.recordio'. 
+    This layer takes a list of files to read from and returns a Reader Variable.
+    Via the Reader Variable, we can get data from given files. All files must
+    have name suffixes to indicate their formats, e.g., '*.recordio'.
 
    Args:
       filenames(list): The list of file names.
       shapes(list): List of tuples which declaring data shapes.
       lod_levels(list): List of ints which declaring data lod_level.
       dtypes(list): List of strs which declaring data type.
-       thread_num(int): The maximal concurrent prefetch thread number.
-       buffer_size(int): The size of prefetch buffer.
+       thread_num(int|None): The number of threads used to read the files.
+           Default: min(len(filenames), multiprocessing.cpu_count()).
+       buffer_size(int|None): The buffer size of the reader.
+           Default: 3 * thread_num.
       pass_num(int): Number of passes to run.
-       for_parallel(Bool): Set it as True if you are going to run
-           subsequent operators in parallel.
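+        (For the :code:`py_reader` defined above, the tensor-provider path has
+        no example of its own; a minimal sketch, assuming a hand-written
+        provider function with illustrative shapes and batch size:)
+
+        >>> import numpy as np
+        >>> reader = fluid.layers.py_reader(capacity=4,
+        >>>                                 shapes=[(-1, 32), (-1, 1)],
+        >>>                                 dtypes=['float32', 'int64'])
+        >>>
+        >>> def provider():
+        >>>     # yield one numpy array per declared slot; plain ndarrays are
+        >>>     # wrapped into LoDTensors on the CPU place by the feed thread
+        >>>     for _ in range(16):
+        >>>         yield [np.random.random([8, 32]).astype('float32'),
+        >>>                np.random.randint(0, 10, [8, 1]).astype('int64')]
+        >>>
+        >>> reader.decorate_tensor_provider(provider)
+        >>> feats, label = fluid.layers.read_file(reader)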
+       is_test(bool|None): Whether `open_files` is used for testing.
+           If it is used for testing, the order of the generated data is the
+           same as the file order. Otherwise, the data order is not
+           guaranteed to be the same between epochs. [Default: False].
 
    Returns:
       Variable: A Reader Variable via which we can get file data.
@@ -395,15 +735,21 @@ def open_files(filenames,
                                                     './data2.recordio'],
                                            shapes=[(3,224,224), (1)],
                                            lod_levels=[0, 0],
-                                           dtypes=['float32', 'int64'],
-                                           thread_num=2,
-                                           buffer_size=2)
+                                           dtypes=['float32', 'int64'])
 
      # Via the reader, we can use 'read_file' layer to get data:
      image, label = fluid.layers.io.read_file(reader)
    """
+    if thread_num is None:
+        thread_num = min(len(filenames), multiprocessing.cpu_count())
+    else:
+        thread_num = int(thread_num)
+
    if buffer_size is None:
-        buffer_size = thread_num
+        buffer_size = 3 * thread_num
+    else:
+        buffer_size = int(buffer_size)
+
    if isinstance(filenames, basestring):
        filenames = [filenames]
    dtypes = [convert_np_dtype_to_dtype_(dt) for dt in dtypes]
@@ -417,17 +763,18 @@ def open_files(filenames,
    multi_file_reader_name = unique_name('multi_file_reader')
    startup_blk = default_startup_program().current_block()
    startup_reader = startup_blk.create_var(name=multi_file_reader_name)
+    attrs = {
+        'shape_concat': shape_concat,
+        'lod_levels': lod_levels,
+        'ranks': ranks,
+        'file_names': filenames,
+        'thread_num': thread_num,
+        'buffer_size': buffer_size
+    }
+    if is_test is not None:
+        attrs['is_test'] = is_test
    startup_blk.append_op(
-        type='open_files',
-        outputs={'Out': [startup_reader]},
-        attrs={
-            'shape_concat': shape_concat,
-            'lod_levels': lod_levels,
-            'ranks': ranks,
-            'file_names': filenames,
-            'thread_num': thread_num,
-            'buffer_size': buffer_size
-        })
+        type='open_files', outputs={'Out': [startup_reader]}, attrs=attrs)
 
    startup_reader.desc.set_dtypes(dtypes)
    startup_reader.persistable = True
@@ -437,9 +784,6 @@ def open_files(filenames,
        main_prog_reader = multi_pass(
            reader=main_prog_reader, pass_num=pass_num)
 
-    if for_parallel:
-        main_prog_reader = parallel(reader=main_prog_reader)
-
    return monkey_patch_reader_methods(main_prog_reader)
 
 
@@ -468,22 +812,81 @@ def __create_unshared_decorated_reader__(op_type, reader, attrs, name=None):
        inputs={'UnderlyingReader': reader},
        outputs={'Out': [new_reader]},
        attrs=attrs)
-    new_reader.persistable = True
-    new_reader.stop_gradient = True
    return monkey_patch_reader_methods(new_reader)
 
 
 def shuffle(reader, buffer_size):
+    """
+    Create a shuffled reader that buffers `buffer_size` instances of the
+    underlying reader and yields them in a shuffled order.
+    """
    return __create_unshared_decorated_reader__(
        'create_shuffle_reader', reader, {'buffer_size': int(buffer_size)})
 
 
 def batch(reader, batch_size):
+    """
+    This layer is a reader decorator. It takes a reader and adds
+    'batching' decoration on it. When reading with the resulting
+    decorated reader, output data will be automatically organized
+    into batches.
+
+    Args:
+        reader(Variable): The reader to be decorated with 'batching'.
+        batch_size(int): The batch size.
+
+    Returns:
+        Variable: The reader which has been decorated with 'batching'.
+
+    Examples:
+        .. code-block:: python
+
+            raw_reader = fluid.layers.io.open_files(filenames=['./data1.recordio',
+                                                               './data2.recordio'],
+                                                    shapes=[(3,224,224), (1)],
+                                                    lod_levels=[0, 0],
+                                                    dtypes=['float32', 'int64'],
+                                                    thread_num=2,
+                                                    buffer_size=2)
+            batch_reader = fluid.layers.batch(reader=raw_reader, batch_size=5)
+
+            # If we read data with the raw_reader:
+            #     data = fluid.layers.read_file(raw_reader)
+            # We can only get data instance by instance.
+ # + # However, if we read data with the batch_reader: + # data = fluid.layers.read_file(batch_reader) + # Each 5 adjacent instances will be automatically combined together + # to become a batch. So what we get('data') is a batch data instead + # of an instance. + """ return __create_unshared_decorated_reader__( 'create_batch_reader', reader, {'batch_size': int(batch_size)}) def double_buffer(reader, place=None, name=None): + """ + Wrap a double buffer reader. The data will copy to target place with a + double buffer queue. If the target place is None, the place that executor + perform on will be used. + + Args: + reader(Variable): the reader variable need to be wrapped. + place(Place): the place of target data. Default is the sample place of + executor perform. + + name(str): Variable name. None if the user does not care. + + Returns: + wrapped reader with double buffer. + + Examples: + + >>> reader = fluid.layers.open_files(filenames=['somefile'], + >>> shapes=[[-1, 784], [-1, 1]], + >>> dtypes=['float32', 'int64']) + >>> reader = fluid.layers.double_buffer(reader) + >>> img, label = fluid.layers.read_file(reader) + """ attrs = dict() if place is not None: attrs['place'] = str(place).upper() @@ -496,21 +899,167 @@ def multi_pass(reader, pass_num): 'create_multi_pass_reader', reader, {'pass_num': int(pass_num)}) -def parallel(reader): - return __create_shared_decorated_reader__('create_threaded_reader', reader, - {}) +def read_file(reader): + """ + Execute the given reader and get data via it. + + A reader is also a Variable. It can be a raw reader generated by + `fluid.layers.open_files()` or a decorated one generated by + `fluid.layers.double_buffer()` and so on. + + Args: + + reader(Variable): The reader to execute. + Returns: + Tuple[Variable]: Data read via the given reader. -def read_file(file_obj): + Examples: + .. code-block:: python + + data_file = fluid.layers.open_files( + filenames=['mnist.recordio'], + shapes=[(-1, 748), (-1, 1)], + lod_levels=[0, 0], + dtypes=["float32", "int64"]) + data_file = fluid.layers.double_buffer( + fluid.layers.batch(data_file, batch_size=64)) + input, label = fluid.layers.read_file(data_file) + """ helper = LayerHelper('read_file') out = [ helper.create_tmp_variable( stop_gradient=True, dtype='float32') - for _ in range(len(file_obj.desc.shapes())) + for _ in range(len(reader.desc.shapes())) ] helper.append_op( - type='read', inputs={'Reader': [file_obj]}, outputs={'Out': out}) + type='read', inputs={'Reader': [reader]}, outputs={'Out': out}) if len(out) == 1: return out[0] else: return out + + +class Preprocessor(object): + """ + A block for data pre-processing in reader. + + Args: + reader (Variable): A reader variable. + name (str, default None): The name of the reader. + + Examples: + .. 
code-block:: python
+
+            preprocessor = fluid.layers.io.Preprocessor(reader=reader)
+            with preprocessor.block():
+                img, lbl = preprocessor.inputs()
+                img_out = img / 2
+                lbl_out = lbl + 1
+                preprocessor.outputs(img_out, lbl_out)
+
+            data_file = fluid.layers.io.double_buffer(preprocessor())
+
+    """
+    BEFORE_SUB_BLOCK = 0
+    IN_SUB_BLOCK = 1
+    AFTER_SUB_BLOCK = 2
+
+    def __init__(self, reader, name=None):
+        self.underlying_reader = reader
+        new_reader_name = name if name is not None else unique_name(
+            "create_custom_reader")
+        self.main_prog = default_main_program()
+        self.reader = self.main_prog.current_block().create_var(
+            name=new_reader_name)
+        self.sub_block = None
+        self.source_var_names = None
+        self.sink_var_names = None
+        self.status = Preprocessor.BEFORE_SUB_BLOCK
+
+    def _is_completed(self):
+        return self.sub_block and self.source_var_names and self.sink_var_names
+
+    @contextlib.contextmanager
+    def block(self):
+        self.status = Preprocessor.IN_SUB_BLOCK
+        self.sub_block = self.main_prog.create_block()
+        yield
+        self.main_prog.rollback()
+        self.status = Preprocessor.AFTER_SUB_BLOCK
+        if not self._is_completed():
+            raise RuntimeError(
+                "The definition of preprocessor is incomplete! "
+                "Please make sure that you have set input and output "
+                "variables by invoking 'inputs' and 'outputs' in "
+                "Preprocessor's sub-block.")
+
+    def inputs(self):
+        if self.status != Preprocessor.IN_SUB_BLOCK:
+            raise RuntimeError(
+                "Preprocessor.inputs() can only be invoked inside the sub-block."
+            )
+
+        source_shapes = self.underlying_reader.desc.shapes()
+        source_dtypes = self.underlying_reader.desc.dtypes()
+        source_lod_levels = self.underlying_reader.desc.lod_levels()
+        self.source_var_names = [
+            unique_name("preprocessor_source")
+            for _ in range(len(source_shapes))
+        ]
+        source_vars = []
+        for var_name, shape, dtype, lod_level in zip(
+                self.source_var_names, source_shapes, source_dtypes,
+                source_lod_levels):
+            source_vars.append(self.main_prog.current_block().create_var(
+                name=var_name, shape=shape, dtype=dtype, lod_level=lod_level))
+        return source_vars
+
+    def outputs(self, *outs):
+        if self.status != Preprocessor.IN_SUB_BLOCK:
+            raise RuntimeError(
+                "Preprocessor.outputs() can only be invoked inside the sub-block."
+            )
+        self.sink_var_names = [var.name for var in outs]
+
+    def __call__(self, *args, **kwargs):
+        if self.status != Preprocessor.AFTER_SUB_BLOCK:
+            raise RuntimeError(
+                "Preprocessor output can only be retrieved after its "
+                "sub-block has been defined.")
+
+        self.main_prog.current_block().append_op(
+            type="create_custom_reader",
+            inputs={'UnderlyingReader': self.underlying_reader},
+            outputs={'Out': [self.reader]},
+            attrs={
+                "sub_block": self.sub_block,
+                "source_var_names": self.source_var_names,
+                "sink_var_names": self.sink_var_names
+            })
+        return monkey_patch_reader_methods(self.reader)
+
+
+@templatedoc()
+def load(out, file_path, load_as_fp16=None):
+    """
+    ${comment}
+
+    >>> import paddle.fluid as fluid
+    >>> tmp_tensor = fluid.layers.create_tensor(dtype='float32')
+    >>> fluid.layers.load(tmp_tensor, "./tmp_tensor.bin")
+
+    Args:
+        out(${out_type}): ${out_comment}.
+
+        file_path(${file_path_type}): ${file_path_comment}.
+
+        load_as_fp16(${load_as_fp16_type}): ${load_as_fp16_comment}.
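+
+        (The ${...} markers above are template fields, filled in from the C++
+        OpProto by the `templatedoc` decorator added later in this patch. A
+        minimal sketch of the substitution mechanism itself, standard library
+        only, with illustrative values:)
+
+        >>> import string
+        >>> tmpl = string.Template("${comment}\nArgs:\n    out: ${out_comment}.")
+        >>> print(tmpl.substitute(comment='Load a tensor from disk',
+        >>>                       out_comment='The tensor to be loaded'))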
+
+    Returns:
+        None
+    """
+    helper = LayerHelper("load", **locals())
+    attrs = {"file_path": file_path}
+    if load_as_fp16 is not None:
+        attrs['load_as_fp16'] = load_as_fp16
+    helper.append_op(type="load", inputs={}, outputs={"Out": out}, attrs=attrs)
diff --git a/python/paddle/fluid/layers/layer_function_generator.py b/python/paddle/fluid/layers/layer_function_generator.py
index 35b01a7991..c0d72620b1 100644
--- a/python/paddle/fluid/layers/layer_function_generator.py
+++ b/python/paddle/fluid/layers/layer_function_generator.py
@@ -12,19 +12,16 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 import re
-import cStringIO
 import functools
 import warnings
+import string
+from six.moves import cStringIO
 
 from ..proto import framework_pb2
 from ..framework import OpProtoHolder, Variable
 from ..layer_helper import LayerHelper
 
-__all__ = [
-    'deprecated',
-    'generate_layer_fn',
-    'autodoc',
-]
+__all__ = ['deprecated', 'generate_layer_fn', 'autodoc', 'templatedoc']
 
 
 def _convert_(name):
@@ -43,6 +40,22 @@ def _convert_(name):
     return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower()
 
 
+def _type_to_str_(tp):
+    return framework_pb2.AttrType.Name(tp)
+
+
+_two_dollar_pattern_ = re.compile(r"\$\$([^\$]+)\$\$")
+_single_dollar_pattern_ = re.compile(r"\$([^\$]+)\$")
+_two_bang_pattern_ = re.compile(r"!!([^!]+)!!")
+
+
+def escape_math(text):
+    return _two_bang_pattern_.sub(
+        r'$$\1$$',
+        _single_dollar_pattern_.sub(r':math:`\1`',
+                                    _two_dollar_pattern_.sub(r"!!\1!!", text)))
+
+
 def _generate_doc_string_(op_proto):
     """
     Generate docstring by OpProto
@@ -54,34 +67,33 @@ def _generate_doc_string_(op_proto):
         str: the document string
     """
 
-    def _type_to_str_(tp):
-        return framework_pb2.AttrType.Name(tp)
-
     if not isinstance(op_proto, framework_pb2.OpProto):
         raise TypeError("OpProto should be `framework_pb2.OpProto`")
 
-    buf = cStringIO.StringIO()
-    buf.write(op_proto.comment)
+    buf = cStringIO()
+    buf.write(escape_math(op_proto.comment))
     buf.write('\nArgs:\n')
     for each_input in op_proto.inputs:
         line_begin = '    {0}: '.format(_convert_(each_input.name))
         buf.write(line_begin)
-        buf.write(each_input.comment)
-        buf.write('\n')
-        buf.write(' ' * len(line_begin))
-        buf.write('Duplicable: ')
-        buf.write(str(each_input.duplicable))
-        buf.write('    Optional: ')
-        buf.write(str(each_input.dispensable))
+        buf.write(escape_math(each_input.comment))
+        if each_input.duplicable:
+            buf.write("  Duplicable.")
+        if each_input.dispensable:
+            buf.write("  Optional.")
         buf.write('\n')
 
+    skip_attrs = OpProtoHolder.generated_op_attr_names()
+
     for each_attr in op_proto.attrs:
+        if each_attr.name in skip_attrs:
+            continue
         buf.write('    ')
         buf.write(each_attr.name)
         buf.write(' (')
         buf.write(_type_to_str_(each_attr.type))
         buf.write('): ')
-        buf.write(each_attr.comment)
+        buf.write(escape_math(each_attr.comment))
         buf.write('\n')
 
     if len(op_proto.outputs) != 0:
@@ -90,7 +102,7 @@ def _generate_doc_string_(op_proto):
         for each_opt in op_proto.outputs:
             if not each_opt.intermediate:
                 break
-        buf.write(each_opt.comment)
+        buf.write(escape_math(each_opt.comment))
 
     return buf.getvalue()
 
@@ -107,13 +119,13 @@ def generate_layer_fn(op_type):
     """
     op_proto = OpProtoHolder.instance().get_op_proto(op_type)
     not_intermediate_outputs = \
-        filter(lambda output: not output.intermediate, op_proto.outputs)
+        [output for output in op_proto.outputs if not output.intermediate]
     intermediate_outputs = \
-        filter(lambda output: output.intermediate, op_proto.outputs)
+        [output for output in op_proto.outputs if
output.intermediate] if len(not_intermediate_outputs) != 1: raise ValueError("Only one non intermediate output operator can be", - "automatically generated.") + "automatically generated. {0}".format(op_type)) if not_intermediate_outputs[0].duplicable: raise ValueError( @@ -220,3 +232,61 @@ def autodoc(comment=""): return func return __impl__ + + +def templatedoc(op_type=None): + """ + Decorator of layer function. It will use the docstring from the layer + function as the template. The template arguments are: + + * ${comment}: The operator comment written in CPP. + * ${{name}_comment}: The comment of ${name} written with AddAttr, AddOutput, + and AddInput. The ${name} is Python snake style. i.e., xxx_xxx. + * ${{name}_type}: The type of ${name}. + + Returns: + Decorated function. + """ + + def trim_ending_dot(msg): + return msg.rstrip('.') + + def __impl__(func): + if op_type is None: + op_type_name = func.__name__ + else: + op_type_name = op_type + op_proto = OpProtoHolder.instance().get_op_proto(op_type_name) + tmpl = string.Template(func.__doc__) + + comment_lines = op_proto.comment.split("\n") + comment = "" + for line in comment_lines: + line = line.strip() + if len(line) != 0: + comment += escape_math(line) + comment += " " + elif len(comment) != 0: + comment += "\n \n " + + args = {"comment": trim_ending_dot(comment)} + for each_input in op_proto.inputs: + input_name = _convert_(each_input.name) + args["{0}_comment".format(input_name)] = trim_ending_dot( + each_input.comment) + args["{0}_type".format(input_name)] = "Variable" + for each_attr in op_proto.attrs: + input_name = _convert_(each_attr.name) + args["{0}_comment".format(input_name)] = trim_ending_dot( + each_attr.comment) + args["{0}_type".format(input_name)] = _type_to_str_(each_attr.type) + + for each_opt in op_proto.outputs: + output_name = _convert_(each_opt.name) + args["{0}_comment".format(output_name)] = trim_ending_dot( + each_opt.comment) + args["{0}_type".format(output_name)] = "Variable" + func.__doc__ = tmpl.substitute(args) + return func + + return __impl__ diff --git a/python/paddle/fluid/layers/learning_rate_scheduler.py b/python/paddle/fluid/layers/learning_rate_scheduler.py index d13c54daa5..daf91a40f7 100644 --- a/python/paddle/fluid/layers/learning_rate_scheduler.py +++ b/python/paddle/fluid/layers/learning_rate_scheduler.py @@ -11,17 +11,6 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - -import control_flow -import nn -import ops -import tensor -from ..initializer import init_on_cpu - -__all__ = [ - 'exponential_decay', 'natural_exp_decay', 'inverse_time_decay', - 'polynomial_decay', 'piecewise_decay', 'noam_decay' -] """ When training a model, it's often useful to decay the learning rate during training process, this is called @@ -31,6 +20,18 @@ User can also implement their own learning_rate_decay strategy according to this module. """ +from . import control_flow +from . import nn +from . import ops +from . 
import tensor +from ..initializer import init_on_cpu +from ..framework import default_main_program, Parameter + +__all__ = [ + 'exponential_decay', 'natural_exp_decay', 'inverse_time_decay', + 'polynomial_decay', 'piecewise_decay', 'noam_decay', 'append_LARS' +] + def _decay_step_counter(begin=0): # the first global step is zero in learning rate decay @@ -41,57 +42,76 @@ def _decay_step_counter(begin=0): def noam_decay(d_model, warmup_steps): - """Apply decay to learning rate. - ```python - lr_value = np.power(d_model, -0.5) * np.min([ - np.power(current_steps, -0.5), - np.power(warmup_steps, -1.5) * current_steps - ]) - ``` + """ + Noam decay method. The numpy implementation of noam decay as follows. + + >>> import numpy as np + >>> lr_value = np.power(d_model, -0.5) * np.min([ + >>> np.power(current_steps, -0.5), + >>> np.power(warmup_steps, -1.5) * current_steps]) + + Please reference `attention is all you need + `_. Args: d_model(Variable): The dimensionality of input and output of model. - Reference: attention is all you need - https://arxiv.org/pdf/1706.03762.pdf + warmup_steps(Variable): A super parameter. Returns: The decayed learning rate. """ global_step = _decay_step_counter(1) - with init_on_cpu(): - a = global_step**-0.5 - b = (warmup_steps**-1.5) * global_step - lr_value = (d_model**-0.5) * ops.elementwise_min(a, b) + + a = global_step**-0.5 + b = (warmup_steps**-1.5) * global_step + lr_value = (d_model**-0.5) * ops.elementwise_min(a, b) return lr_value def exponential_decay(learning_rate, decay_steps, decay_rate, staircase=False): - """Applies exponential decay to the learning rate. + """ + Applies exponential decay to the learning rate. + + When training a model, it is often recommended to lower the learning rate as the + training progresses. By using this function, the learning rate will be decayed by + 'decay_rate' every 'decay_steps' steps. + + >>> if staircase == True: + >>> decayed_learning_rate = learning_rate * decay_rate ^ floor(global_step / decay_steps) + >>> else: + >>> decayed_learning_rate = learning_rate * decay_rate ^ (global_step / decay_steps) - ```python - decayed_learning_rate = learning_rate * - decay_rate ^ (global_step / decay_steps) - ``` Args: - learning_rate: A scalar float32 value or a Variable. This - will be the initial learning rate during training - decay_steps: A Python `int32` number. - decay_rate: A Python `float` number. - staircase: Boolean. If set true, decay the learning rate every decay_steps. + learning_rate(Variable|float): The initial learning rate. + decay_steps(int): See the decay computation above. + decay_rate(float): The decay rate. See the decay computation above. + staircase(Boolean): If True, decay the learning rate at discrete intervals. + Default: False Returns: - The decayed learning rate + Variable: The decayed learning rate + + Examples: + .. 
code-block:: python + + base_lr = 0.1 + sgd_optimizer = fluid.optimizer.SGD( + learning_rate=fluid.layers.exponential_decay( + learning_rate=base_lr, + decay_steps=10000, + decay_rate=0.5, + staircase=True)) + sgd_optimizer.minimize(avg_cost) + """ global_step = _decay_step_counter() - with init_on_cpu(): - # update learning_rate - div_res = global_step / decay_steps - if staircase: - div_res = ops.floor(div_res) - decayed_lr = learning_rate * (decay_rate**div_res) + div_res = global_step / decay_steps + if staircase: + div_res = ops.floor(div_res) + decayed_lr = learning_rate * (decay_rate**div_res) return decayed_lr @@ -116,41 +136,56 @@ def natural_exp_decay(learning_rate, decay_steps, decay_rate, staircase=False): """ global_step = _decay_step_counter() - with init_on_cpu(): - div_res = global_step / decay_steps - if staircase: - div_res = ops.floor(div_res) - decayed_lr = learning_rate * ops.exp(-1 * decay_rate * div_res) + div_res = global_step / decay_steps + if staircase: + div_res = ops.floor(div_res) + decayed_lr = learning_rate * ops.exp(-1 * decay_rate * div_res) return decayed_lr def inverse_time_decay(learning_rate, decay_steps, decay_rate, staircase=False): - """Applies inverse time decay to the initial learning rate. + """ + Applies inverse time decay to the initial learning rate. - >>> if staircase: + When training a model, it is often recommended to lower the learning rate as the + training progresses. By using this function, an inverse decay function will be + applied to the initial learning rate. + + >>> if staircase == True: >>> decayed_learning_rate = learning_rate / (1 + decay_rate * floor(global_step / decay_step)) >>> else: >>> decayed_learning_rate = learning_rate / (1 + decay_rate * global_step / decay_step) Args: - learning_rate: A scalar float32 value or a Variable. This - will be the initial learning rate during training. - decay_steps: A Python `int32` number. - decay_rate: A Python `float` number. - staircase: Boolean. If set true, decay the learning rate every decay_steps. + learning_rate(Variable|float): The initial learning rate. + decay_steps(int): See the decay computation above. + decay_rate(float): The decay rate. See the decay computation above. + staircase(Boolean): If True, decay the learning rate at discrete intervals. + Default: False Returns: - The decayed learning rate + Variable: The decayed learning rate + + Examples: + .. code-block:: python + + base_lr = 0.1 + sgd_optimizer = fluid.optimizer.SGD( + learning_rate=fluid.layers.inverse_time_decay( + learning_rate=base_lr, + decay_steps=10000, + decay_rate=0.5, + staircase=True)) + sgd_optimizer.minimize(avg_cost) """ global_step = _decay_step_counter() - with init_on_cpu(): - div_res = global_step / decay_steps - if staircase: - div_res = ops.floor(div_res) + div_res = global_step / decay_steps + if staircase: + div_res = ops.floor(div_res) - decayed_lr = learning_rate / (1 + decay_rate * div_res) + decayed_lr = learning_rate / (1 + decay_rate * div_res) return decayed_lr @@ -160,62 +195,74 @@ def polynomial_decay(learning_rate, end_learning_rate=0.0001, power=1.0, cycle=False): - """Applies polynomial decay to the initial learning rate. + """ + Applies polynomial decay to the initial learning rate. + + .. 
code-block:: python + + if cycle: + decay_steps = decay_steps * ceil(global_step / decay_steps) + else: + global_step = min(global_step, decay_steps) + decayed_learning_rate = (learning_rate - end_learning_rate) * + (1 - global_step / decay_steps) ^ power + end_learning_rate - >>> if cycle: - >>> decay_steps = decay_steps * ceil(global_step / decay_steps) - >>> else: - >>> global_step = min(global_step, decay_steps) - >>> decayed_learning_rate = (learning_rate - end_learning_rate) * - >>> (1 - global_step / decay_steps) ^ power + - >>> end_learning_rate Args: - learning_rate: A scalar float32 value or a Variable. This - will be the initial learning rate during training - decay_steps: A Python `int32` number. - end_learning_rate: A Python `float` number. - power: A Python `float` number - cycle: Boolean. If set true, decay the learning rate every decay_steps. + learning_rate(Variable|float32): A scalar float32 value or a Variable. This + will be the initial learning rate during training. + decay_steps(int32): A Python `int32` number. + end_learning_rate(float): A Python `float` number. + power(float): A Python `float` number. + cycle(bool): If set true, decay the learning rate every decay_steps. Returns: - The decayed learning rate + Variable: The decayed learning rate """ global_step = _decay_step_counter() - with init_on_cpu(): - if cycle: - div_res = ops.ceil(global_step / decay_steps) - zero_var = tensor.fill_constant( - shape=[1], dtype='float32', value=0.0) - one_var = tensor.fill_constant( - shape=[1], dtype='float32', value=1.0) - - with control_flow.Switch() as switch: - with switch.case(global_step == zero_var): - tensor.assign(input=one_var, output=div_res) - decay_steps = decay_steps * div_res - else: - decay_steps_var = tensor.fill_constant( - shape=[1], dtype='float32', value=float(decay_steps)) - global_step = ops.elementwise_min(x=global_step, y=decay_steps_var) + if cycle: + div_res = ops.ceil(global_step / decay_steps) + zero_var = tensor.fill_constant(shape=[1], dtype='float32', value=0.0) + one_var = tensor.fill_constant(shape=[1], dtype='float32', value=1.0) - decayed_lr = (learning_rate - end_learning_rate) * \ - ((1 - global_step / decay_steps) ** power) + end_learning_rate + with control_flow.Switch() as switch: + with switch.case(global_step == zero_var): + tensor.assign(input=one_var, output=div_res) + decay_steps = decay_steps * div_res + else: + decay_steps_var = tensor.fill_constant( + shape=[1], dtype='float32', value=float(decay_steps)) + global_step = ops.elementwise_min(x=global_step, y=decay_steps_var) + + decayed_lr = (learning_rate - end_learning_rate) * \ + ((1 - global_step / decay_steps) ** power) + end_learning_rate return decayed_lr def piecewise_decay(boundaries, values): """Applies piecewise decay to the initial learning rate. - >>> boundaries = [10000, 20000] - >>> values = [1.0, 0.5, 0.1] - >>> - >>> if step < 10000: - >>> learning_rate = 1.0 - >>> elif 10000 <= step < 20000: - >>> learning_rate = 0.5 - >>> else: - >>> learning_rate = 0.1 + The algorithm can be described as the code below. + + .. code-block:: python + + boundaries = [10000, 20000] + values = [1.0, 0.5, 0.1] + if step < 10000: + learning_rate = 1.0 + elif 10000 <= step < 20000: + learning_rate = 0.5 + else: + learning_rate = 0.1 + Args: + boundaries: A list of steps numbers. + values: A list of learning rate values that will be picked during + different step boundaries. + + Returns: + The decayed learning rate. 
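+
+    Examples:
+        .. code-block:: python
+
+            # a hookup sketch in the style of the other decay examples in
+            # this file (`avg_cost` is assumed to be defined elsewhere)
+            sgd_optimizer = fluid.optimizer.SGD(
+                learning_rate=fluid.layers.piecewise_decay(
+                    boundaries=[10000, 20000], values=[1.0, 0.5, 0.1]))
+            sgd_optimizer.minimize(avg_cost)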
+ + """ if len(values) - len(boundaries) != 1: @@ -223,27 +270,65 @@ def piecewise_decay(boundaries, values): global_step = _decay_step_counter() - with init_on_cpu(): - lr = tensor.create_global_var( - shape=[1], - value=0.0, - dtype='float32', - persistable=True, - name="learning_rate") + lr = tensor.create_global_var( + shape=[1], + value=0.0, + dtype='float32', + persistable=True, + name="learning_rate") - with control_flow.Switch() as switch: - for i in range(len(boundaries)): - boundary_val = tensor.fill_constant( - shape=[1], dtype='float32', value=float(boundaries[i])) - value_var = tensor.fill_constant( - shape=[1], dtype='float32', value=float(values[i])) - with switch.case(global_step < boundary_val): - tensor.assign(value_var, lr) - last_value_var = tensor.fill_constant( + with control_flow.Switch() as switch: + for i in range(len(boundaries)): + boundary_val = tensor.fill_constant( shape=[1], dtype='float32', - value=float(values[len(values) - 1])) - with switch.default(): - tensor.assign(last_value_var, lr) + value=float(boundaries[i]), + force_cpu=True) + value_var = tensor.fill_constant( + shape=[1], dtype='float32', value=float(values[i])) + with switch.case(global_step < boundary_val): + tensor.assign(value_var, lr) + last_value_var = tensor.fill_constant( + shape=[1], dtype='float32', value=float(values[len(values) - 1])) + with switch.default(): + tensor.assign(last_value_var, lr) return lr + + +def append_LARS(params_grads, learning_rate, weight_decay): + """Applies LARS (LAYER-WISE ADAPTIVE RATE SCALING) to learning rate for + each layer. + + ```python + learning_rate *= local_gw_ratio * sqrt(sumsq(param)) + / (sqrt(sumsq(gradient))+ weight_decay * sqrt(sumsq(param))) + ``` + + Args: + learning_rate: A learning rate Variable. This + is the global learning rate for LARS. + weight_decay: A Python `float` number. + + Returns: + The decayed learning rate + """ + + def _balanced_weight(param_norm, grad_norm): + if weight_decay == 1.0: + return grad_norm + param_norm + else: + return grad_norm + weight_decay * param_norm + + for param, grad in params_grads: + param_lr = param.optimize_attr['learning_rate'] + param_norm = ops.sqrt(nn.reduce_sum(input=ops.square(param))) + grad_norm = ops.sqrt(nn.reduce_sum(input=ops.square(grad))) + if type(param_lr) == float and param_lr == 1.0: + decayed_lr = learning_rate * param_norm \ + / _balanced_weight(param_norm, grad_norm) + else: + decayed_lr = learning_rate * param_lr * param_norm \ + / _balanced_weight(param_norm, grad_norm) + # set back param local learning rate + param.optimize_attr['learning_rate'] = decayed_lr diff --git a/python/paddle/fluid/layers/math_op_patch.py b/python/paddle/fluid/layers/math_op_patch.py index 1754061c4b..0e10a91d25 100644 --- a/python/paddle/fluid/layers/math_op_patch.py +++ b/python/paddle/fluid/layers/math_op_patch.py @@ -13,11 +13,9 @@ # limitations under the License. from ..framework import Variable, unique_name -from layer_function_generator import OpProtoHolder +from .layer_function_generator import OpProtoHolder from ..initializer import force_init_on_cpu -__all__ = ['monkey_patch_variable'] - def monkey_patch_variable(): def unique_tmp_name(): diff --git a/python/paddle/fluid/layers/metric.py b/python/paddle/fluid/layers/metric.py deleted file mode 100644 index cab2eb5551..0000000000 --- a/python/paddle/fluid/layers/metric.py +++ /dev/null @@ -1,81 +0,0 @@ -# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. 
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -""" -All layers just related to metric. -""" - -import warnings -from ..layer_helper import LayerHelper -from ..initializer import Normal, Constant -from ..framework import Variable -from ..param_attr import ParamAttr -import nn - -__all__ = ['accuracy', 'auc'] - - -def accuracy(input, label, k=1, correct=None, total=None): - """ - This function computes the accuracy using the input and label. - The output is the top k inputs and their indices. - """ - helper = LayerHelper("accuracy", **locals()) - topk_out, topk_indices = nn.topk(input, k=k) - acc_out = helper.create_tmp_variable(dtype="float32") - if correct is None: - correct = helper.create_tmp_variable(dtype="int64") - if total is None: - total = helper.create_tmp_variable(dtype="int64") - helper.append_op( - type="accuracy", - inputs={ - "Out": [topk_out], - "Indices": [topk_indices], - "Label": [label] - }, - outputs={ - "Accuracy": [acc_out], - "Correct": [correct], - "Total": [total], - }) - return acc_out - - -def auc(input, label, curve='ROC', num_thresholds=200): - warnings.warn( - "This interface not recommended, fluid.layers.auc compute the auc at every minibatch, \ - but can not aggregate them and get the pass AUC, because pass \ - auc can not be averaged with weighted from the minibatch auc value. \ - Please use fluid.metrics.Auc, it can compute the auc value via Python natively, \ - which can get every minibatch and every pass auc value.", Warning) - helper = LayerHelper("auc", **locals()) - topk_out = helper.create_tmp_variable(dtype=input.dtype) - topk_indices = helper.create_tmp_variable(dtype="int64") - topk_out, topk_indices = nn.topk(input, k=k) - auc_out = helper.create_tmp_variable(dtype="float32") - if correct is None: - correct = helper.create_tmp_variable(dtype="int64") - if total is None: - total = helper.create_tmp_variable(dtype="int64") - helper.append_op( - type="accuracy", - inputs={ - "Out": [topk_out], - "Indices": [topk_indices], - "Label": [label] - }, - attrs={"curve": curve, - "num_thresholds": num_thresholds}, - outputs={"AUC": [auc_out], }) - return auc_out diff --git a/python/paddle/fluid/layers/metric_op.py b/python/paddle/fluid/layers/metric_op.py new file mode 100644 index 0000000000..49bae1e8af --- /dev/null +++ b/python/paddle/fluid/layers/metric_op.py @@ -0,0 +1,148 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +All layers just related to metric. 
+""" + +import warnings +from ..layer_helper import LayerHelper +from ..initializer import Normal, Constant +from ..framework import Variable +from ..param_attr import ParamAttr +from . import nn + +__all__ = ['accuracy', 'auc'] + + +def accuracy(input, label, k=1, correct=None, total=None): + """ + accuracy layer. + Refer to the https://en.wikipedia.org/wiki/Precision_and_recall + + This function computes the accuracy using the input and label. + If the correct label occurs in top k predictions, then correct will increment by one. + Note: the dtype of accuracy is determined by input. the input and label dtype can be different. + + Args: + input(Variable): The input of accuracy layer, which is the predictions of network. + Carry LoD information is supported. + label(Variable): The label of dataset. + k(int): The top k predictions for each class will be checked. + correct(Variable): The correct predictions count. + total(Variable): The total entries count. + + Returns: + Variable: The correct rate. + + Examples: + .. code-block:: python + + data = fluid.layers.data(name="data", shape=[-1, 32, 32], dtype="float32") + label = fluid.layers.data(name="data", shape=[-1,1], dtype="int32") + predict = fluid.layers.fc(input=data, size=10) + acc = fluid.layers.accuracy(input=predict, label=label, k=5) + + """ + helper = LayerHelper("accuracy", **locals()) + topk_out, topk_indices = nn.topk(input, k=k) + acc_out = helper.create_tmp_variable(dtype="float32") + if correct is None: + correct = helper.create_tmp_variable(dtype="int64") + if total is None: + total = helper.create_tmp_variable(dtype="int64") + helper.append_op( + type="accuracy", + inputs={ + "Out": [topk_out], + "Indices": [topk_indices], + "Label": [label] + }, + outputs={ + "Accuracy": [acc_out], + "Correct": [correct], + "Total": [total], + }) + return acc_out + + +def auc(input, label, curve='ROC', num_thresholds=200, topk=1): + """ + **Area Under the Curve (AUC) Layer** + + This implementation computes the AUC according to forward output and label. + It is used very widely in binary classification evaluation. + + Note: If input label contains values other than 0 and 1, it will be cast + to `bool`. Find the relevant definitions `here `_. + + There are two types of possible curves: + + 1. ROC: Receiver operating characteristic; + 2. PR: Precision Recall + + Args: + input(Variable): A floating-point 2D Variable, values are in the range + [0, 1]. Each row is sorted in descending order. This + input should be the output of topk. Typically, this + Variable indicates the probability of each label. + label(Variable): A 2D int Variable indicating the label of the training + data. The height is batch size and width is always 1. + curve(str): Curve type, can be 'ROC' or 'PR'. Default 'ROC'. + num_thresholds(int): The number of thresholds to use when discretizing + the roc curve. Default 200. + topk(int): only topk number of prediction output will be used for auc. + + Returns: + Variable: A scalar representing the current AUC. + + Examples: + .. code-block:: python + + # network is a binary classification model and label the ground truth + prediction = network(image, is_infer=True) + auc_out=fluid.layers.auc(input=prediction, label=label) + """ + helper = LayerHelper("auc", **locals()) + auc_out = helper.create_tmp_variable(dtype="float64") + # make tp, tn, fp, fn persistable, so that can accumulate all batches. 
+ tp = helper.create_global_variable(persistable=True, dtype='int64') + tn = helper.create_global_variable(persistable=True, dtype='int64') + fp = helper.create_global_variable(persistable=True, dtype='int64') + fn = helper.create_global_variable(persistable=True, dtype='int64') + for var in [tp, tn, fp, fn]: + helper.set_variable_initializer( + var, Constant( + value=0.0, force_cpu=True)) + + helper.append_op( + type="auc", + inputs={ + "Predict": [input], + "Label": [label], + "TP": [tp], + "TN": [tn], + "FP": [fp], + "FN": [fn] + }, + attrs={"curve": curve, + "num_thresholds": num_thresholds}, + outputs={ + "AUC": [auc_out], + "TPOut": [tp], + "TNOut": [tn], + "FPOut": [fp], + "FNOut": [fn] + }) + return auc_out, [tp, tn, fp, fn] diff --git a/python/paddle/fluid/layers/nn.py b/python/paddle/fluid/layers/nn.py index 1786be22fd..0960b54123 100644 --- a/python/paddle/fluid/layers/nn.py +++ b/python/paddle/fluid/layers/nn.py @@ -1,4 +1,18 @@ -# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Copyright (c ) 2018 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -19,9 +33,12 @@ from ..layer_helper import LayerHelper from ..initializer import Normal, Constant from ..framework import Variable from ..param_attr import ParamAttr -from layer_function_generator import autodoc -from tensor import concat -import utils +from .layer_function_generator import autodoc, templatedoc +from .tensor import concat +from . import utils +import random +from .. import unique_name +from functools import reduce __all__ = [ 'fc', @@ -38,13 +55,16 @@ __all__ = [ 'chunk_eval', 'sequence_conv', 'conv2d', + 'conv3d', 'sequence_pool', 'sequence_softmax', 'softmax', 'pool2d', + 'pool3d', 'batch_norm', 'beam_search_decode', 'conv2d_transpose', + 'conv3d_transpose', 'sequence_expand', 'lstm_unit', 'reduce_sum', @@ -66,6 +86,7 @@ __all__ = [ 'transpose', 'im2sequence', 'nce', + 'hsigmoid', 'beam_search', 'row_conv', 'multiplex', @@ -80,6 +101,17 @@ __all__ = [ 'pad', 'label_smooth', 'roi_pool', + 'dice_loss', + 'image_resize', + 'image_resize_short', + 'resize_bilinear', + 'gather', + 'random_crop', + 'mean_iou', + 'relu', + 'log', + 'crop', + 'rank_loss', ] @@ -88,7 +120,6 @@ def fc(input, num_flatten_dims=1, param_attr=None, bias_attr=None, - use_cudnn=False, use_mkldnn=False, act=None, is_test=False, @@ -96,14 +127,15 @@ def fc(input, """ **Fully Connected Layer** - The fully connected layer can take multiple tensors as its inputs. It - creates a variable called weights for each input tensor, which represents - a fully connected weight matrix from each input unit to each output unit. - The fully connected layer multiplies each input tensor with its coresponding - weight to produce an output Tensor. 
If multiple input tensors are given, - the results of multiple multiplications will be sumed up. If bias_attr is - not None, a bias variable will be created and added to the output. Finally, - if activation is not None, it will be applied to the output as well. + This function creates a fully connected layer in the network. It can take + multiple tensors as its inputs. It creates a variable called weights for + each input tensor, which represents a fully connected weight matrix from + each input unit to each output unit. The fully connected layer multiplies + each input tensor with its coresponding weight to produce an output Tensor. + If multiple input tensors are given, the results of multiple multiplications + will be sumed up. If bias_attr is not None, a bias variable will be created + and added to the output. Finally, if activation is not None, it will be applied + to the output as well. This process can be formulated as follows: @@ -136,7 +168,8 @@ def fc(input, param_attr (ParamAttr|list of ParamAttr, default None): The parameter attribute for learnable parameters/weights of this layer. bias_attr (ParamAttr|list of ParamAttr, default None): The parameter attribute for the bias - of this layer. If it is set to None, no bias will be added to the output units. + of this layer. If it is set to False, no bias will be added to the output units. + If it is set to None, the bias is initialized zero. Default: None. act (str, default None): Activation to be applied to the output of this layer. is_test(bool): A flag indicating whether execution is in test phase. use_mkldnn(bool): Use mkldnn kernel or not, it is valid only when the mkldnn @@ -144,7 +177,7 @@ def fc(input, name (str, default None): The name of this layer. Returns: - A tensor variable storing the transformation result. + Variable: The transformation result. Raises: ValueError: If rank of the input tensor is less than 2. @@ -175,11 +208,8 @@ def fc(input, inputs={"X": input_var, "Y": w}, outputs={"Out": tmp}, - attrs={ - "x_num_col_dims": num_flatten_dims, - "y_num_col_dims": 1, - "use_mkldnn": use_mkldnn - }) + attrs={"x_num_col_dims": num_flatten_dims, + "y_num_col_dims": 1}) mul_results.append(tmp) if len(mul_results) == 1: @@ -187,7 +217,10 @@ def fc(input, else: pre_bias = helper.create_tmp_variable(dtype) helper.append_op( - type="sum", inputs={"X": mul_results}, outputs={"Out": pre_bias}) + type="sum", + inputs={"X": mul_results}, + outputs={"Out": pre_bias}, + attrs={"use_mkldnn": use_mkldnn}) # add bias pre_activation = helper.append_bias_op(pre_bias, dim_start=num_flatten_dims) # add activation @@ -217,10 +250,11 @@ def embedding(input, have two elements which indicate the size of the dictionary of embeddings and the size of each embedding vector respectively. is_sparse(bool): The flag indicating whether to use sparse update. + is_distributed(bool): Whether to run lookup table from remote parameter server. padding_idx(int|long|None): If :attr:`None`, it makes no effect to lookup. Otherwise the given :attr:`padding_idx` indicates padding the output with zeros whenever lookup encounters it in :attr:`input`. If - :math:`padding_idx < 0`, the padding_idx to use in lookup is + :math:`padding_idx < 0`, the :attr:`padding_idx` to use in lookup is :math:`size[0] + dim`. 
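+            For instance, with :attr:`size` = [128, 64], a :attr:`padding_idx`
+            of -1 resolves to 128 + (-1) = 127, so ids equal to 127 embed to
+            all-zero vectors. A hedged usage sketch (dimensions illustrative):
+
+            >>> ids = fluid.layers.data(name='ids', shape=[1], dtype='int64')
+            >>> emb = fluid.layers.embedding(input=ids, size=[128, 64],
+            >>>                              padding_idx=-1)  # id 127 -> zeros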
param_attr(ParamAttr): Parameters for this layer dtype(np.dtype|core.VarDesc.VarType|str): The type of data : float32, float_16, int etc @@ -256,9 +290,11 @@ def embedding(input, return tmp -# TODO(qijun): expose H0 and C0 +@templatedoc(op_type="lstm") def dynamic_lstm(input, size, + h_0=None, + c_0=None, param_attr=None, bias_attr=None, use_peepholes=True, @@ -269,56 +305,18 @@ def dynamic_lstm(input, dtype='float32', name=None): """ - **Dynamic LSTM Layer** - - The defalut implementation is diagonal/peephole connection - (https://arxiv.org/pdf/1402.1128.pdf), the formula is as follows: - - .. math:: - - i_t & = \sigma(W_{ix}x_{t} + W_{ih}h_{t-1} + W_{ic}c_{t-1} + b_i) - - f_t & = \sigma(W_{fx}x_{t} + W_{fh}h_{t-1} + W_{fc}c_{t-1} + b_f) - - \\tilde{c_t} & = act_g(W_{cx}x_t + W_{ch}h_{t-1} + b_c) - - o_t & = \sigma(W_{ox}x_{t} + W_{oh}h_{t-1} + W_{oc}c_t + b_o) - - c_t & = f_t \odot c_{t-1} + i_t \odot \\tilde{c_t} - - h_t & = o_t \odot act_h(c_t) - - where the :math:`W` terms denote weight matrices (e.g. :math:`W_{xi}` is - the matrix of weights from the input gate to the input), :math:`W_{ic}, \ - W_{fc}, W_{oc}` are diagonal weight matrices for peephole connections. In - our implementation, we use vectors to reprenset these diagonal weight - matrices. The :math:`b` terms denote bias vectors (:math:`b_i` is the input - gate bias vector), :math:`\sigma` is the non-linear activations, such as - logistic sigmoid function, and :math:`i, f, o` and :math:`c` are the input - gate, forget gate, output gate, and cell activation vectors, respectively, - all of which have the same size as the cell output activation vector :math:`h`. - - The :math:`\odot` is the element-wise product of the vectors. :math:`act_g` - and :math:`act_h` are the cell input and cell output activation functions - and `tanh` is usually used for them. :math:`\\tilde{c_t}` is also called - candidate hidden state, which is computed based on the current input and - the previous hidden state. - - Set `use_peepholes` to `False` to disable peephole connection. The formula - is omitted here, please refer to the paper - http://www.bioinf.jku.at/publications/older/2604.pdf for details. - - Note that these :math:`W_{xi}x_{t}, W_{xf}x_{t}, W_{xc}x_{t}, W_{xo}x_{t}` - operations on the input :math:`x_{t}` are NOT included in this operator. - Users can choose to use fully-connect layer before LSTM layer. + ${comment} Args: - input(Variable): The input of dynamic_lstm layer, which supports - variable-time length input sequence. The underlying - tensor in this Variable is a matrix with shape - (T X 4D), where T is the total time steps in this - mini-batch, D is the hidden size. - size(int): 4 * hidden size. + input (Variable): ${input_comment} + size (int): 4 * hidden size. + h_0(Variable): The initial hidden state is an optional input, default is zero. + This is a tensor with shape (N x D), where N is the + batch size and D is the hidden size. + c_0(Variable): The initial cell state is an optional input, default is zero. + This is a tensor with shape (N x D), where N is the + batch size. `h_0` and `c_0` can be NULL but only at the same time. + param_attr(ParamAttr|None): The parameter attribute for the learnable hidden-hidden weights. @@ -326,32 +324,26 @@ def dynamic_lstm(input, W_{fh}, W_{oh}`} - The shape is (D x 4D), where D is the hidden size. 
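+        (A hedged end-to-end sketch for this layer, assuming an `emb` sequence
+        input as in the dynamic_lstmp example further below; sizes are
+        illustrative, and the fc layer supplies the 4 * hidden_dim projection
+        this op expects:)
+
+        >>> hidden_dim = 512
+        >>> fc_out = fluid.layers.fc(input=emb, size=hidden_dim * 4,
+        >>>                          bias_attr=False)
+        >>> hidden, cell = fluid.layers.dynamic_lstm(input=fc_out,
+        >>>                                          size=hidden_dim * 4)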
- bias_attr(ParamAttr|None): The bias attribute for the learnable bias + bias_attr (ParamAttr|None): The bias attribute for the learnable bias weights, which contains two parts, input-hidden bias weights and peephole connections weights if setting `use_peepholes` to `True`. 1. `use_peepholes = False` - - Biases = {:math:`b_c, b_i, b_f, b_o`}. - - The shape is (1 x 4D). + - Biases = {:math:`b_c, b_i, b_f, b_o`}. + - The shape is (1 x 4D). 2. `use_peepholes = True` - - Biases = { :math:`b_c, b_i, b_f, b_o, W_{ic}, \ + - Biases = { :math:`b_c, b_i, b_f, b_o, W_{ic}, \ W_{fc}, W_{oc}`}. - - The shape is (1 x 7D). - use_peepholes(bool): Whether to enable diagonal/peephole connections, - default `True`. - is_reverse(bool): Whether to compute reversed LSTM, default `False`. - gate_activation(str): The activation for input gate, forget gate and - output gate. Choices = ["sigmoid", "tanh", "relu", - "identity"], default "sigmoid". - cell_activation(str): The activation for cell output. Choices = ["sigmoid", - "tanh", "relu", "identity"], default "tanh". - candidate_activation(str): The activation for candidate hidden state. - Choices = ["sigmoid", "tanh", "relu", "identity"], - default "tanh". - dtype(str): Data type. Choices = ["float32", "float64"], default "float32". - name(str|None): A name for this layer(optional). If set None, the layer - will be named automatically. + - The shape is (1 x 7D). + use_peepholes (bool): ${use_peepholes_comment} + is_reverse (bool): ${is_reverse_comment} + gate_activation (str): ${gate_activation_comment} + cell_activation (str): ${cell_activation_comment} + candidate_activation (str): ${candidate_activation_comment} + dtype (str): Data type. Choices = ["float32", "float64"], default "float32". + name (str|None): A name for this layer(optional). If set None, the layer + will be named automatically. Returns: tuple: The hidden state, and cell state of LSTM. The shape of both \ @@ -381,12 +373,20 @@ def dynamic_lstm(input, cell = helper.create_tmp_variable(dtype) batch_gate = helper.create_tmp_variable(dtype) batch_cell_pre_act = helper.create_tmp_variable(dtype) + inputs = {'Input': input, 'Weight': weight, 'Bias': bias} + batch_size = input.shape[0] + if h_0: + assert h_0.shape == (batch_size, size), \ + 'The shape of h0 should be (batch_size, %d)' % size + inputs['H0'] = h_0 + if c_0: + assert c_0.shape == (batch_size, size), \ + 'The shape of c0 should be (batch_size, %d)' % size + inputs['C0'] = c_0 helper.append_op( type='lstm', - inputs={'Input': input, - 'Weight': weight, - 'Bias': bias}, + inputs=inputs, outputs={ 'Hidden': hidden, 'Cell': cell, @@ -524,15 +524,21 @@ def dynamic_lstmp(input, will be named automatically. Returns: - tuple: The projection of hidden state, and cell state of LSTMP. The \ - shape of projection is (T x P), for the cell state which is \ - (T x D), and both LoD is the same with the `input`. + tuple: A tuple of two output variable: the projection of hidden state, \ + and cell state of LSTMP. The shape of projection is (T x P), \ + for the cell state which is (T x D), and both LoD is the same \ + with the `input`. Examples: + .. 
code-block:: python + dict_dim, emb_dim = 128, 64 + data = fluid.layers.data(name='sequence', shape=[1], + dtype='int32', lod_level=1) + emb = fluid.layers.embedding(input=data, size=[dict_dim, emb_dim]) hidden_dim, proj_dim = 512, 256 - fc_out = fluid.layers.fc(input=input_seq, size=hidden_dim * 4, + fc_out = fluid.layers.fc(input=emb, size=hidden_dim * 4, act=None, bias_attr=None) proj_out, _ = fluid.layers.dynamic_lstmp(input=fc_out, size=hidden_dim * 4, @@ -598,10 +604,10 @@ def dynamic_gru(input, candidate_activation='tanh', h_0=None): """ - **Dynamic GRU Layer** + **Gated Recurrent Unit (GRU) Layer** Refer to `Empirical Evaluation of Gated Recurrent Neural Networks on - Sequence Modeling `_ + Sequence Modeling `_ . The formula is as follows: @@ -646,18 +652,27 @@ def dynamic_gru(input, :attr:`False`. gate_activation(str): The activation for update gate and reset gate. Choices = ["sigmoid", "tanh", "relu", "identity"], default "sigmoid". - activation(str): The activation for candidate hidden state. + candidate_activation(str): The activation for candidate hidden state. Choices = ["sigmoid", "tanh", "relu", "identity"], default "tanh". + h_0 (Variable): This is initial hidden state. If not set, default is + zero. This is a tensor with shape (N x D), where N is the number of + total time steps of input mini-batch feature and D is the hidden + size. Returns: Variable: The hidden state of GRU. The shape is :math:`(T \\times D)`, \ - and lod is the same with the input. + and sequence length is the same with the input. Examples: + .. code-block:: python + dict_dim, emb_dim = 128, 64 + data = fluid.layers.data(name='sequence', shape=[1], + dtype='int32', lod_level=1) + emb = fluid.layers.embedding(input=data, size=[dict_dim, emb_dim]) hidden_dim = 512 - x = fluid.layers.fc(input=data, size=hidden_dim * 3) + x = fluid.layers.fc(input=emb, size=hidden_dim * 3) hidden = fluid.layers.dynamic_gru(input=x, dim=hidden_dim) """ @@ -668,11 +683,13 @@ def dynamic_gru(input, attr=helper.param_attr, shape=[size, 3 * size], dtype=dtype) bias = helper.create_parameter( attr=helper.bias_attr, shape=[1, 3 * size], dtype=dtype, is_bias=True) + batch_size = input.shape[0] inputs = {'Input': input, 'Weight': weight, 'Bias': bias} if h_0 != None: assert h_0.shape == ( - size, size), 'The shape of h0 should be(%d, %d)' % (size, size) - inputs['h0'] = h_0 + batch_size, size + ), 'The shape of h0 should be(batch_size, %d)' % size + inputs['H0'] = h_0 hidden = helper.create_tmp_variable(dtype) batch_gate = helper.create_tmp_variable(dtype) @@ -699,8 +716,8 @@ def dynamic_gru(input, def gru_unit(input, hidden, size, - weight=None, - bias=None, + param_attr=None, + bias_attr=None, activation='tanh', gate_activation='sigmoid'): """ @@ -731,8 +748,8 @@ def gru_unit(input, input (Variable): The fc transformed input value of current step. hidden (Variable): The hidden value of lstm unit from previous step. size (integer): The input dimension value. - weight (ParamAttr): The weight parameters for gru unit. Default: None - bias (ParamAttr): The bias parameters for gru unit. Default: None + param_attr (ParamAttr): The weight parameters for gru unit. Default: None + bias_attr (ParamAttr): The bias parameters for gru unit. Default: None activation (string): The activation type for cell (actNode). Default: 'tanh' gate_activation (string): The activation type for gates (actGate). 
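+        (A hedged sketch of one gru_unit recurrence step; ``x_t_data`` and
+        ``prev_hidden`` are assumed to exist, e.g. inside a step block of a
+        recurrent network, and sizes are illustrative. Note that `size` is
+        3 * hidden_dim and the input must already be the fc-transformed value
+        of the current time step:)
+
+        >>> hidden_dim = 256
+        >>> x_t = fluid.layers.fc(input=x_t_data, size=hidden_dim * 3)
+        >>> hidden_val, r_h_val, gate_val = fluid.layers.gru_unit(
+        >>>     input=x_t, hidden=prev_hidden, size=hidden_dim * 3)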
@@ -764,40 +781,55 @@ def gru_unit(input, size = size / 3 # create weight - if weight is None: - weight = helper.create_parameter( - attr=helper.param_attr, shape=[size, 3 * size], dtype=dtype) + weight = helper.create_parameter( + attr=helper.param_attr, shape=[size, 3 * size], dtype=dtype) + gate = helper.create_tmp_variable(dtype) + reset_hidden_pre = helper.create_tmp_variable(dtype) + updated_hidden = helper.create_tmp_variable(dtype) + inputs = {'Input': input, 'HiddenPrev': hidden, 'Weight': weight} # create bias - - if bias is None: + if helper.bias_attr: bias_size = [1, 3 * size] bias = helper.create_parameter( attr=helper.bias_attr, shape=bias_size, dtype=dtype, is_bias=True) - - gate = helper.create_tmp_variable(dtype) - reset_hidden_pre = helper.create_tmp_variable(dtype) - updated_hidden = helper.create_tmp_variable(dtype) + inputs['Bias'] = bias helper.append_op( type='gru_unit', - inputs={'Input': input, - 'HiddenPrev': hidden, - 'Weight': weight}, + inputs=inputs, outputs={ 'Gate': gate, 'ResetHiddenPrev': reset_hidden_pre, 'Hidden': updated_hidden, }, attrs={ - 'activation': 0, - 'gate_activation': 1, + 'activation': 2, # tanh + 'gate_activation': 1, # sigmoid }) return updated_hidden, reset_hidden_pre, gate +@templatedoc() def linear_chain_crf(input, label, param_attr=None): + """ + Linear Chain CRF. + + ${comment} + + Args: + input(${emission_type}): ${emission_comment} + input(${transition_type}): ${transition_comment} + label(${label_type}): ${label_comment} + param_attr(ParamAttr): The attribute of the learnable parameter. + + Returns: + output(${emission_exps_type}): ${emission_exps_comment} \n + output(${transition_exps_type}): ${transition_exps_comment} \n + output(${log_likelihood_type}): ${log_likelihood_comment} + + """ helper = LayerHelper('linear_chain_crf', **locals()) size = input.shape[1] transition = helper.create_parameter( @@ -823,7 +855,27 @@ def linear_chain_crf(input, label, param_attr=None): return log_likelihood +@templatedoc() def crf_decoding(input, param_attr, label=None): + """ + ${comment} + + Args: + input(${emission_type}): ${emission_comment} + + param_attr(ParamAttr): The parameter attribute for training. + + label(${label_type}): ${label_comment} + + Returns: + Variable: ${viterbi_path_comment} + + Examples: + .. code-block:: python + + crf_decode = layers.crf_decoding( + input=hidden, param_attr=ParamAttr(name="crfw")) + """ helper = LayerHelper('crf_decoding', **locals()) transition = helper.get_parameter(param_attr.name) viterbi_path = helper.create_tmp_variable(dtype=helper.input_dtype()) @@ -837,10 +889,17 @@ def crf_decoding(input, param_attr, label=None): return viterbi_path +@templatedoc() def cos_sim(X, Y): """ - This function performs the cosine similarity between two tensors - X and Y and returns that as the output. + ${comment} + + Args: + X (Variable): ${x_comment}. + Y (Variable): ${y_comment}. + + Returns: + Variable: the output of cosine(X, Y). """ helper = LayerHelper('cos_sim', **locals()) out = helper.create_tmp_variable(dtype=X.dtype) @@ -856,38 +915,45 @@ def cos_sim(X, Y): return out -def dropout(x, dropout_prob, is_test=False, seed=None): +def dropout(x, dropout_prob, is_test=False, seed=None, name=None): """ Computes dropout. Drop or keep each element of `x` independently. Dropout is a regularization technique for reducing overfitting by preventing neuron co-adaption during - training. The dropout operator randomly set (according to the given dropout + training. 
The dropout operator randomly sets (according to the given dropout
     probability) the outputs of some units to zero, while others are remain
     unchanged.
 
     Args:
-        x(variable): The input tensor.
-        dropout_prob(float): Probability of setting units to zero.
-        is_test(bool): A flag indicating whether it is in test phrase or not.
-        seed(int): A Python integer used to create random seeds. If this
-                   parameter is set to None, a random seed is used.
-                   NOTE: If an integer seed is given, always the same output
-                   units will be dropped. DO NOT use a fixed seed in training.
+        x (Variable): The input tensor variable.
+        dropout_prob (float): Probability of setting units to zero.
+        is_test (bool): A flag indicating whether it is in test phase or not.
+        seed (int): A Python integer used to create random seeds. If this
+                    parameter is set to None, a random seed is used.
+                    NOTE: If an integer seed is given, always the same output
+                    units will be dropped. DO NOT use a fixed seed in training.
+        name (str|None): A name for this layer(optional). If set None, the layer
+                         will be named automatically.
 
     Returns:
-        Variable: A tensor variable.
+        Variable: A tensor variable with the same shape as `x`.
 
     Examples:
+
         .. code-block:: python
 
-            x = fluid.layers.data(name="data", shape=[32, 32], dtype="float32")
-            droped = fluid.layers.dropout(input=x, dropout_rate=0.5)
+            x = fluid.layers.data(name="data", shape=[32, 32], dtype="float32")
+            dropped = fluid.layers.dropout(x, dropout_prob=0.5)
     """
 
     helper = LayerHelper('dropout', **locals())
     out = helper.create_tmp_variable(dtype=x.dtype)
     mask = helper.create_tmp_variable(dtype=x.dtype, stop_gradient=True)
+
+    if (seed is None or seed == 0) and helper.main_program.random_seed != 0:
+        seed = helper.main_program.random_seed
+
     helper.append_op(
         type='dropout',
         inputs={'X': [x]},
@@ -995,8 +1061,8 @@ def square_error_cost(input, label):
         * :math:`Out`: Output value, same shape with :math:`X`.
 
     Args:
-        input(Variable): Input tensor, has predictions.
-        label(Variable): Label tensor, has target labels.
+        input (Variable): Input tensor, has predictions.
+        label (Variable): Label tensor, has target labels.
 
     Returns:
         Variable: The tensor variable storing the element-wise squared error \
@@ -1025,14 +1091,101 @@
     return square_out
 
 
+@templatedoc()
 def chunk_eval(input,
                label,
                chunk_scheme,
                num_chunk_types,
                excluded_chunk_types=None):
     """
+    **Chunk Evaluator**
+
     This function computes and outputs the precision, recall and F1-score of
     chunk detection.
+
+    For some basics of chunking, please refer to
+    'Chunking with Support Vector Machines '.
+
+    ChunkEvalOp computes the precision, recall, and F1-score of chunk detection,
+    and supports IOB, IOE, IOBES and IO (also known as plain) tagging schemes.
+    Here is a NER example of labeling for these tagging schemes:
+
+    .. code-block:: python
+
+       ====== ====== ======  =====  ==  ============  =====  =====  =====  ==  =========
+              Li     Ming    works  at  Agricultural  Bank   of     China  in  Beijing.
+       ====== ====== ======  =====  ==  ============  =====  =====  =====  ==  =========
+       IO     I-PER  I-PER   O      O   I-ORG         I-ORG  I-ORG  I-ORG  O   I-LOC
+       IOB    B-PER  I-PER   O      O   B-ORG         I-ORG  I-ORG  I-ORG  O   B-LOC
+       IOE    I-PER  E-PER   O      O   I-ORG         I-ORG  I-ORG  E-ORG  O   E-LOC
+       IOBES  B-PER  E-PER   O      O   I-ORG         I-ORG  I-ORG  E-ORG  O   S-LOC
+       ====== ====== ======  =====  ==  ============  =====  =====  =====  ==  =========
+
+    There are three chunk types (named entity types) including PER (person),
+    ORG (organization) and LOC (location), and we can see that the labels
+    have the form <tag type>-<chunk type>.
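+
+    Expressed as Python data, the IOB row above would look like the sketch
+    below (purely illustrative; these lists are not an API call, they just
+    restate the table):
+
+    .. code-block:: python
+
+       words = ['Li', 'Ming', 'works', 'at', 'Agricultural', 'Bank',
+                'of', 'China', 'in', 'Beijing']
+       iob_labels = ['B-PER', 'I-PER', 'O', 'O', 'B-ORG', 'I-ORG',
+                     'I-ORG', 'I-ORG', 'O', 'B-LOC']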
+
+    Since the calculations actually use label ids rather than labels, extra attention
+    should be paid when mapping labels to ids to make chunk_eval work. The key point
+    is that the listed equations are satisfied by the ids.
+
+    .. code-block:: python
+
+       tag_type = label % num_tag_type
+       chunk_type = label / num_tag_type
+
+    where `num_tag_type` is the number of tag types in the tagging scheme, `num_chunk_type`
+    is the number of chunk types, and `tag_type` gets its value from the following table.
+
+    .. code-block:: python
+
+       Scheme  Begin  Inside  End  Single
+       plain   0      -       -    -
+       IOB     0      1       -    -
+       IOE     -      0       1    -
+       IOBES   0      1       2    3
+
+    Still using NER as an example, assume the tagging scheme is IOB and the chunk
+    types are ORG, PER and LOC. To satisfy the above equations, the label map can
+    be like this:
+
+    .. code-block:: python
+
+       B-ORG  0
+       I-ORG  1
+       B-PER  2
+       I-PER  3
+       B-LOC  4
+       I-LOC  5
+       O      6
+
+    It is not hard to verify the equations, noting that the number of chunk types
+    is 3 and the number of tag types in the IOB scheme is 2. For example, the label
+    id of I-LOC is 5, the tag type id of I-LOC is 1, and the chunk type id of
+    I-LOC is 2, which is consistent with the results from the equations.
+
+    Args:
+        input (Variable): prediction output of the network.
+        label (Variable): label of the test data set.
+        chunk_scheme (str): ${chunk_scheme_comment}
+        num_chunk_types (int): ${num_chunk_types_comment}
+        excluded_chunk_types (list): ${excluded_chunk_types_comment}
+
+    Returns:
+        tuple: tuple containing: precision, recall, f1_score,
+        num_infer_chunks, num_label_chunks,
+        num_correct_chunks
+
+    Examples:
+        .. code-block:: python
+
+            crf = fluid.layers.linear_chain_crf(
+                input=hidden, label=label, param_attr=ParamAttr(name="crfw"))
+            crf_decode = fluid.layers.crf_decoding(
+                input=hidden, param_attr=ParamAttr(name="crfw"))
+            fluid.layers.chunk_eval(
+                input=crf_decode,
+                label=label,
+                chunk_scheme="IOB",
+                num_chunk_types=(label_dict_len - 1) / 2)
     """
     helper = LayerHelper("chunk_eval", **locals())
 
@@ -1065,6 +1218,7 @@ def chunk_eval(input,
             num_correct_chunks)
 
 
+@templatedoc()
 def sequence_conv(input,
                   num_filters,
                   filter_size=3,
@@ -1077,11 +1231,20 @@ def sequence_conv(input,
     This function creates the op for sequence_conv, using the inputs and
     other convolutional configurations for the filters and stride as given
     in the input parameters to the function.
-    """
 
-    # FIXME(dzh) : want to unify the argument of python layer
-    # function. So we ignore some unecessary attributes.
-    # such as, padding_trainable, context_start.
+    Args:
+        input (Variable): ${x_comment}
+        num_filters (int): number of filters.
+        filter_size (int): the filter size (H and W).
+        filter_stride (int): stride of the filter.
+        padding (bool): if True, add paddings.
+        bias_attr (ParamAttr|None): attributes for bias
+        param_attr (ParamAttr|None): attributes for parameter
+        act (str): the activation type
+
+    Returns:
+        Variable: output of sequence_conv
+    """
 
     helper = LayerHelper('sequence_conv', **locals())
     dtype = helper.input_dtype()
@@ -1107,6 +1270,41 @@ def sequence_conv(input,
 
 
 def sequence_softmax(input, param_attr=None, bias_attr=None, use_cudnn=True):
+    """
+    This function computes the softmax activation among all time-steps for each
+    sequence. The dimension of each time-step should be 1. Thus, the shape of
+    input Tensor can be either :math:`[N, 1]` or :math:`[N]`, where :math:`N`
+    is the sum of the lengths of all sequences.
+
+    For the i-th sequence in a mini-batch:
+
+    .. 
math:: + + Out(X[lod[i]:lod[i+1]], :) = \\frac{\exp(X[lod[i]:lod[i+1], :])}{\sum(\exp(X[lod[i]:lod[i+1], :]))} + + For example, for a mini-batch of 3 sequences with variable-length, + each containing 2, 3, 2 time-steps, the lod of which is [0, 2, 5, 7], + then softmax will be computed among :math:`X[0:2, :]`, :math:`X[2:5, :]`, + :math:`X[5:7, :]`, and :math:`N` turns out to be 7. + + Args: + input (Variable): The input variable which is a LoDTensor. + bias_attr (ParamAttr|None): attributes for bias + param_attr (ParamAttr|None): attributes for parameter + use_cudnn (bool): Use cudnn kernel or not, it is valid only when the cudnn \ + library is installed. Default: True + + Returns: + Variable: output of sequence_softmax + + Examples: + + .. code-block:: python + + x = fluid.layers.data(name='x', shape=[7, 1], + dtype='float32', lod_level=1) + x_sequence_softmax = fluid.layers.sequence_softmax(input=x) + """ helper = LayerHelper('sequence_softmax', **locals()) dtype = helper.input_dtype() softmax_out = helper.create_tmp_variable(dtype) @@ -1118,7 +1316,49 @@ def sequence_softmax(input, param_attr=None, bias_attr=None, use_cudnn=True): return softmax_out -def softmax(input, param_attr=None, bias_attr=None, use_cudnn=True): +def softmax(input, param_attr=None, bias_attr=None, use_cudnn=True, name=None): + """ + The input of the softmax operator is a tensor of any rank. The output tensor + has the same shape as the input. + + The input tensor will first be logically flattened to a 2-D matrix. The matrix's + second dimension(row length) is as same as the last dimension of the input + tensor, and the first dimension(column length) is the product of all other + dimensions of the input tensor. For each row of the matrix, the softmax operator + squashes the K-dimensional(K is the width of the matrix, which is also the size + of the input tensor's last dimension) vector of arbitrary real values to a + K-dimensional vector of real values in the range [0, 1] that add up to 1. + + It computes the exponential of the given dimension and the sum of exponential + values of all the other dimensions in the K-dimensional vector input. + Then the ratio of the exponential of the given dimension and the sum of + exponential values of all the other dimensions is the output of the softmax + operator. + + For each row :math:`i` and each column :math:`j` in the matrix, we have: + + .. math:: + + Out[i, j] = \\frac{\exp(X[i, j])}{\sum_j(exp(X[i, j])} + + Args: + input (Variable): The input variable. + bias_attr (ParamAttr): attributes for bias + param_attr (ParamAttr): attributes for parameter + use_cudnn (bool): Use cudnn kernel or not, it is valid only when the cudnn \ + library is installed. + + Returns: + Variable: output of softmax + + Examples: + + .. code-block:: python + + fc = fluid.layers.fc(input=x, size=10) + softmax = fluid.layers.softmax(input=fc) + + """ helper = LayerHelper('softmax', **locals()) dtype = helper.input_dtype() softmax_out = helper.create_tmp_variable(dtype) @@ -1144,14 +1384,17 @@ def conv2d(input, act=None, name=None): """ - **Convlution2D Layer** - The convolution2D layer calculates the output based on the input, filter - and strides, paddings, dilations, groups parameters. Input(Input) and - Output(Output) are in NCHW format. Where N is batch size, C is the number of + and strides, paddings, dilations, groups parameters. Input and + Output are in NCHW format, where N is batch size, C is the number of channels, H is the height of the feature, and W is the width of the feature. 
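+
+    For instance, a mini-batch of 8 RGB images with height 32 and width 32
+    corresponds to an NCHW input of shape [8, 3, 32, 32]. A minimal sketch of
+    declaring such an input (illustrative only; note that fluid.layers.data
+    takes the shape without the batch dimension):
+
+    .. code-block:: python
+
+        # C=3, H=32, W=32; the batch dimension N is implicit
+        images = fluid.layers.data(name='pixel', shape=[3, 32, 32],
+                                   dtype='float32')
+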
- The details of convolution layer, please refer UFLDL's `convolution, - `_ . + Filter is in MCHW format, where M is the number of output image channels, + C is the number of input image channels, H is the height of the filter, + and W is the width of the filter. If the groups is greater than 1, + C will equal the number of input image channels divided by the groups. + Please refer to UFLDL's `convolution + `_ + for more detials. If bias attribution and activation type are provided, bias is added to the output of the convolution, and the corresponding activation function is applied to the final result. @@ -1162,62 +1405,64 @@ def conv2d(input, Out = \sigma (W \\ast X + b) - In the above equation: + Where: * :math:`X`: Input value, a tensor with NCHW format. * :math:`W`: Filter value, a tensor with MCHW format. * :math:`\\ast`: Convolution operation. * :math:`b`: Bias value, a 2-D tensor with shape [M, 1]. * :math:`\\sigma`: Activation function. - * :math:`Out`: Output value, the shape of :math:`Out` and :math:`X` may be - different. + * :math:`Out`: Output value, the shape of :math:`Out` and :math:`X` may be different. Example: - Input: - Input shape: $(N, C_{in}, H_{in}, W_{in})$ + Input shape: :math:`(N, C_{in}, H_{in}, W_{in})` - Filter shape: $(C_{out}, C_{in}, H_f, W_f)$ + Filter shape: :math:`(C_{out}, C_{in}, H_f, W_f)` - Output: - Output shape: $(N, C_{out}, H_{out}, W_{out})$ + + Output shape: :math:`(N, C_{out}, H_{out}, W_{out})` Where .. math:: - H_{out}&= \\frac{(H_{in} + 2 * paddings[0] - (dilations[0] * (H_f - 1) + 1))}{strides[0]} + 1 \\\\ - W_{out}&= \\frac{(W_{in} + 2 * paddings[1] - (dilations[1] * (W_f - 1) + 1))}{strides[1]} + 1 - - Args: - input(Variable): The input image with [N, C, H, W] format. - num_filters(int): The number of filter. It is as same as the output - image channel. - filter_size(int|tuple|None): The filter size. If filter_size is a tuple, - it must contain two integers, (filter_size_H, filter_size_W). - Otherwise, the filter will be a square. - stride(int|tuple): The stride size. If stride is a tuple, it must - contain two integers, (stride_H, stride_W). Otherwise, the - stride_H = stride_W = stride. Default: stride = 1. - padding(int|tuple): The padding size. If padding is a tuple, it must - contain two integers, (padding_H, padding_W). Otherwise, the - padding_H = padding_W = padding. Default: padding = 0. - dilation(int|tuple): The dilation size. If dilation is a tuple, it must - contain two integers, (dilation_H, dilation_W). Otherwise, the - dilation_H = dilation_W = dilation. Default: dilation = 1. - groups(int): The groups number of the Conv2d Layer. According to grouped - convolution in Alex Krizhevsky's Deep CNN paper: when group=2, - the first half of the filters is only connected to the first half - of the input channels, while the second half of the filters is only - connected to the second half of the input channels. Default: groups=1 - param_attr(ParamAttr): The parameters to the Conv2d Layer. Default: None - bias_attr(ParamAttr): Bias parameter for the Conv2d layer. Default: None - use_cudnn(bool): Use cudnn kernel or not, it is valid only when the cudnn - library is installed. Default: True - act(str): Activation type. Default: None - name(str|None): A name for this layer(optional). If set None, the layer - will be named automatically. 
+            H_{out}&= \\frac{(H_{in} + 2 * paddings[0] - (dilations[0] * (H_f - 1) + 1))}{strides[0]} + 1 \\\\
+            W_{out}&= \\frac{(W_{in} + 2 * paddings[1] - (dilations[1] * (W_f - 1) + 1))}{strides[1]} + 1
+
+    Args:
+        input (Variable): The input image with [N, C, H, W] format.
+        num_filters(int): The number of filters. It is the same as the number
+            of output image channels.
+        filter_size (int|tuple|None): The filter size. If filter_size is a tuple,
+            it must contain two integers, (filter_size_H, filter_size_W).
+            Otherwise, the filter will be a square.
+        stride (int|tuple): The stride size. If stride is a tuple, it must
+            contain two integers, (stride_H, stride_W). Otherwise, the
+            stride_H = stride_W = stride. Default: stride = 1.
+        padding (int|tuple): The padding size. If padding is a tuple, it must
+            contain two integers, (padding_H, padding_W). Otherwise, the
+            padding_H = padding_W = padding. Default: padding = 0.
+        dilation (int|tuple): The dilation size. If dilation is a tuple, it must
+            contain two integers, (dilation_H, dilation_W). Otherwise, the
+            dilation_H = dilation_W = dilation. Default: dilation = 1.
+        groups (int): The groups number of the Conv2d Layer. According to grouped
+            convolution in Alex Krizhevsky's Deep CNN paper: when group=2,
+            the first half of the filters is only connected to the first half
+            of the input channels, while the second half of the filters is only
+            connected to the second half of the input channels. Default: groups=1
+        param_attr (ParamAttr): The parameters to the Conv2d Layer. Default: None
+        bias_attr (ParamAttr): Bias parameter for the Conv2d layer. Default: None
+        use_cudnn (bool): Use cudnn kernel or not, it is valid only when the cudnn
+            library is installed. Default: True
+        use_mkldnn (bool): Use mkldnn kernels or not, it is valid only when compiled
+            with mkldnn library. Default: False
+        act (str): Activation type. Default: None
+        name (str|None): A name for this layer(optional). If set None, the layer
+            will be named automatically.
 
     Returns:
         Variable: The tensor variable storing the convolution and \
@@ -1230,13 +1475,9 @@ def conv2d(input,
     Examples:
         .. code-block:: python
 
-            data = fluid.layers.data(
-                name='data', shape=[3, 32, 32], dtype='float32')
-            conv2d = fluid.layers.conv2d(
-                input=data, num_filters=2, filter_size=3, act="relu")
+            data = fluid.layers.data(name='data', shape=[3, 32, 32], dtype='float32')
+            conv2d = fluid.layers.conv2d(input=data, num_filters=2, filter_size=3, act="relu")
     """
-    if stride is None:
-        stride = [1, 1]
     num_channels = input.shape[1]
 
@@ -1299,57 +1540,223 @@
     return helper.append_activation(pre_act)
 
 
-def sequence_pool(input, pool_type):
+def conv3d(input,
+           num_filters,
+           filter_size,
+           stride=1,
+           padding=0,
+           dilation=1,
+           groups=None,
+           param_attr=None,
+           bias_attr=None,
+           use_cudnn=True,
+           use_mkldnn=False,
+           act=None,
+           name=None):
     """
-    This function add the operator for sequence pooling.
-    It pools features of all time-steps of each instance, and is applied
-    on top of the input using pool_type mentioned in the parameters.
+    **Convolution3D Layer**
 
-    It supports four pool_type:
+    The convolution3D layer calculates the output based on the input, filter
+    and strides, paddings, dilations, groups parameters. Input(Input) and
+    Output(Output) are in NCDHW format, where N is batch size, C is the number of
+    channels, D is the depth of the feature, H is the height of the feature,
+    and W is the width of the feature. Convolution3D is similar to Convolution2D
+    but adds one dimension (depth).
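+
+    For instance, a mini-batch of 8 clips, each of 12 frames of 32 x 32 RGB
+    images, corresponds to an NCDHW input of shape [8, 3, 12, 32, 32]. A
+    minimal sketch (illustrative only; as with conv2d, fluid.layers.data
+    takes the shape without the batch dimension):
+
+    .. code-block:: python
+
+        # C=3, D=12, H=32, W=32; the batch dimension N is implicit
+        clips = fluid.layers.data(name='clips', shape=[3, 12, 32, 32],
+                                  dtype='float32')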
If bias attribution and activation type are + provided, bias is added to the output of the convolution, and the + corresponding activation function is applied to the final result. - - average: :math:`Out[i] = \\frac{\sum_i X_i}{N}` - - sum: :math:`Out[i] = \sum_jX_{ij}` - - sqrt: :math:`Out[i] = \\frac{\sum_jX_{ij}}{\sqrt{len(X_i)}}` - - max: :math:`Out[i] = max(X_i)` + For each input :math:`X`, the equation is: - .. code-block:: text + .. math:: - x is a 1-level LoDTensor: - x.lod = [[0, 2, 5, 7]] - x.data = [1, 3, 2, 4, 6, 5, 1] - x.dims = [7, 1] + Out = \sigma (W \\ast X + b) - then output is a Tensor: - out.dim = [3, 1] - with condition len(x.lod[-1]) - 1 == out.dims[0] + In the above equation: - for different pool_type: - average: out.data = [2, 4, 3], where 2=(1+3)/2, 4=(2+4+6)/3, 3=(5+1)/2 - sum : out.data = [4, 12, 6], where 4=1+3, 12=2+4+6, 6=5+1 - sqrt : out.data = [2.82, 6.93, 4.24], where 2.82=(1+3)/sqrt(2), - 6.93=(2+4+6)/sqrt(3), 4.24=(5+1)/sqrt(2) - max : out.data = [3, 6, 5], where 3=max(1,3), 6=max(2,4,6), 5=max(5,1) + * :math:`X`: Input value, a tensor with NCDHW format. + * :math:`W`: Filter value, a tensor with MCDHW format. + * :math:`\\ast`: Convolution operation. + * :math:`b`: Bias value, a 2-D tensor with shape [M, 1]. + * :math:`\\sigma`: Activation function. + * :math:`Out`: Output value, the shape of :math:`Out` and :math:`X` may be different. + + Example: + + - Input: + + Input shape: :math:`(N, C_{in}, D_{in}, H_{in}, W_{in})` + + Filter shape: :math:`(C_{out}, C_{in}, D_f, H_f, W_f)` + + - Output: + Output shape: :math:`(N, C_{out}, D_{out}, H_{out}, W_{out})` + + Where + + .. math:: + + D_{out}&= \\frac{(D_{in} + 2 * paddings[0] - (dilations[0] * (D_f - 1) + 1))}{strides[0]} + 1 \\\\ + H_{out}&= \\frac{(H_{in} + 2 * paddings[1] - (dilations[1] * (H_f - 1) + 1))}{strides[1]} + 1 \\\\ + W_{out}&= \\frac{(W_{in} + 2 * paddings[2] - (dilations[2] * (W_f - 1) + 1))}{strides[2]} + 1 Args: - input(variable): The input variable which is a LoDTensor. - pool_type (string): The pooling type of sequence_pool. - It supports average, sum, sqrt and max. + input (Variable): The input image with [N, C, D, H, W] format. + num_filters(int): The number of filter. It is as same as the output + image channel. + filter_size (int|tuple|None): The filter size. If filter_size is a tuple, + it must contain three integers, (filter_size_D, filter_size_H, filter_size_W). + Otherwise, the filter will be a square. + stride (int|tuple): The stride size. If stride is a tuple, it must + contain three integers, (stride_D, stride_H, stride_W). Otherwise, the + stride_D = stride_H = stride_W = stride. Default: stride = 1. + padding (int|tuple): The padding size. If padding is a tuple, it must + contain three integers, (padding_D, padding_H, padding_W). Otherwise, the + padding_D = padding_H = padding_W = padding. Default: padding = 0. + dilation (int|tuple): The dilation size. If dilation is a tuple, it must + contain three integers, (dilation_D, dilation_H, dilation_W). Otherwise, the + dilation_D = dilation_H = dilation_W = dilation. Default: dilation = 1. + groups (int): The groups number of the Conv3d Layer. According to grouped + convolution in Alex Krizhevsky's Deep CNN paper: when group=2, + the first half of the filters is only connected to the first half + of the input channels, while the second half of the filters is only + connected to the second half of the input channels. Default: groups=1 + param_attr (ParamAttr): The parameters to the Conv3d Layer. 
Default: None + bias_attr (ParamAttr): Bias parameter for the Conv3d layer. Default: None + use_cudnn (bool): Use cudnn kernel or not, it is valid only when the cudnn + library is installed. Default: True + use_mkldnn (bool): Use mkldnn kernels or not. + act (str): Activation type. Default: None + name (str|None): A name for this layer(optional). If set None, the layer + will be named automatically. Returns: - The sequence pooling variable which is a Tensor. + Variable: The tensor variable storing the convolution and \ + non-linearity activation result. - Examples: + Raises: + ValueError: If the shapes of input, filter_size, stride, padding and + groups mismatch. + Examples: .. code-block:: python - x = fluid.layers.data(name='x', shape=[7, 1], - dtype='float32', lod_level=1) - avg_x = fluid.layers.sequence_pool(input=x, pool_type='average') - sum_x = fluid.layers.sequence_pool(input=x, pool_type='sum') - sqrt_x = fluid.layers.sequence_pool(input=x, pool_type='sqrt') - max_x = fluid.layers.sequence_pool(input=x, pool_type='max') + data = fluid.layers.data(name='data', shape=[3, 12, 32, 32], dtype='float32') + conv3d = fluid.layers.conv3d(input=data, num_filters=2, filter_size=3, act="relu") """ - helper = LayerHelper('sequence_pool', **locals()) + + l_type = 'conv3d' + + helper = LayerHelper(l_type, **locals()) + dtype = helper.input_dtype() + + num_channels = input.shape[1] + + if groups is None: + num_filter_channels = num_channels + else: + if num_channels % groups != 0: + raise ValueError("num_channels must be divisible by groups.") + num_filter_channels = num_channels / groups + + filter_size = utils.convert_to_list(filter_size, 3, 'filter_size') + stride = utils.convert_to_list(stride, 3, 'stride') + padding = utils.convert_to_list(padding, 3, 'padding') + dilation = utils.convert_to_list(dilation, 3, 'dilation') + + if not isinstance(use_cudnn, bool): + raise ValueError("use_cudnn should be True or False") + + input_shape = input.shape + filter_shape = [num_filters, num_filter_channels] + filter_size + + def _get_default_param_initializer(): + std = (2.0 / (filter_size[0]**3 * num_channels))**0.5 + return Normal(0.0, std, 0) + + filter_param = helper.create_parameter( + attr=helper.param_attr, + shape=filter_shape, + dtype=dtype, + default_initializer=_get_default_param_initializer()) + + pre_bias = helper.create_tmp_variable(dtype) + + helper.append_op( + type=l_type, + inputs={ + 'Input': input, + 'Filter': filter_param, + }, + outputs={"Output": pre_bias}, + attrs={ + 'strides': stride, + 'paddings': padding, + 'dilations': dilation, + 'groups': groups, + 'use_cudnn': use_cudnn, + 'use_mkldnn': use_mkldnn + }) + + pre_act = helper.append_bias_op(pre_bias, dim_start=1, dim_end=2) + + return helper.append_activation(pre_act) + + +def sequence_pool(input, pool_type): + """ + This function add the operator for sequence pooling. + It pools features of all time-steps of each instance, and is applied + on top of the input using pool_type mentioned in the parameters. + + It supports four pool_type: + + - average: :math:`Out[i] = \\frac{\sum_i X_i}{N}` + - sum: :math:`Out[i] = \sum_jX_{ij}` + - sqrt: :math:`Out[i] = \\frac{\sum_jX_{ij}}{\sqrt{len(X_i)}}` + - max: :math:`Out[i] = max(X_i)` + + .. 
code-block:: text + + x is a 1-level LoDTensor: + x.lod = [[2, 3, 2]] + x.data = [1, 3, 2, 4, 6, 5, 1] + x.dims = [7, 1] + + then output is a Tensor: + out.dim = [3, 1] + with condition len(x.lod[-1]) == out.dims[0] + + for different pool_type: + average: out.data = [2, 4, 3], where 2=(1+3)/2, 4=(2+4+6)/3, 3=(5+1)/2 + sum : out.data = [4, 12, 6], where 4=1+3, 12=2+4+6, 6=5+1 + sqrt : out.data = [2.82, 6.93, 4.24], where 2.82=(1+3)/sqrt(2), + 6.93=(2+4+6)/sqrt(3), 4.24=(5+1)/sqrt(2) + max : out.data = [3, 6, 5], where 3=max(1,3), 6=max(2,4,6), 5=max(5,1) + last : out.data = [3, 6, 1], where 3=last(1,3), 6=last(2,4,6), 1=last(5,1) + first : out.data = [1, 2, 5], where 1=first(1,3), 2=first(2,4,6), 5=first(5,1) + + Args: + input(variable): The input variable which is a LoDTensor. + pool_type (string): The pooling type of sequence_pool. + It supports average, sum, sqrt and max. + + Returns: + The sequence pooling variable which is a Tensor. + + Examples: + + .. code-block:: python + + x = fluid.layers.data(name='x', shape=[7, 1], + dtype='float32', lod_level=1) + avg_x = fluid.layers.sequence_pool(input=x, pool_type='average') + sum_x = fluid.layers.sequence_pool(input=x, pool_type='sum') + sqrt_x = fluid.layers.sequence_pool(input=x, pool_type='sqrt') + max_x = fluid.layers.sequence_pool(input=x, pool_type='max') + last_x = fluid.layers.sequence_pool(input=x, pool_type='last') + first_x = fluid.layers.sequence_pool(input=x, pool_type='first') + """ + helper = LayerHelper('sequence_pool', **locals()) dtype = helper.input_dtype() pool_out = helper.create_tmp_variable(dtype) max_index = helper.create_tmp_variable(dtype) @@ -1371,18 +1778,18 @@ def sequence_pool(input, pool_type): def sequence_first_step(input): """ - This funciton get the first step of sequence. + This function gets the first step of sequence. .. code-block:: text x is a 1-level LoDTensor: - x.lod = [[0, 2, 5, 7]] + x.lod = [[2, 3, 2]] x.data = [1, 3, 2, 4, 6, 5, 1] x.dims = [7, 1] then output is a Tensor: out.dim = [3, 1] - with condition len(x.lod[-1]) - 1 == out.dims[0] + with condition len(x.lod[-1]) == out.dims[0] out.data = [1, 2, 5], where 1=first(1,3), 2=first(2,4,6), 5=first(5,1) Args: @@ -1404,18 +1811,18 @@ def sequence_first_step(input): def sequence_last_step(input): """ - This funciton get the last step of sequence. + This function gets the last step of sequence. .. code-block:: text x is a 1-level LoDTensor: - x.lod = [[0, 2, 5, 7]] + x.lod = [[2, 3, 2]] x.data = [1, 3, 2, 4, 6, 5, 1] x.dims = [7, 1] then output is a Tensor: out.dim = [3, 1] - with condition len(x.lod[-1]) - 1 == out.dims[0] + with condition len(x.lod[-1]) == out.dims[0] out.data = [3, 6, 1], where 3=last(1,3), 6=last(2,4,6), 1=last(5,1) Args: @@ -1435,6 +1842,7 @@ def sequence_last_step(input): return sequence_pool(input=input, pool_type="last") +@templatedoc() def pool2d(input, pool_size=-1, pool_type="max", @@ -1446,8 +1854,45 @@ def pool2d(input, use_mkldnn=False, name=None): """ - This function adds the operator for pooling in 2 dimensions, using the - pooling configurations mentioned in input parameters. + ${comment} + + Args: + input (Variable): The input tensor of pooling operator. The format of + input tensor is NCHW, where N is batch size, C is + the number of channels, H is the height of the + feature, and W is the width of the feature. + pool_size (int): The side length of pooling windows. All pooling + windows are squares with pool_size on a side. + pool_type: ${pooling_type_comment} + pool_stride (int): stride of the pooling layer. 
+ pool_padding (int): padding size. + global_pooling: ${global_pooling_comment} + use_cudnn: ${use_cudnn_comment} + ceil_mode: ${ceil_mode_comment} + use_mkldnn: ${use_mkldnn_comment} + name (str|None): A name for this layer(optional). If set None, the + layer will be named automatically. + + Returns: + Variable: The pooling result. + + Raises: + ValueError: If 'pool_type' is not "max" nor "avg" + ValueError: If 'global_pooling' is False and 'pool_size' is -1 + ValueError: If 'use_cudnn' is not a bool value. + + Examples: + + .. code-block:: python + + data = fluid.layers.data( + name='data', shape=[3, 32, 32], dtype='float32') + conv2d = fluid.layers.pool2d( + input=data, + pool_size=2, + pool_type='max', + pool_stride=1, + global_pooling=False) """ if pool_type not in ["max", "avg"]: raise ValueError( @@ -1466,12 +1911,84 @@ def pool2d(input, if not isinstance(use_cudnn, bool): raise ValueError("use_cudnn should be True or False") - helper = LayerHelper('pool2d', **locals()) + l_type = 'pool2d' + + helper = LayerHelper(l_type, **locals()) + dtype = helper.input_dtype() + pool_out = helper.create_tmp_variable(dtype) + + helper.append_op( + type=l_type, + inputs={"X": input}, + outputs={"Out": pool_out}, + attrs={ + "pooling_type": pool_type, + "ksize": pool_size, + "global_pooling": global_pooling, + "strides": pool_stride, + "paddings": pool_padding, + "use_cudnn": use_cudnn, + "ceil_mode": ceil_mode, + "use_mkldnn": use_mkldnn + }) + + return pool_out + + +def pool3d(input, + pool_size=-1, + pool_type="max", + pool_stride=1, + pool_padding=0, + global_pooling=False, + use_cudnn=True, + ceil_mode=False, + use_mkldnn=False, + name=None): + """ + This function adds the operator for pooling in 3-dimensions, using the + pooling configurations mentioned in input parameters. + + Args: + input (Variable): ${input_comment} + pool_size (int): ${ksize_comment} + pool_type (str): ${pooling_type_comment} + pool_stride (int): stride of the pooling layer. + pool_padding (int): padding size. + global_pooling (bool): ${global_pooling_comment} + use_cudnn (bool): ${use_cudnn_comment} + ceil_mode (bool): ${ceil_mode_comment} + use_mkldnn (bool): ${use_mkldnn_comment} + name (str): A name for this layer(optional). If set None, the layer + will be named automatically. + + Returns: + Variable: output of pool3d layer. + """ + if pool_type not in ["max", "avg"]: + raise ValueError( + "Unknown pool_type: '%s'. It can only be 'max' or 'avg'.", + str(pool_type)) + + if global_pooling is False and pool_size == -1: + raise ValueError( + "When the global_pooling is False, pool_size must be passed " + "and be a valid value. 
Received pool_size: " + str(pool_size)) + + pool_size = utils.convert_to_list(pool_size, 3, 'pool_size') + pool_padding = utils.convert_to_list(pool_padding, 3, 'pool_padding') + pool_stride = utils.convert_to_list(pool_stride, 3, 'pool_stride') + + if not isinstance(use_cudnn, bool): + raise ValueError("use_cudnn should be True or False") + + l_type = "pool3d" + helper = LayerHelper(l_type, **locals()) dtype = helper.input_dtype() pool_out = helper.create_tmp_variable(dtype) helper.append_op( - type="pool2d", + type=l_type, inputs={"X": input}, outputs={"Out": pool_out}, attrs={ @@ -1501,10 +2018,61 @@ def batch_norm(input, name=None, moving_mean_name=None, moving_variance_name=None, - do_model_average_for_mean_and_var=False): + do_model_average_for_mean_and_var=False, + fuse_with_relu=False): """ - This function helps create an operator to implement - the BatchNorm layer using the configurations from the input parameters. + **Batch Normalization Layer** + + Can be used as a normalizer function for conv2d and fully_connected operations. + The required data format for this layer is one of the following: + + 1. NHWC `[batch, in_height, in_width, in_channels]` + + 2. NCHW `[batch, in_channels, in_height, in_width]` + + Refer to `Batch Normalization: Accelerating Deep Network Training by Reducing + Internal Covariate Shift `_ + for more details. + + :math:`input` is the input features over a mini-batch. + + .. math:: + + \\mu_{\\beta} &\\gets \\frac{1}{m} \\sum_{i=1}^{m} x_i \\qquad &//\\ + \ mini-batch\ mean \\\\ + \\sigma_{\\beta}^{2} &\\gets \\frac{1}{m} \\sum_{i=1}^{m}(x_i - \\ + \\mu_{\\beta})^2 \\qquad &//\ mini-batch\ variance \\\\ + \\hat{x_i} &\\gets \\frac{x_i - \\mu_\\beta} {\\sqrt{\\ + \\sigma_{\\beta}^{2} + \\epsilon}} \\qquad &//\ normalize \\\\ + y_i &\\gets \\gamma \\hat{x_i} + \\beta \\qquad &//\ scale\ and\ shift + + Args: + input(variable): The input variable which is a LoDTensor. + act(string, Default None): Activation type, linear|relu|prelu|... + is_test(bool, Default False): Used for training or training. + momentum(float, Default 0.9): + epsilon(float, Default 1e-05): + param_attr(ParamAttr): The parameter attribute for Parameter `scale`. + bias_attr(ParamAttr): The parameter attribute for Parameter `bias`. + data_layout(string, default NCHW): NCHW|NHWC + in_place(bool, Default False): Make the input and output of batch norm reuse memory. + use_mkldnn(bool, Default false): ${use_mkldnn_comment} + name(string, Default None): A name for this layer(optional). If set None, the layer + will be named automatically. + moving_mean_name(string, Default None): The name of moving_mean which store the global Mean. + moving_variance_name(string, Default None): The name of the moving_variance which store the global Variance. + do_model_average_for_mean_and_var(bool, Default False): Do model average for mean and variance or not. + fuse_with_relu (bool): if True, this OP performs relu after batch norm. + + Returns: + Variable: A tensor variable which is the result after applying batch normalization on the input. + + Examples: + + .. 
code-block:: python + + hidden1 = fluid.layers.fc(input=x, size=200, param_attr='fc1.w') + hidden2 = fluid.layers.batch_norm(input=hidden1) """ helper = LayerHelper('batch_norm', **locals()) dtype = helper.input_dtype() @@ -1580,12 +2148,14 @@ def batch_norm(input, "momentum": momentum, "epsilon": epsilon, "is_test": is_test, - "use_mkldnn": use_mkldnn + "use_mkldnn": use_mkldnn, + "fuse_with_relu": fuse_with_relu }) return helper.append_activation(batch_norm_out) +@templatedoc() def layer_norm(input, scale=True, shift=True, @@ -1596,20 +2166,11 @@ def layer_norm(input, act=None, name=None): """ - **Layer Normalization** - - Assume feature vectors exist on dimensions - :attr:`begin_norm_axis ... rank(input)` and calculate the moment statistics - along these dimensions for each feature vector :math:`a` with size - :math:`H`, then normalize each feature vector using the corresponding - statistics. After that, apply learnable gain and bias on the normalized - tensor to scale and shift if :attr:`scale` and :attr:`shift` are set. - - Refer to `Layer Normalization `_ + ${comment} The formula is as follows: - .. math:: + .. math:: \\mu & = \\frac{1}{H}\\sum_{i=1}^{H} a_i @@ -1617,6 +2178,15 @@ def layer_norm(input, h & = f(\\frac{g}{\\sigma}(a - \\mu) + b) + * :math:`a`: the vector representation of the summed inputs to the neurons + in that layer. + + * :math:`H`: the number of hidden units in a layers + + * :math:`g`: the trainable scale parameter. + + * :math:`b`: the trainable bias parameter. + Args: input(Variable): The input tensor variable. scale(bool): Whether to learn the adaptive gain :math:`g` after @@ -1632,16 +2202,16 @@ def layer_norm(input, bias_attr(ParamAttr|None): The parameter attribute for the learnable bias :math:`b`. act(str): Activation to be applied to the output of layer normalizaiton. + name (str): The name of this layer. It is optional. Returns: - Variable: A tensor variable with the same shape as the input. + ${y_comment} Examples: - .. code-block:: python - data = fluid.layers.data( - name='data', shape=[3, 32, 32], dtype='float32') - x = fluid.layers.layer_norm(input=data, begin_norm_axis=1) + >>> data = fluid.layers.data(name='data', shape=[3, 32, 32], + >>> dtype='float32') + >>> x = fluid.layers.layer_norm(input=data, begin_norm_axis=1) """ helper = LayerHelper('layer_norm', **locals()) dtype = helper.input_dtype() @@ -1682,23 +2252,6 @@ def layer_norm(input, return helper.append_activation(layer_norm_out) -def beam_search_decode(ids, scores, name=None): - helper = LayerHelper('beam_search_decode', **locals()) - sentence_ids = helper.create_tmp_variable(dtype=ids.dtype) - sentence_scores = helper.create_tmp_variable(dtype=ids.dtype) - - helper.append_op( - type="beam_search_decode", - inputs={"Ids": ids, - "Scores": scores}, - outputs={ - "SentenceIds": sentence_ids, - "SentenceScores": sentence_scores - }) - - return sentence_ids, sentence_scores - - def conv2d_transpose(input, num_filters, output_size=None, @@ -1706,6 +2259,7 @@ def conv2d_transpose(input, padding=0, stride=1, dilation=1, + groups=None, param_attr=None, bias_attr=None, use_cudnn=True, @@ -1722,32 +2276,36 @@ def conv2d_transpose(input, represent height and width, respectively. The details of convolution transpose layer, please refer to the following explanation and references `therein `_. + If bias attribution and activation type are provided, bias is added to + the output of the convolution, and the corresponding activation function + is applied to the final result. 
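+
+    A minimal sketch of this fused form (all tensor names and hyperparameter
+    values here are illustrative, not prescriptive):
+
+    .. code-block:: python
+
+        # 2x upsampling with bias and ReLU fused into the layer
+        up = fluid.layers.conv2d_transpose(input=feat, num_filters=64,
+                                           filter_size=4, stride=2,
+                                           padding=1, act='relu')
+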
For each input :math:`X`, the equation is: .. math:: - Out = W \\ast X + Out = \sigma (W \\ast X + b) - In the above equation: + Where: * :math:`X`: Input value, a tensor with NCHW format. * :math:`W`: Filter value, a tensor with MCHW format. - * :math:`\\ast` : Convolution transpose operation. - * :math:`Out`: Output value, the shape of :math:`Out` and :math:`X` may be - different. + * :math:`\\ast`: Convolution operation. + * :math:`b`: Bias value, a 2-D tensor with shape [M, 1]. + * :math:`\\sigma`: Activation function. + * :math:`Out`: Output value, the shape of :math:`Out` and :math:`X` may be different. Example: - Input: - Input shape: $(N, C_{in}, H_{in}, W_{in})$ + Input shape: :math:`(N, C_{in}, H_{in}, W_{in})` - Filter shape: $(C_{in}, C_{out}, H_f, W_f)$ + Filter shape: :math:`(C_{in}, C_{out}, H_f, W_f)` - Output: - Output shape: $(N, C_{out}, H_{out}, W_{out})$ + Output shape: :math:`(N, C_{out}, H_{out}, W_{out})` Where @@ -1757,53 +2315,64 @@ def conv2d_transpose(input, W_{out} &= (W_{in} - 1) * strides[1] - 2 * paddings[1] + dilations[1] * (W_f - 1) + 1 Args: - input(Variable): The input image with [N, C, H, W] format. - num_filters(int): The number of the filter. It is as same as the output - image channel. - output_size(int|tuple|None): The output image size. If output size is a - tuple, it must contain two integers, (image_H, image_W). This - parameter only works when filter_size is None. - filter_size(int|tuple|None): The filter size. If filter_size is a tuple, - it must contain two integers, (filter_size_H, filter_size_W). - Otherwise, the filter will be a square. None if use output size to - calculate filter_size. - padding(int|tuple): The padding size. If padding is a tuple, it must - contain two integers, (padding_H, padding_W). Otherwise, the - padding_H = padding_W = padding. Default: padding = 0. - stride(int|tuple): The stride size. If stride is a tuple, it must - contain two integers, (stride_H, stride_W). Otherwise, the - stride_H = stride_W = stride. Default: stride = 1. - dilation(int|tuple): The dilation size. If dilation is a tuple, it must - contain two integers, (dilation_H, dilation_W). Otherwise, the - dilation_H = dilation_W = dilation. Default: dilation = 1. - param_attr(ParamAttr): The parameters to the Conv2d_transpose Layer. - Default: None - bias_attr(ParamAttr): Bias parameter for the Conv2d layer. Default: None - use_cudnn(bool): Use cudnn kernel or not, it is valid only when the cudnn - library is installed. Default: True - act(str): Activation type. Default: None - name(str|None): A name for this layer(optional). If set None, the layer - will be named automatically. - - Returns: - Variable: The tensor variable storing the convolution transpose result. + input(Variable): The input image with [N, C, H, W] format. + num_filters(int): The number of the filter. It is as same as the output + image channel. + output_size(int|tuple|None): The output image size. If output size is a + tuple, it must contain two integers, (image_H, image_W). This + parameter only works when filter_size is None. + filter_size(int|tuple|None): The filter size. If filter_size is a tuple, + it must contain two integers, (filter_size_H, filter_size_W). + Otherwise, the filter will be a square. None if use output size to + calculate filter_size. + padding(int|tuple): The padding size. If padding is a tuple, it must + contain two integers, (padding_H, padding_W). Otherwise, the + padding_H = padding_W = padding. Default: padding = 0. + stride(int|tuple): The stride size. 
If stride is a tuple, it must + contain two integers, (stride_H, stride_W). Otherwise, the + stride_H = stride_W = stride. Default: stride = 1. + dilation(int|tuple): The dilation size. If dilation is a tuple, it must + contain two integers, (dilation_H, dilation_W). Otherwise, the + dilation_H = dilation_W = dilation. Default: dilation = 1. + groups(int): The groups number of the Conv2d transpose layer. Inspired by + grouped convolution in Alex Krizhevsky's Deep CNN paper, in which + when group=2, the first half of the filters is only connected to the + first half of the input channels, while the second half of the + filters is only connected to the second half of the input channels. + Default: groups=1 + param_attr(ParamAttr): The parameters to the Conv2d_transpose Layer. + Default: None + bias_attr(ParamAttr): Bias parameter for the Conv2d layer. Default: None + use_cudnn(bool): Use cudnn kernel or not, it is valid only when the cudnn + library is installed. Default: True + act(str): Activation type. Default: None + name(str|None): A name for this layer(optional). If set None, the layer + will be named automatically. + + Returns: + Variable: The tensor variable storing the convolution transpose result. Raises: - ValueError: If the shapes of input, filter_size, stride, padding and - groups mismatch. + ValueError: If the shapes of input, filter_size, stride, padding and + groups mismatch. Examples: .. code-block:: python - data = fluid.layers.data( - name='data', shape=[3, 32, 32], dtype='float32') - conv2d_transpose = fluid.layers.conv2d_transpose( - input=data, num_filters=2, filter_size=3) + data = fluid.layers.data(name='data', shape=[3, 32, 32], dtype='float32') + conv2d_transpose = fluid.layers.conv2d_transpose(input=data, num_filters=2, filter_size=3) """ - helper = LayerHelper("conv2d_transpose", **locals()) + + input_channel = input.shape[1] + + op_type = 'conv2d_transpose' + if (input_channel == groups and num_filters == input_channel and + not use_cudnn): + op_type = 'depthwise_conv2d_transpose' + + helper = LayerHelper(op_type, **locals()) if not isinstance(input, Variable): raise TypeError("Input of conv2d_transpose must be Variable") - input_channel = input.shape[1] padding = utils.convert_to_list(padding, 2, 'padding') stride = utils.convert_to_list(stride, 2, 'stride') @@ -1830,13 +2399,183 @@ def conv2d_transpose(input, filter_size = utils.convert_to_list(filter_size, 2, 'conv2d_transpose.filter_size') - filter_shape = [input_channel, num_filters] + filter_size + groups = 1 if groups is None else groups + filter_shape = [input_channel, num_filters / groups] + filter_size + img_filter = helper.create_parameter( + dtype=input.dtype, shape=filter_shape, attr=helper.param_attr) + + pre_bias = helper.create_tmp_variable(dtype=input.dtype) + helper.append_op( + type=op_type, + inputs={'Input': [input], + 'Filter': [img_filter]}, + outputs={'Output': pre_bias}, + attrs={ + 'strides': stride, + 'paddings': padding, + 'dilations': dilation, + 'groups': groups, + 'use_cudnn': use_cudnn + }) + + pre_act = helper.append_bias_op(pre_bias, dim_start=1, dim_end=2) + out = helper.append_activation(pre_act) + return out + + +def conv3d_transpose(input, + num_filters, + output_size=None, + filter_size=None, + padding=0, + stride=1, + dilation=1, + groups=None, + param_attr=None, + bias_attr=None, + use_cudnn=True, + act=None, + name=None): + """ + **Convlution3D transpose layer** + + The convolution3D transpose layer calculates the output based on the input, + filter, and dilations, 
strides, paddings. Input(Input) and output(Output)
+    are in NCDHW format, where N is batch size, C is the number of channels,
+    D is the depth of the feature, H is the height of the feature, and W
+    is the width of the feature. Parameters (dilations, strides, paddings) are
+    three elements, which represent depth, height and width, respectively.
+    For the details of the convolution transpose layer, please refer to the following
+    explanation and references `therein `_.
+    If bias attribution and activation type are provided, bias is added to
+    the output of the convolution, and the corresponding activation function
+    is applied to the final result.
+
+    For each input :math:`X`, the equation is:
+
+    .. math::
+
+        Out = \sigma (W \\ast X + b)
+
+    In the above equation:
+
+    * :math:`X`: Input value, a tensor with NCDHW format.
+    * :math:`W`: Filter value, a tensor with MCDHW format.
+    * :math:`\\ast`: Convolution operation.
+    * :math:`b`: Bias value, a 2-D tensor with shape [M, 1].
+    * :math:`\\sigma`: Activation function.
+    * :math:`Out`: Output value, the shape of :math:`Out` and :math:`X` may be different.
+
+    Example:
+
+        - Input:
+
+          Input shape: :math:`(N, C_{in}, D_{in}, H_{in}, W_{in})`
+
+          Filter shape: :math:`(C_{in}, C_{out}, D_f, H_f, W_f)`
+
+        - Output:
+
+          Output shape: :math:`(N, C_{out}, D_{out}, H_{out}, W_{out})`
+
+        Where
+
+        .. math::
+
+           D_{out} &= (D_{in} - 1) * strides[0] - 2 * paddings[0] + dilations[0] * (D_f - 1) + 1 \\\\
+           H_{out} &= (H_{in} - 1) * strides[1] - 2 * paddings[1] + dilations[1] * (H_f - 1) + 1 \\\\
+           W_{out} &= (W_{in} - 1) * strides[2] - 2 * paddings[2] + dilations[2] * (W_f - 1) + 1
+
+    Args:
+        input(Variable): The input image with [N, C, D, H, W] format.
+        num_filters(int): The number of the filter. It is the same as the number
+            of output image channels.
+        output_size(int|tuple|None): The output image size. If output size is a
+            tuple, it must contain three integers, (image_D, image_H, image_W). This
+            parameter only works when filter_size is None.
+        filter_size(int|tuple|None): The filter size. If filter_size is a tuple,
+            it must contain three integers, (filter_size_D, filter_size_H, filter_size_W).
+            Otherwise, the filter will be a square. If None, filter_size is
+            computed from output_size.
+        padding(int|tuple): The padding size. If padding is a tuple, it must
+            contain three integers, (padding_D, padding_H, padding_W). Otherwise, the
+            padding_D = padding_H = padding_W = padding. Default: padding = 0.
+        stride(int|tuple): The stride size. If stride is a tuple, it must
+            contain three integers, (stride_D, stride_H, stride_W). Otherwise, the
+            stride_D = stride_H = stride_W = stride. Default: stride = 1.
+        dilation(int|tuple): The dilation size. If dilation is a tuple, it must
+            contain three integers, (dilation_D, dilation_H, dilation_W). Otherwise, the
+            dilation_D = dilation_H = dilation_W = dilation. Default: dilation = 1.
+        groups(int): The groups number of the Conv3d transpose layer. Inspired by
+            grouped convolution in Alex Krizhevsky's Deep CNN paper, in which
+            when group=2, the first half of the filters is only connected to the
+            first half of the input channels, while the second half of the
+            filters is only connected to the second half of the input channels.
+            Default: groups=1
+        param_attr(ParamAttr): The parameters to the Conv3d_transpose Layer.
+            Default: None
+        bias_attr(ParamAttr): Bias parameter for the Conv3d layer. Default: None
+        use_cudnn(bool): Use cudnn kernel or not, it is valid only when the cudnn
+            library is installed. 
Default: True + act(str): Activation type. Default: None + name(str|None): A name for this layer(optional). If set None, the layer + will be named automatically. + + Returns: + Variable: The tensor variable storing the convolution transpose result. + + Raises: + ValueError: If the shapes of input, filter_size, stride, padding and + groups mismatch. + + Examples: + .. code-block:: python + + data = fluid.layers.data(name='data', shape=[3, 12, 32, 32], dtype='float32') + conv3d_transpose = fluid.layers.conv3d_transpose(input=data, num_filters=2, filter_size=3) + """ + l_type = "conv3d_transpose" + helper = LayerHelper(l_type, **locals()) + if not isinstance(input, Variable): + raise TypeError("Input of conv3d_transpose must be Variable") + input_channel = input.shape[1] + + padding = utils.convert_to_list(padding, 3, 'padding') + stride = utils.convert_to_list(stride, 3, 'stride') + dilation = utils.convert_to_list(dilation, 3, 'dilation') + + if not isinstance(use_cudnn, bool): + raise ValueError("use_cudnn should be True or False") + + if filter_size is None: + if output_size is None: + raise ValueError("output_size must be set when filter_size is None") + if isinstance(output_size, int): + output_size = [output_size, output_size] + + d_in = input.shape[2] + h_in = input.shape[3] + w_in = input.shape[4] + + filter_size_d = (output_size[0] - (d_in - 1) * stride[0] + 2 * + padding[0] - 1) / dilation[0] + 1 + filter_size_h = (output_size[1] - (h_in - 1) * stride[1] + 2 * + padding[1] - 1) / dilation[1] + 1 + filter_size_w = (output_size[2] - (w_in - 1) * stride[2] + 2 * + padding[2] - 1) / dilation[2] + 1 + filter_size = [filter_size_d, filter_size_h, filter_size_w] + else: + filter_size = utils.convert_to_list(filter_size, 3, + 'conv3d_transpose.filter_size') + + groups = 1 if groups is None else groups + filter_shape = [input_channel, num_filters / groups] + filter_size img_filter = helper.create_parameter( dtype=input.dtype, shape=filter_shape, attr=helper.param_attr) pre_bias = helper.create_tmp_variable(dtype=input.dtype) helper.append_op( - type='conv2d_transpose', + type=l_type, inputs={'Input': [input], 'Filter': [img_filter]}, outputs={'Output': pre_bias}, @@ -1844,6 +2583,7 @@ def conv2d_transpose(input, 'strides': stride, 'paddings': padding, 'dilations': dilation, + 'groups': groups, 'use_cudnn': use_cudnn }) @@ -1863,18 +2603,18 @@ def sequence_expand(x, y, ref_level=-1, name=None): * Case 1 x is a LoDTensor: - x.lod = [[0, 2, 4]] + x.lod = [[2, 2]] x.data = [[a], [b], [c], [d]] x.dims = [4, 1] y is a LoDTensor: - y.lod = [[0, 2, 4], - [0, 3, 6, 7, 8]] + y.lod = [[2, 2], + [3, 3, 1, 1]] ref_level: 0 then output is a 1-level LoDTensor: - out.lod = [[0, 2, 4, 6, 8]] + out.lod = [[2, 2, 2, 2]] out.data = [[a], [b], [a], [b], [c], [d], [c], [d]] out.dims = [8, 1] @@ -1884,7 +2624,7 @@ def sequence_expand(x, y, ref_level=-1, name=None): x.dims = [3, 1] y is a LoDTensor: - y.lod = [[0, 2, 2, 5]] + y.lod = [[2, 0, 3]] ref_level: -1 @@ -1922,10 +2662,89 @@ def sequence_expand(x, y, ref_level=-1, name=None): return tmp -def beam_search(pre_ids, ids, scores, beam_size, end_id, level=0): - ''' - This function implements the beam search algorithm. - ''' +def beam_search(pre_ids, + pre_scores, + ids, + scores, + beam_size, + end_id, + level=0, + name=None): + """ + Beam search is a classical algorithm for selecting candidate words in a + machine translation task. + + Refer to `Beam search `_ + for more details. + + This layer does the search in beams for one time step. 
Specifically, it
+    selects the top-K candidate word ids of the current step from :attr:`ids`
+    according to their :attr:`scores` for all source sentences, where K is
+    :attr:`beam_size` and :attr:`ids, scores` are predicted results from the
+    computation cell. Additionally, :attr:`pre_ids` and :attr:`pre_scores` are
+    the output of beam_search at the previous step; they are needed to handle
+    ended candidate translations specially.
+
+    Note that the :attr:`scores` passed in should be accumulated scores, and
+    length penalty should be done with extra operators before calculating the
+    accumulated scores if needed; it is also suggested to find the top-K
+    candidates first and use only those in the following computation.
+
+    Please see the following demo for a full beam search usage example:
+
+        fluid/tests/book/test_machine_translation.py
+
+    Args:
+        pre_ids(Variable): The LodTensor variable which is the output of
+            beam_search at the previous step. It should be a LodTensor with shape
+            :math:`(batch_size, 1)` and lod
+            :math:`[[0, 1, ... , batch_size], [0, 1, ..., batch_size]]` at the
+            first step.
+        pre_scores(Variable): The LodTensor variable which is the output of
+            beam_search at the previous step.
+        ids(Variable): The LodTensor variable containing the candidates ids.
+            Its shape should be :math:`(batch_size \\times beam_size, K)`,
+            where :math:`K` is supposed to be :attr:`beam_size`.
+        scores(Variable): The LodTensor variable containing the accumulated
+            scores corresponding to :attr:`ids` and its shape is the same as
+            the shape of :attr:`ids`.
+        beam_size(int): The beam width used in beam search.
+        end_id(int): The id of end token.
+        level(int, default 0): It can be ignored and mustn't change currently.
+            It means the source level of lod, which is explained as follows.
+            The lod level of :attr:`ids` should be 2. The first level is the
+            source level, which describes how many prefixes (branches) each
+            source sentence (beam) has, and the second level is the sentence
+            level, which describes how these candidates belong to the prefixes.
+            The paths linking prefixes and selected candidates are organized
+            and reserved in lod.
+        name(str|None): A name for this layer(optional). If set None, the layer
+            will be named automatically.
+
+    Returns:
+        Variable: The LodTensor pair containing the selected ids and the \
+            corresponding scores.
+
+    Examples:
+        .. code-block:: python
+
+            # Suppose `probs` contains predicted results from the computation
+            # cell and `pre_ids` and `pre_scores` are the output of beam_search
+            # at the previous step.
+            topk_scores, topk_indices = layers.topk(probs, k=beam_size)
+            accu_scores = layers.elementwise_add(
+                x=layers.log(x=topk_scores),
+                y=layers.reshape(
+                    pre_scores, shape=[-1]),
+                axis=0)
+            selected_ids, selected_scores = layers.beam_search(
+                pre_ids=pre_ids,
+                pre_scores=pre_scores,
+                ids=topk_indices,
+                scores=accu_scores,
+                beam_size=beam_size,
+                end_id=end_id)
+    """
     helper = LayerHelper('beam_search', **locals())
     score_type = scores.dtype
     id_type = ids.dtype
@@ -1937,6 +2756,7 @@ def beam_search(pre_ids, ids, scores, beam_size, end_id, level=0):
         type='beam_search',
         inputs={
             'pre_ids': pre_ids,
+            'pre_scores': pre_scores,
             'ids': ids,
             'scores': scores,
         },
@@ -1954,6 +2774,56 @@ def beam_search(pre_ids, ids, scores, beam_size, end_id, level=0):
     return selected_ids, selected_scores
 
 
+def beam_search_decode(ids, scores, beam_size, end_id, name=None):
+    """
+    Beam Search Decode Layer. 
This layer constructs the full hypotheses for + each source sentence by walking back along the LoDTensorArray :attr:`ids` + whose lods can be used to restore the path in the beam search tree. + Please see the following demo for a fully beam search usage example: + fluid/tests/book/test_machine_translation.py + + Args: + ids(Variable): The LodTensorArray variable containing the selected ids + of all steps. + scores(Variable): The LodTensorArray variable containing the selected + scores of all steps. + beam_size(int): The beam width used in beam search. + end_id(int): The id of end token. + name(str|None): A name for this layer(optional). If set None, the layer + will be named automatically. + + Returns: + Variable: The LodTensor pair containing the generated id sequences \ + and the corresponding scores. The shapes and lods of the two \ + LodTensor are same. The lod level is 2 and the two levels \ + separately indicate how many hypotheses each source sentence has \ + and how many ids each hypothesis has. + + Examples: + .. code-block:: python + # Suppose `ids` and `scores` are LodTensorArray variables reserving + # the selected ids and scores of all steps + finished_ids, finished_scores = layers.beam_search_decode( + ids, scores, beam_size=5, end_id=0) + """ + helper = LayerHelper('beam_search_decode', **locals()) + sentence_ids = helper.create_tmp_variable(dtype=ids.dtype) + sentence_scores = helper.create_tmp_variable(dtype=ids.dtype) + + helper.append_op( + type="beam_search_decode", + inputs={"Ids": ids, + "Scores": scores}, + outputs={ + "SentenceIds": sentence_ids, + "SentenceScores": sentence_scores + }, + attrs={"beam_size": beam_size, + "end_id": end_id}) + + return sentence_ids, sentence_scores + + def lstm_unit(x_t, hidden_t_prev, cell_t_prev, @@ -2080,11 +2950,11 @@ def reduce_sum(input, dim=None, keep_dim=False, name=None): Args: input (Variable): The input variable which is a Tensor or LoDTensor. - dim (int|None): The dimension along which the sum is performed. If + dim (list|int|None): The dimensions along which the sum is performed. If :attr:`None`, sum all elements of :attr:`input` and return a Tensor variable with a single element, otherwise must be in the - range :math:`[-rank(input), rank(input))`. If :math:`dim < 0`, - the dimension to reduce is :math:`rank + dim`. + range :math:`[-rank(input), rank(input))`. If :math:`dim[i] < 0`, + the dimension to reduce is :math:`rank + dim[i]`. keep_dim (bool|False): Whether to reserve the reduced dimension in the output Tensor. The result tensor will have one fewer dimension than the :attr:`input` unless :attr:`keep_dim` is true. @@ -2100,20 +2970,30 @@ def reduce_sum(input, dim=None, keep_dim=False, name=None): # x is a Tensor variable with following elements: # [[0.2, 0.3, 0.5, 0.9] # [0.1, 0.2, 0.6, 0.7]] - # Each example is followed by the correspending output tensor. + # Each example is followed by the corresponding output tensor. fluid.layers.reduce_sum(x) # [3.5] fluid.layers.reduce_sum(x, dim=0) # [0.3, 0.5, 1.1, 1.6] fluid.layers.reduce_sum(x, dim=-1) # [1.9, 1.6] fluid.layers.reduce_sum(x, dim=1, keep_dim=True) # [[1.9], [1.6]] + + # x is a Tensor variable with shape [2, 2, 2] and elements as below: + # [[[1, 2], [3, 4]], + # [[5, 6], [7, 8]]] + # Each example is followed by the corresponding output tensor. 
+            fluid.layers.reduce_sum(x, dim=[1, 2]) # [10, 26]
+            fluid.layers.reduce_sum(x, dim=[0, 1]) # [16, 20]
+
    """
    helper = LayerHelper('reduce_sum', **locals())
    out = helper.create_tmp_variable(dtype=helper.input_dtype())
+    if dim is not None and not isinstance(dim, list):
+        dim = [dim]
    helper.append_op(
        type='reduce_sum',
        inputs={'X': input},
        outputs={'Out': out},
        attrs={
-            'dim': dim if dim != None else 0,
+            'dim': dim if dim != None else [0],
            'keep_dim': keep_dim,
            'reduce_all': True if dim == None else False
        })
@@ -2122,23 +3002,24 @@ def reduce_sum(input, dim=None, keep_dim=False, name=None):

 def reduce_mean(input, dim=None, keep_dim=False, name=None):
    """
-    Computes the mean of tensor elements over the given dimension.
+    Computes the mean of the input tensor's elements along the given dimension.

    Args:
        input (Variable): The input variable which is a Tensor or LoDTensor.
-        dim (int|None): The dimension along which the mean is computed. If
-            :attr:`None`, compute the mean over all elements of :attr:`input`
-            and return a Tensor variable with a single element, otherwise
+        dim (list|int|None): The dimension along which the mean is computed. If
+            `None`, compute the mean over all elements of :attr:`input`
+            and return a variable with a single element, otherwise
            it must be in the range :math:`[-rank(input), rank(input))`. If
-            :math:`dim < 0`, the dimension to reduce is :math:`rank + dim`.
+            :math:`dim[i] < 0`, the dimension to reduce is
+            :math:`rank(input) + dim[i]`.
        keep_dim (bool): Whether to reserve the reduced dimension in the
            output Tensor. The result tensor will have one fewer dimension
            than the :attr:`input` unless :attr:`keep_dim` is true.
-        name(str|None): A name for this layer(optional). If set None, the layer
+        name(str|None): A name for this layer(optional). If set `None`, the layer
            will be named automatically.

    Returns:
-        Variable: The reduced Tensor variable.
+        Variable: The reduced mean Variable.

    Examples:
        .. code-block:: python
@@ -2150,16 +3031,26 @@ def reduce_mean(input, dim=None, keep_dim=False, name=None):
            fluid.layers.reduce_mean(x)  # [0.4375]
            fluid.layers.reduce_mean(x, dim=0)  # [0.15, 0.25, 0.55, 0.8]
            fluid.layers.reduce_mean(x, dim=-1)  # [0.475, 0.4]
-            fluid.layers.reduce_mean(x, dim=1, keep_dim=True)  # [[0.475], [0.4]]
+            fluid.layers.reduce_mean(
+                x, dim=1, keep_dim=True)  # [[0.475], [0.4]]
+
+            # x is a Tensor variable with shape [2, 2, 2] and elements as below:
+            #      [[[1.0, 2.0], [3.0, 4.0]],
+            #      [[5.0, 6.0], [7.0, 8.0]]]
+            # Each example is followed by the corresponding output tensor.
+            fluid.layers.reduce_mean(x, dim=[1, 2])  # [2.5, 6.5]
+            fluid.layers.reduce_mean(x, dim=[0, 1])  # [4.0, 5.0]
    """
    helper = LayerHelper('reduce_mean', **locals())
    out = helper.create_tmp_variable(dtype=helper.input_dtype())
+    if dim is not None and not isinstance(dim, list):
+        dim = [dim]
    helper.append_op(
        type='reduce_mean',
        inputs={'X': input},
        outputs={'Out': out},
        attrs={
-            'dim': dim if dim != None else 0,
+            'dim': dim if dim != None else [0],
            'keep_dim': keep_dim,
            'reduce_all': True if dim == None else False
        })
@@ -2172,11 +3063,11 @@ def reduce_max(input, dim=None, keep_dim=False, name=None):

    Args:
        input (Variable): The input variable which is a Tensor or LoDTensor.
-        dim (int|None): The dimension along which the maximum is computed.
+        dim (list|int|None): The dimension along which the maximum is computed.
            If :attr:`None`, compute the maximum over all elements of
            :attr:`input` and return a Tensor variable with a single element,
            otherwise must be in the range :math:`[-rank(input), rank(input))`.
-            If :math:`dim < 0`, the dimension to reduce is :math:`rank + dim`.
+            If :math:`dim[i] < 0`, the dimension to reduce is :math:`rank + dim[i]`.
        keep_dim (bool): Whether to reserve the reduced dimension in the
            output Tensor. The result tensor will have one fewer dimension
            than the :attr:`input` unless :attr:`keep_dim` is true.
@@ -2197,15 +3088,24 @@ def reduce_max(input, dim=None, keep_dim=False, name=None):
            fluid.layers.reduce_max(x, dim=0)  # [0.2, 0.3, 0.6, 0.9]
            fluid.layers.reduce_max(x, dim=-1)  # [0.9, 0.7]
            fluid.layers.reduce_max(x, dim=1, keep_dim=True)  # [[0.9], [0.7]]
+
+            # x is a Tensor variable with shape [2, 2, 2] and elements as below:
+            #      [[[1.0, 2.0], [3.0, 4.0]],
+            #      [[5.0, 6.0], [7.0, 8.0]]]
+            # Each example is followed by the corresponding output tensor.
+            fluid.layers.reduce_max(x, dim=[1, 2])  # [4.0, 8.0]
+            fluid.layers.reduce_max(x, dim=[0, 1])  # [7.0, 8.0]
    """
    helper = LayerHelper('reduce_max', **locals())
    out = helper.create_tmp_variable(dtype=helper.input_dtype())
+    if dim is not None and not isinstance(dim, list):
+        dim = [dim]
    helper.append_op(
        type='reduce_max',
        inputs={'X': input},
        outputs={'Out': out},
        attrs={
-            'dim': dim if dim != None else 0,
+            'dim': dim if dim != None else [0],
            'keep_dim': keep_dim,
            'reduce_all': True if dim == None else False
        })
@@ -2218,11 +3118,11 @@ def reduce_min(input, dim=None, keep_dim=False, name=None):

    Args:
        input (Variable): The input variable which is a Tensor or LoDTensor.
-        dim (int|None): The dimension along which the minimum is computed.
+        dim (list|int|None): The dimensions along which the minimum is computed.
            If :attr:`None`, compute the minimum over all elements of
            :attr:`input` and return a Tensor variable with a single element,
            otherwise must be in the range :math:`[-rank(input), rank(input))`.
-            If :math:`dim < 0`, the dimension to reduce is :math:`rank + dim`.
+            If :math:`dim[i] < 0`, the dimension to reduce is :math:`rank + dim[i]`.
        keep_dim (bool): Whether to reserve the reduced dimension in the
            output Tensor. The result tensor will have one fewer dimension
            than the :attr:`input` unless :attr:`keep_dim` is true.
@@ -2243,15 +3143,24 @@ def reduce_min(input, dim=None, keep_dim=False, name=None):
            fluid.layers.reduce_min(x, dim=0)  # [0.1, 0.2, 0.5, 0.7]
            fluid.layers.reduce_min(x, dim=-1)  # [0.2, 0.1]
            fluid.layers.reduce_min(x, dim=1, keep_dim=True)  # [[0.2], [0.1]]
+
+            # x is a Tensor variable with shape [2, 2, 2] and elements as below:
+            #      [[[1.0, 2.0], [3.0, 4.0]],
+            #      [[5.0, 6.0], [7.0, 8.0]]]
+            # Each example is followed by the corresponding output tensor.
+            fluid.layers.reduce_min(x, dim=[1, 2])  # [1.0, 5.0]
+            fluid.layers.reduce_min(x, dim=[0, 1])  # [1.0, 2.0]
    """
    helper = LayerHelper('reduce_min', **locals())
    out = helper.create_tmp_variable(dtype=helper.input_dtype())
+    if dim is not None and not isinstance(dim, list):
+        dim = [dim]
    helper.append_op(
        type='reduce_min',
        inputs={'X': input},
        outputs={'Out': out},
        attrs={
-            'dim': dim if dim != None else 0,
+            'dim': dim if dim != None else [0],
            'keep_dim': keep_dim,
            'reduce_all': True if dim == None else False
        })
@@ -2264,11 +3173,11 @@ def reduce_prod(input, dim=None, keep_dim=False, name=None):

    Args:
        input (Variable): The input variable which is a Tensor or LoDTensor.
-        dim (int|None): The dimension along which the product is performed. If
+        dim (list|int|None): The dimensions along which the product is performed. If
            :attr:`None`, multiply all elements of :attr:`input` and return a
            Tensor variable with a single element, otherwise must be in the
-            range :math:`[-rank(input), rank(input))`. If :math:`dim < 0`,
-            the dimension to reduce is :math:`rank + dim`.
+            range :math:`[-rank(input), rank(input))`. If :math:`dim[i] < 0`,
+            the dimension to reduce is :math:`rank + dim[i]`.
        keep_dim (bool|False): Whether to reserve the reduced dimension in the
            output Tensor. The result tensor will have one fewer dimension
            than the :attr:`input` unless :attr:`keep_dim` is true.
@@ -2290,15 +3199,24 @@ def reduce_prod(input, dim=None, keep_dim=False, name=None):
            fluid.layers.reduce_prod(x, dim=-1)  # [0.027, 0.0084]
            fluid.layers.reduce_prod(x, dim=1, keep_dim=True)  # [[0.027], [0.0084]]
+
+            # x is a Tensor variable with shape [2, 2, 2] and elements as below:
+            #      [[[1.0, 2.0], [3.0, 4.0]],
+            #      [[5.0, 6.0], [7.0, 8.0]]]
+            # Each example is followed by the corresponding output tensor.
+            fluid.layers.reduce_prod(x, dim=[1, 2])  # [24.0, 1680.0]
+            fluid.layers.reduce_prod(x, dim=[0, 1])  # [105.0, 384.0]
    """
    helper = LayerHelper('reduce_prod', **locals())
    out = helper.create_tmp_variable(dtype=helper.input_dtype())
+    if dim is not None and not isinstance(dim, list):
+        dim = [dim]
    helper.append_op(
        type='reduce_prod',
        inputs={'X': input},
        outputs={'Out': out},
        attrs={
-            'dim': dim if dim != None else 0,
+            'dim': dim if dim != None else [0],
            'keep_dim': keep_dim,
            'reduce_all': True if dim == None else False
        })
@@ -2323,7 +3241,7 @@ def split(input, num_or_sections, dim=-1, name=None):
            will be named automatically.

    Returns:
-        List: The list of segmented tensor variables.
+        list(Variable): The list of segmented tensor variables.

    Examples:
        .. code-block:: python
@@ -2333,7 +3251,8 @@ def split(input, num_or_sections, dim=-1, name=None):
            x0.shape  # [3, 3, 5]
            x1.shape  # [3, 3, 5]
            x2.shape  # [3, 3, 5]
-            x0, x1, x2 = fluid.layers.split(x, num_or_sections=[2, 3, 4], dim=1)
+            x0, x1, x2 = fluid.layers.split(
+                x, num_or_sections=[2, 3, 4], dim=1)
            x0.shape  # [3, 2, 5]
            x1.shape  # [3, 3, 5]
            x2.shape  # [3, 4, 5]
@@ -2372,78 +3291,51 @@ def l2_normalize(x, axis, epsilon=1e-12, name=None):
    The l2 normalize layer normalizes `x` along dimension `axis` using an L2
    norm. For a 1-D tensor (`dim` is fixed to 0), this layer computes

-        output = x / sqrt(max(sum(x**2), epsilon))
+    .. math::
+
+        y = \\frac{x}{\sqrt{\sum{x^2} + epsilon}}

    For `x` with more dimensions, this layer independently normalizes each
    1-D slice along dimension `axis`.

    Args:
-        x(Variable|list): The input tensor to l2_normalize layer.
-        axis(int): Dimension along which to normalize the input.
-        epsilon(float): A lower bound value for `x`'s l2 norm. sqrt(epsilon) will
-                        be used as the divisor if the l2 norm of `x` is less than
-                        sqrt(epsilon).
-        name(str|None): A name for this layer(optional). If set None, the layer
-                        will be named automatically.
-
+        x(Variable|list): The input tensor to l2_normalize layer.
+        axis(int): The axis on which to apply normalization. If `axis < 0`, \
            the dimension to normalize is rank(X) + axis. -1 is the
+            last dimension.
+        epsilon(float): The epsilon value is used to avoid division by zero, \
            the default value is 1e-12.
+        name(str|None): A name for this layer(optional). If set None, the layer \
            will be named automatically.

    Returns:
-        Variable: The output tensor variable.
+        Variable: The output tensor variable has the same shape as `x`.

    Examples:
+
        .. 
code-block:: python - data = fluid.layers.data(name="data", - shape=(3, 17, 13), - dtype="float32") - normed = fluid.layers.l2_normalize(x=data, axis=1) + data = fluid.layers.data(name="data", + shape=(3, 17, 13), + dtype="float32") + normed = fluid.layers.l2_normalize(x=data, axis=1) """ if len(x.shape) == 1: axis = 0 - helper = LayerHelper("l2_normalize", **locals()) - square = helper.create_tmp_variable(dtype=x.dtype) - helper.append_op(type="square", inputs={"X": x}, outputs={"Out": square}) - - reduced_sum = helper.create_tmp_variable(dtype=x.dtype) + out = helper.create_tmp_variable(dtype=x.dtype) + norm = helper.create_tmp_variable(dtype=x.dtype) helper.append_op( - type="reduce_sum", - inputs={"X": square}, - outputs={"Out": reduced_sum}, + type="norm", + inputs={"X": x}, + outputs={"Out": out, + "Norm": norm}, attrs={ - "dim": 1 if axis is None else axis, - "keep_dim": True, - "reduce_all": False + "axis": 1 if axis is None else axis, + "epsilon": epsilon, }) - - # TODO(caoying) A lower bound value epsilon for the norm is needed to - # imporve the numeric stability of reciprocal. This requires a maximum_op. - rsquare = helper.create_tmp_variable(dtype=x.dtype) - helper.append_op( - type="reciprocal", inputs={"X": reduced_sum}, outputs={"Out": rsquare}) - - # TODO(caoying) the current elementwise_mul operator does not support a - # general broadcast rule which broadcasts input(Y) to have the same - # dimension with Input(X) starting from a specified dimension. So this - # exanpsion is requred. Once a general broadcast rule is spported, this - # expanding canbe removed. - rsquare_expanded = helper.create_tmp_variable(dtype=x.dtype) - expand_times = [1] * len(x.shape) - expand_times[axis] = int(x.shape[axis]) - helper.append_op( - type="expand", - inputs={"X": rsquare}, - outputs={"Out": rsquare_expanded}, - attrs={"expand_times": expand_times}) - - out = helper.create_tmp_variable(dtype=x.dtype) - helper.append_op( - type="elementwise_mul", - inputs={"X": x, - "Y": rsquare_expanded}, - outputs={"Out": out}) return out @@ -2553,28 +3445,56 @@ def matmul(x, y, transpose_x=False, transpose_y=False, name=None): return out -def topk(input, k): +def topk(input, k, name=None): """ This operator is used to find values and indices of the k largest entries for the last dimension. - If the input is a vector (rank=1), finds the k largest entries in the vector + If the input is a vector (1-D Tensor), finds the k largest entries in the vector and outputs their values and indices as vectors. Thus values[j] is the j-th largest entry in input, and its index is indices[j]. If the input is a Tensor with higher rank, this operator computes the top k entries along the last dimension. + For example: + + .. code-block:: text + + If: + input = [[5, 4, 2, 3], + [9, 7, 10, 25], + [6, 2, 10, 1]] + k = 2 + + Then: + The first output: + values = [[5, 4], + [10, 25], + [6, 10]] + + The second output: + indices = [[0, 1], + [2, 3], + [0, 2]] + Args: input(Variable): The input variable which can be a vector or Tensor with higher rank. - k(int): An integer value to specify the top k largest elements. + k(int): The number of top elements to look for along the last dimension + of input. + name(str|None): A name for this layer(optional). If set None, the layer + will be named automatically. + Default: None Returns: - values(Variable): The k largest elements along each last dimensional - slice. - indices(Variable): The indices of values within the last dimension of - input. + Tuple[Variable]: A tuple with two elements. 
Each element is a Variable. + The first one is k largest elements along each last + dimensional slice. The second one is indices of values + within the last dimension of input. + + Raises: + ValueError: If k < 1 or k is not less than the last dimension of input Examples: .. code-block:: python @@ -2582,7 +3502,7 @@ def topk(input, k): top5_values, top5_indices = layers.topk(input, k=5) """ shape = input.shape - if k < 1 and k >= shape[-1]: + if k < 1 or k >= shape[-1]: raise ValueError("k must be greater than 0 and less than %d." % (shape[-1])) @@ -2600,8 +3520,7 @@ def topk(input, k): return values, indices -def edit_distance(input, label, normalized=True, ignored_tokens=None, - name=None): +def edit_distance(input, label, normalized=True, ignored_tokens=None): """ EditDistance operator computes the edit distances between a batch of hypothesis strings and their references. Edit distance, also called @@ -2615,26 +3534,23 @@ def edit_distance(input, label, normalized=True, ignored_tokens=None, "kitten" -> "sitten" -> "sittin" -> "sitting" - Input(Hyps) is a LoDTensor consisting of all the hypothesis strings with + The input is a LoDTensor consisting of all the hypothesis strings with the total number denoted by `batch_size`, and the separation is specified by the LoD information. And the `batch_size` reference strings are arranged - in order in the same way in the LoDTensor Input(Refs). + in order in the same way in the input LoDTensor. - Output(Out) contains the `batch_size` results and each stands for the edit + The output contains the `batch_size` results and each stands for the edit distance for a pair of strings respectively. If Attr(normalized) is true, the edit distance will be divided by the length of reference string. Args: - input(Variable): The indices for hypothesis strings. - label(Variable): The indices for reference strings. - - normalized(bool): Indicated whether to normalize the edit distance by + normalized(bool, default True): Indicated whether to normalize the edit distance by the length of reference string. - - ignored_tokens(list of int): Tokens that should be removed before + ignored_tokens(list, default None): Tokens that should be removed before calculating edit distance. + name (str): The name of this layer. It is optional. Returns: Variable: sequence-to-sequence edit distance in shape [batch_size, 1]. @@ -2644,7 +3560,6 @@ def edit_distance(input, label, normalized=True, ignored_tokens=None, x = fluid.layers.data(name='x', shape=[8], dtype='float32') y = fluid.layers.data(name='y', shape=[7], dtype='float32') - cost = fluid.layers.edit_distance(input=x,label=y) """ helper = LayerHelper("edit_distance", **locals()) @@ -2685,6 +3600,7 @@ def edit_distance(input, label, normalized=True, ignored_tokens=None, def ctc_greedy_decoder(input, blank, name=None): """ This op is used to decode sequences by greedy policy by below steps: + 1. Get the indexes of max value for each row in input. a.k.a. numpy.argmax(input, axis=0). 2. For each sequence in result of step1, merge repeated tokens between two @@ -2706,7 +3622,7 @@ def ctc_greedy_decoder(input, blank, name=None): [0.2, 0.2, 0.1, 0.5], [0.5, 0.1, 0.3, 0.1]] - input.lod = [[0, 4, 8]] + input.lod = [[4, 4]] Then: @@ -2714,7 +3630,7 @@ def ctc_greedy_decoder(input, blank, name=None): [1], [3]] - output.lod = [[0, 2, 3]] + output.lod = [[2, 1]] Args: @@ -2724,14 +3640,14 @@ def ctc_greedy_decoder(input, blank, name=None): where Lp is the sum of all input sequences' length and num_classes is the true number of classes. 
(not including the blank label).
-
        blank(int): the blank label index of Connectionist Temporal
            Classification (CTC) loss, which is in the half-opened
            interval [0, num_classes + 1).
+        name (str): The name of this layer. It is optional.

    Returns:
        Variable: CTC greedy decode result. If all the sequences in result were
-            empty, the result LoDTensor will be [-1] with LoD [[0]] and dims [1, 1].
+            empty, the result LoDTensor will be [-1] with LoD [[]] and dims [1, 1].

    Examples:
        .. code-block:: python
@@ -2764,35 +3680,33 @@ def warpctc(input, label, blank=0, norm_by_times=False):
        input tensor.

    Args:
-        input(Variable): (LodTensor, default: LoDTensor),
-            the unscaled probabilities of variable-length sequences,
+        input (Variable): The unscaled probabilities of variable-length sequences,
            which is a 2-D Tensor with LoD information.
            It's shape is [Lp, num_classes + 1], where Lp is the sum of all
            input sequences' length and num_classes is the true number of classes.
            (not including the blank label).
-        label(Variable): (LodTensor, default: LoDTensor), the ground truth
-            of variable-length sequence, which is a 2-D Tensor with LoD
-            information. It is of the shape [Lg, 1], where Lg is th sum of
-            all labels' length.
-        blank: (int, default: 0), the blank label index of Connectionist
+        label (Variable): The ground truth of variable-length sequence,
+            which is a 2-D Tensor with LoD information. It is of the shape
+            [Lg, 1], where Lg is the sum of all labels' length.
+        blank (int, default 0): The blank label index of Connectionist
            Temporal Classification (CTC) loss, which is in the half-opened
            interval [0, num_classes + 1).
-        norm_by_times: (bool, default: false), whether to normalize
-            the gradients by the number of time-step, which is also the
-            sequence's length. There is no need to normalize the gradients
-            if warpctc layer was follewed by a mean_op.
+        norm_by_times(bool, default false): Whether to normalize the gradients
+            by the number of time-step, which is also the sequence's length.
+            There is no need to normalize the gradients if the warpctc layer
+            was followed by a mean_op.

    Returns:
        Variable: The Connectionist Temporal Classification (CTC) loss,
        which is a 2-D Tensor of the shape [batch_size, 1].

    Examples:
+
        .. code-block:: python
-            y = layers.data(
-                name='y', shape=[11, 8], dtype='float32', lod_level=1)
-            y_predict = layers.data(
-                name='y_predict', shape=[11, 1], dtype='float32')
-            cost = layers.warpctc(input=y_predict, label=y)
+
+            label = fluid.layers.data(shape=[11, 8], dtype='float32', lod_level=1)
+            predict = fluid.layers.data(shape=[11, 1], dtype='float32')
+            cost = fluid.layers.warpctc(input=predict, label=label)
    """
    helper = LayerHelper('warpctc', **locals())
@@ -2822,16 +3736,20 @@ def sequence_reshape(input, new_dim):
        x is a LoDTensor:
            x.lod  = [[0, 2, 6]]
-            x.data = [[1, 2], [3, 4],
-                      [5, 6], [7, 8], [9, 10], [11, 12]]
+            x.data = [[1,  2], [3,  4],
+                      [5,  6], [7,  8],
+                      [9, 10], [11, 12]]
            x.dims = [6, 2]

        set new_dim = 4

        then out is a LoDTensor:
+
            out.lod  = [[0, 1, 3]]
-            out.data = [[1, 2, 3, 4],
-                        [5, 6, 7, 8], [9, 10, 11, 12]]
+
+            out.data = [[1, 2, 3, 4],
+                        [5, 6, 7, 8],
+                        [9, 10, 11, 12]]
            out.dims = [3, 4]

        Currently, only 1-level LoDTensor is supported and please make sure
@@ -2839,19 +3757,19 @@
        no remainder for each sequence.

    Args:
-        input (Variable): (LodTensor, default: LoDTensor), a 2-D LoDTensor
-            with shape being [N, M] where M for dimension.
-        new_dim (int): New dimension which the input LoDTensor is reshaped to.
+
+        input (Variable): A 2-D LoDTensor with shape [N, M], where M is the
+            dimension size.
+        new_dim (int): New dimension that the input LoDTensor is reshaped to.

    Returns:
+
        Variable: Reshaped LoDTensor according to new dimension.

    Examples:
        .. code-block:: python

-            x = fluid.layers.data(name='x', shape=[5, 20],
-                                  dtype='float32', lod_level=1)
-            x_reshaped = layers.sequence_reshape(input=x, new_dim=10)
+            x = fluid.layers.data(shape=[5, 20], dtype='float32', lod_level=1)
+            x_reshaped = fluid.layers.sequence_reshape(input=x, new_dim=10)
    """
    helper = LayerHelper('sequence_reshape', **locals())
    out = helper.create_tmp_variable(helper.input_dtype())
@@ -2863,7 +3781,10 @@ def sequence_reshape(input, new_dim):
    return out


-@autodoc()
+# FIXME(wuyi): let docstring_checker.py understand @autodoc.
+# For now, the comments in c++ use types like Tensor, but in python side
+# the type is often "Variable", and arguments may vary.
+@templatedoc(op_type="nce")
 def nce(input,
        label,
        num_total_classes,
@@ -2871,6 +3792,49 @@ def nce(input,
        param_attr=None,
        bias_attr=None,
        num_neg_samples=None):
+    """
+    ${comment}
+
+    Args:
+        input (Variable): The input variable.
+        label (Variable): The label.
+        num_total_classes (int): ${num_total_classes_comment}
+        sample_weight (Variable|None): A Variable of shape [batch_size, 1]
+            storing a weight for each sample. The default weight for each
+            sample is 1.0.
+        param_attr (ParamAttr|None): Attributes for the parameter.
+        bias_attr (ParamAttr|None): Attributes for the bias.
+        num_neg_samples (int): ${num_neg_samples_comment}
+
+    Returns:
+        Variable: The output nce loss.
+
+    Examples:
+        .. code-block:: python
+
+            window_size = 5
+            words = []
+            for i in xrange(window_size):
+                words.append(layers.data(
+                    name='word_{0}'.format(i), shape=[1], dtype='int64'))
+
+            dict_size = 10000
+            label_word = int(window_size / 2) + 1
+
+            embs = []
+            for i in xrange(window_size):
+                if i == label_word:
+                    continue
+
+                emb = layers.embedding(input=words[i], size=[dict_size, 32],
+                                       param_attr='emb.w', is_sparse=True)
+                embs.append(emb)
+
+            embs = layers.concat(input=embs, axis=1)
+            loss = layers.nce(input=embs, label=words[label_word],
+                              num_total_classes=dict_size, param_attr='nce.w',
+                              bias_attr='nce.b')
+    """
    helper = LayerHelper('nce', **locals())
    assert isinstance(input, Variable)
    dim = input.shape[1]
@@ -2918,18 +3882,85 @@ def nce(input,
    return cost / (num_neg_samples + 1)


-def transpose(x, perm, name=None):
+def hsigmoid(input, label, num_classes, param_attr=None, bias_attr=None):
    """
-    **transpose Layer**
+    The hierarchical sigmoid operator is used to accelerate the training
+    process of language models. This operator organizes the classes into a
+    complete binary tree: each leaf node represents a class (a word) and each
+    internal node acts as a binary classifier. For each word there is a unique
+    path from the root to its leaf node, and hsigmoid calculates the cost for
+    each internal node on the path and sums them to get the total cost.
+    hsigmoid can achieve an acceleration from :math:`O(N)` to :math:`O(logN)`,
+    where :math:`N` represents the size of the word dict.
+
+    Refer to `Hierarchical Probabilistic Neural Network Language Model
+    `_
+
+    Args:
+        input (Variable): The input tensor variable with shape
+            :math:`[N \\times D]`, where :math:`N` is the size of mini-batch,
+            and :math:`D` is the feature size.
+        label (Variable): The tensor variable contains labels of training data.
+            It is a tensor with shape :math:`[N \\times 1]`.
+        num_classes (int): The number of classes, must not be less than 2.
+        param_attr (ParamAttr|list of ParamAttr, default None): The parameter
+            attribute for learnable parameters/weights of this layer.
+        bias_attr (ParamAttr|list of ParamAttr, default None): The parameter
+            attribute for the bias of this layer. If it is set to False, no
+            bias will be applied.
+
+    Returns:
+        Out (Tensor): The cost of the hierarchical sigmoid operator. The shape
+            is [N, 1].
+
+    Examples:
+
+        .. code-block:: python
+
+            x = fluid.layers.data(name='x', shape=[2], dtype='float32')
+            y = fluid.layers.data(name='y', shape=[1], dtype='int64')
+            out = fluid.layers.hsigmoid(input=x, label=y, num_classes=6)
+    """
+
+    helper = LayerHelper('hierarchical_sigmoid', **locals())
+    dtype = helper.input_dtype()
+    out = helper.create_tmp_variable(dtype)
+    pre_out = helper.create_tmp_variable(dtype)
+    dim = input.shape[1]
+    if num_classes < 2:
+        raise ValueError("num_classes must not be less than 2.")
+    weights = helper.create_parameter(
+        attr=helper.param_attr,
+        shape=[num_classes - 1, dim],
+        is_bias=False,
+        dtype=input.dtype)
+    inputs = {"X": input, "W": weights, "Label": label}
+    if helper.bias_attr:
+        bias = helper.create_parameter(
+            attr=helper.bias_attr,
+            shape=[1, num_classes - 1],
+            is_bias=True,
+            dtype=input.dtype)
+        inputs['Bias'] = bias
+    helper.append_op(
+        type="hierarchical_sigmoid",
+        inputs=inputs,
+        outputs={"Out": out,
+                 "PreOut": pre_out},
+        attrs={"num_classes": num_classes})
+    return out
+

+def transpose(x, perm, name=None):
+    """
    Permute the dimensions of `input` according to `perm`.

    The `i`-th dimension of the returned tensor will correspond to the
    perm[i]-th dimension of `input`.

    Args:
-        input (Variable): (Tensor), A Tensor.
-        perm (list): A permutation of the dimensions of `input`.
+        x (Variable): The input Tensor.
+        perm (list): A permutation of the dimensions of `input`.
+        name (str): The name of this layer. It is optional.

    Returns:
        Variable: A transposed Tensor.
@@ -2962,7 +3993,13 @@ def transpose(x, perm, name=None):
    return out


-def im2sequence(input, filter_size=1, stride=1, padding=0, name=None):
+def im2sequence(input,
+                filter_size=1,
+                stride=1,
+                padding=0,
+                input_image_size=None,
+                out_stride=1,
+                name=None):
    """
    Extracts image patches from the input tensor to form a tensor of shape
    {input.batch_size * output_height * output_width, filter_size_H *
@@ -2999,6 +4036,15 @@ def im2sequence(input,
            padding_up = padding_down = padding_left = padding_right = padding
            Default: padding = 0.

+        input_image_size(Variable): The input contains the real sizes of the
+            images. Its dim is [batch_size, 2]. It is optional and is only
+            used for batch inference.
+
+        out_stride(int|tuple): The scaling of the image through the CNN. It is
+            optional and is valid only when input_image_size is not None.
+            If out_stride is a tuple, it must contain two integers,
+            (out_stride_H, out_stride_W). Otherwise,
+            out_stride_H = out_stride_W = out_stride.
+
+        name (str): The name of this layer. It is optional.

    Returns:
@@ -3010,8 +4056,6 @@ def im2sequence(input,

    Examples:

-        As an example:
-
        .. code-block:: text

            Given:
@@ -3051,11 +4095,11 @@ def im2sequence(input,
            [ 5.  7.  2.  4.  1.  3.  9.  0.]
            [ 7.  9.  4.  8.  3.  5.  0.  8.]]

-            output.dims = {8, 9}
+            output.dims = {8, 8}

-            output.lod = [[0, 4, 8]]
+            output.lod = [[4, 4]]

-        The simple usage is:
+    Examples:

        .. 
code-block:: python @@ -3073,44 +4117,27 @@ def im2sequence(input, filter_size=1, stride=1, padding=0, name=None): if len(padding) == 2: padding.append(padding[0]) padding.append(padding[1]) + inputs = {"X": input} + attrs = {"kernels": filter_size, "strides": stride, "padding": padding} + if input_image_size: + if isinstance(out_stride, int): + out_stride = [out_stride, out_stride] + inputs["Y"] = input_image_size + attrs["out_stride"] = out_stride + helper = LayerHelper('im2sequence', **locals()) + out = helper.create_tmp_variable(dtype=helper.input_dtype()) + helper.append_op( + type='im2sequence', inputs=inputs, outputs={'Out': out}, attrs=attrs) + return out - helper = LayerHelper('im2sequence', **locals()) - out = helper.create_tmp_variable(dtype=helper.input_dtype()) - helper.append_op( - type='im2sequence', - inputs={'X': input}, - outputs={'Out': out}, - attrs={ - 'kernels': filter_size, - 'strides': stride, - 'paddings': padding, - }) - return out - - -def row_conv(input, future_context_size, param_attr=None, act=None): - """Row Conv Operator. This layer will apply lookahead convolution to - **input**. The input variable should be a 2D LoDTensor with shape [T, D]. - Parameters with shape [future_context_size + 1, D] will be created. The math - equation of row convolution is as follows: - - .. math:: - Out_{i} = \sum_{j = i} ^ {i + \\tau} X_{j} \odot W_{i - j} - - In the above equation: - - * :math:`Out_{i}`: The i-th row of output variable with shape [1, D]. - * :math:`\\tau`: Future context size. - * :math:`X_{j}`: The j-th row of input variable with shape [1, D]. - * :math:`W_{i-j}`: The (i-j)-th row of parameters with shape [1, D]. - More details about row_conv please refer to the paper \ - (http://www.cs.cmu.edu/~dyogatam/papers/wang+etal.iclrworkshop2016.pdf) and - the design document \ - (https://github.com/PaddlePaddle/Paddle/issues/2228#issuecomment-303903645). +@templatedoc() +def row_conv(input, future_context_size, param_attr=None, act=None): + """ + ${comment} Args: - input (Variable): Input variable, a 2D LoDTensor with shape [T, D]. + input (${x_type}): ${x_comment}. future_context_size (int): Future context size. Please note, the shape of convolution kernel is [future_context_size + 1, D]. param_attr (ParamAttr): Attributes of parameters, including @@ -3118,14 +4145,13 @@ def row_conv(input, future_context_size, param_attr=None, act=None): act (str): Non-linear activation to be applied to output variable. Returns: - Variable: The output tensor with same shape as input tensor. + ${out_comment}. Examples: - .. code-block:: python - - x = fluid.layers.data(name='x', shape=[16], - dtype='float32', lod_level=1) - out = fluid.layers.row_conv(input=x, future_context_size=2) + >>> import paddle.fluid as fluid + >>> x = fluid.layers.data(name='x', shape=[16], + >>> dtype='float32', lod_level=1) + >>> out = fluid.layers.row_conv(input=x, future_context_size=2) """ helper = LayerHelper('row_conv', **locals()) dtype = helper.input_dtype() @@ -3141,42 +4167,23 @@ def row_conv(input, future_context_size, param_attr=None, act=None): return helper.append_activation(out) +@templatedoc() def multiplex(inputs, index): """ - **Multiplex Layer** + ${comment} - Referring to the given index variable, this layer selects rows from the - input variables to construct a multiplex variable. Assuming that there are - :math:`m` input variables and :math:`I_i` represents the i-th input - variable and :math:`i` is in [0, :math:`m`). 
All input variables are
-    tensors with same shape [:math:`d_0`, :math:`d_1`, ..., :math:`d_R`].
-    Please note that rank of the input tensor should be at least 2. Each input
-    variable will be treated as a 2-D matrix with shape [:math:`M`, :math:`N`]
-    where :math:`M` for :math:`d_0` and :math:`N` for :math:`d_1` * :math:`d_2`
-    * ... * :math:`d_R`. Let :math:`I_i[j]` be the j-th row of the i-th input
-    variable. The given index variable should be a 2-D tensor with shape
-    [:math:`M`, 1]. Let `ID[i]` be the i-th index value of the index variable.
-    Then the output variable will be a tensor with shape [:math:`d_0`,
-    :math:`d_1`, ..., :math:`d_R`]. If we treat the output tensor as a 2-D
-    matrix with shape [:math:`M`, :math:`N`] and let :math:`O[i]` be the i-th
-    row of the matrix, then `O[i]` is equal to :math:`I_{ID[i]}[i]`.
+    >>> import paddle.fluid as fluid
+    >>> x1 = fluid.layers.data(name='x1', shape=[4], dtype='float32')
+    >>> x2 = fluid.layers.data(name='x2', shape=[4], dtype='float32')
+    >>> index = fluid.layers.data(name='index', shape=[1], dtype='int32')
+    >>> out = fluid.layers.multiplex(inputs=[x1, x2], index=index)

    Args:
-        inputs (list): A list of variables to gather from. All variables have the
-            same shape and the rank is at least 2.
-        index (Variable): Tensor, index variable which is a 2-D tensor
-            with shape [M, 1] where M is the batch size.
+        inputs (list): ${x_comment}.
+        index (${ids_type}): ${ids_comment}.

    Returns:
-        Variable: Multiplex variable gathered from input variables.
-
-    Examples:
-        .. code-block:: python
-
-            x1 = fluid.layers.data(name='x1', shape=[4], dtype='float32')
-            x2 = fluid.layers.data(name='x2', shape=[4], dtype='float32')
-            index = fluid.layers.data(name='index', shape=[1], dtype='int32')
-            out = fluid.layers.multiplex(inputs=[x1, x2], index=index)
+        ${out_comment}.
    """
    helper = LayerHelper('multiplex', **locals())
@@ -3244,7 +4251,8 @@ def softmax_with_cross_entropy(logits, label, soft_label=False):
            data = fluid.layers.data(name='data', shape=[128], dtype='float32')
            label = fluid.layers.data(name='label', shape=[1], dtype='int64')
            fc = fluid.layers.fc(input=data, size=100)
-            out = fluid.layers.softmax_with_cross_entropy(logits=fc, label=label)
+            out = fluid.layers.softmax_with_cross_entropy(
+                logits=fc, label=label)
    """
    helper = LayerHelper('softmax_with_cross_entropy', **locals())
    softmax = helper.create_tmp_variable(dtype=logits.dtype)
@@ -3261,40 +4269,41 @@ def softmax_with_cross_entropy(logits, label, soft_label=False):

def smooth_l1(x, y, inside_weight=None, outside_weight=None, sigma=None):
    """
-    **Smooth L1 Loss Operator. **
-
-    This operator computes the smooth l1 loss for X and Y.
-    The operator takes the first dimension of X and Y as batch size.
-    For each instance, it computes the smooth l1 loss element by element first
-    and then sums all the losses. So the shape of Out is [batch_size, 1].
+    This layer computes the smooth L1 loss for Variable :attr:`x` and :attr:`y`.
+    It takes the first dimension of :attr:`x` and :attr:`y` as batch size.
+    For each instance, it computes the smooth L1 loss element by element first
+    and then sums all the losses. So the shape of the output Variable is
+    [batch_size, 1].

    Args:
        x (Variable): A tensor with rank at least 2. The input value of smooth
-            l1 loss op with shape [batch_size, dim1, ..., dimN].
+            L1 loss op with shape [batch_size, dim1, ..., dimN].
        y (Variable): A tensor with rank at least 2. The target value of smooth
-            l1 loss op with same shape as x.
+            L1 loss op with same shape as :attr:`x`.
        inside_weight (Variable|None): A tensor with rank at least 2. This
-            input is optional and should have same shape with x. If provided,
-            the result of (x - y) will be multiplied by this tensor element by
-            element.
+            input is optional and should have same shape with :attr:`x`. If
+            provided, the result of (:attr:`x` - :attr:`y`) will be multiplied
+            by this tensor element by element.
        outside_weight (Variable|None): A tensor with rank at least 2. This
-            input is optional and should have same shape with x. If provided,
-            the out smooth l1 loss will be multiplied by this tensor element
-            by element.
-        sigma (float|None): Hyper parameter of smooth l1 loss op. A float scalar
-            with default value 1.0.
+            input is optional and should have same shape with :attr:`x`. If
+            provided, the out smooth L1 loss will be multiplied by this tensor
+            element by element.
+        sigma (float|None): Hyper parameter of smooth L1 loss layer. A float
+            scalar with default value 1.0.
+
    Returns:
-        Variable: A tensor with rank be 2. The output smooth l1 loss with
-            shape [batch_size, 1].
+        Variable: The output smooth L1 loss with shape [batch_size, 1].

    Examples:
        .. code-block:: python

            data = fluid.layers.data(name='data', shape=[128], dtype='float32')
-            label = fluid.layers.data(name='label', shape=[100], dtype='int64')
+            label = fluid.layers.data(
+                name='label', shape=[100], dtype='float32')
            fc = fluid.layers.fc(input=data, size=100)
            out = fluid.layers.smooth_l1(x=fc, y=label)
    """
+
    helper = LayerHelper('smooth_l1_loss', **locals())
    diff = helper.create_tmp_variable(dtype=x.dtype)
    loss = helper.create_tmp_variable(dtype=x.dtype)
@@ -3314,32 +4323,20 @@ def smooth_l1(x, y, inside_weight=None, outside_weight=None, sigma=None):

def one_hot(input, depth):
    """
-    One Hot Operator. This operator creates the one-hot representations for input
-    index values. The following example will help to explain the function of this
-    operator.
+    This layer creates the one-hot representations for input indices.

    Args:
-        input(variable): A Tensor/LodTensor of indices, last dimension must be 1.
-        depth(scalar): an interger defining the depth of the one hot dimension.
+        input(Variable): Input indices, last dimension must be 1.
+        depth(scalar): An integer defining the depth of the one-hot dimension.

    Returns:
-        The one-hot tensor or LodTensor, same as input.
+        Variable: The one-hot representations of input.

    Examples:
        .. code-block:: python

-            X is a LoDTensor:
-              X.lod = [[0, 1, 4]]
-              X.shape = [4, 1]
-              X.data = [[1], [1], [3], [0]]
-            set depth = 4
-            Out is a LoDTensor:
-              Out.lod = [[0, 1, 4]]
-              Out.shape = [4, 4]
-              Out.data = [[0., 1., 0., 0.],
-                          [0., 1., 0., 0.],
-                          [0., 0., 0., 1.],
-                          [1., 0., 0., 0.]]
+            label = layers.data(name="label", shape=[1], dtype="float32")
+            one_hot_label = layers.one_hot(input=label, depth=10)
    """
    helper = LayerHelper("one_hot", **locals())
    one_hot_out = helper.create_tmp_variable(dtype='float32')
@@ -3353,15 +4350,23 @@ def one_hot(input, depth):

def autoincreased_step_counter(counter_name=None, begin=1, step=1):
    """
-    NOTE: The counter will be automatically increased by 1 every mini-batch
-    Return the run counter of the main program, which is started with 1.
+    Create an auto-increasing variable, which will be automatically increased
+    by 1 every mini-batch, and return the run counter of the main program,
+    which starts from 1 by default.

    Args:
        counter_name(str): The counter name, default is '@STEP_COUNTER@'.
        begin(int): The first value of this counter.
        step(int): The increment step between each execution.
- Returns(Variable): The global run counter. + Returns: + Variable: The global run counter. + + Examples: + .. code-block:: python + + global_step = fluid.layers.autoincreased_step_counter( + counter_name='@LR_DECAY_COUNTER@', begin=begin, step=1) """ helper = LayerHelper('global_step_counter') if counter_name is None: @@ -3372,7 +4377,7 @@ def autoincreased_step_counter(counter_name=None, begin=1, step=1): helper.set_variable_initializer( counter, initializer=Constant( value=begin - 1, force_cpu=True)) - helper.main_program.global_block().prepend_op( + helper.main_program.global_block()._prepend_op( type='increment', inputs={'X': [counter]}, outputs={'Out': [counter]}, @@ -3422,7 +4427,7 @@ def reshape(x, shape, actual_shape=None, act=None, inplace=True, name=None): the corresponding dimension of x. Args: - input(variable): The input tensor. + x(variable): The input tensor. shape(list): The new shape. At most one dimension of the new shape can be -1. actual_shape(variable): An optional input. If provided, reshape @@ -3431,11 +4436,17 @@ def reshape(x, shape, actual_shape=None, act=None, inplace=True, name=None): say :attr:`actual_shape` has a higher priority than :attr:`shape`. act (str): The non-linear activation to be applied to output variable. - inplace(bool): If this flag is set true, a new output tensor is created - whose data is copied from input x, otherwise the output - shares data with input without copying. + inplace(bool): If this flag is set true, the output + shares data with input without copying, otherwise + a new output tensor is created + whose data is copied from input x. + name (str): The name of this layer. It is optional. + + Returns: + Variable: The output tensor. - Returns(variable): The output tensor. + Raises: + TypeError: if actual_shape is neither Variable nor None. Examples: .. code-block:: python @@ -3448,6 +4459,11 @@ def reshape(x, shape, actual_shape=None, act=None, inplace=True, name=None): if not (isinstance(shape, list) or isinstance(shape, tuple)): raise ValueError("Input shape must be a python lsit or tuple.") + inputs = {"X": x} + if isinstance(actual_shape, Variable): + inputs["Shape"] = actual_shape + elif actual_shape is not None: + raise TypeError("actual_shape should either be Variable or None") # Validate the shape unk_dim_idx = -1 @@ -3465,88 +4481,86 @@ def reshape(x, shape, actual_shape=None, act=None, inplace=True, name=None): "except one unknown dimension.") helper = LayerHelper("reshape", **locals()) - reshaped = helper.create_tmp_variable(dtype=x.dtype) + out = helper.create_tmp_variable(dtype=x.dtype) helper.append_op( type="reshape", - inputs={"X": x, - "Shape": actual_shape} - if isinstance(actual_shape, Variable) else {"X": x}, - attrs={"shape": shape, - "inplace": inplace}, - outputs={"Out": reshaped}) + inputs=inputs, + attrs={"shape": shape}, + outputs={"Out": out}) - return helper.append_activation(reshaped) + return helper.append_activation(out) def lod_reset(x, y=None, target_lod=None): """ - LoD Reset Operator. Set LoD of **x** to a new one specified by **y** or - **target_lod**. When **y** provided, **y.lod** would be considered as target - LoD first, otherwise **y.data** would be considered as target LoD. If **y** - is not provided, target LoD should be specified by **target_lod**. - If target LoD is specified by **Y.data** or **target_lod**, only one level - LoD is supported. + Set LoD of :attr:`x` to a new one specified by :attr:`y` or + :attr:`target_lod`. 
When :attr:`y` provided, :attr:`y.lod` would be + considered as target LoD first, otherwise :attr:`y.data` would be + considered as target LoD. If :attr:`y` is not provided, target LoD should + be specified by :attr:`target_lod`. If target LoD is specified by + :attr:`Y.data` or :attr:`target_lod`, only one level LoD is supported. .. code-block:: text * Example 1: Given a 1-level LoDTensor x: - x.lod = [[ 0, 2, 5 6 ]] + x.lod = [[ 2, 3, 1 ]] x.data = [[1.0], [2.0], [3.0], [4.0], [5.0], [6.0]] x.dims = [6, 1] - target_lod: [0, 4, 6] + target_lod: [4, 2] then we get a 1-level LoDTensor: - out.lod = [[ 0, 4, 6 ]] + out.lod = [[4, 2]] out.data = [[1.0], [2.0], [3.0], [4.0], [5.0], [6.0]] out.dims = [6, 1] * Example 2: Given a 1-level LoDTensor x: - x.lod = [[ 0, 2, 5 6 ]] + x.lod = [[2, 3, 1]] x.data = [[1.0], [2.0], [3.0], [4.0], [5.0], [6.0]] x.dims = [6, 1] y is a Tensor: - y.data = [[0, 2, 6]] + y.data = [[2, 4]] y.dims = [1, 3] then we get a 1-level LoDTensor: - out.lod = [[ 0, 2, 6 ]] + out.lod = [[2, 4]] out.data = [[1.0], [2.0], [3.0], [4.0], [5.0], [6.0]] out.dims = [6, 1] * Example 3: Given a 1-level LoDTensor x: - x.lod = [[ 0, 2, 5 6 ]] + x.lod = [[2, 3, 1]] x.data = [[1.0], [2.0], [3.0], [4.0], [5.0], [6.0]] x.dims = [6, 1] y is a 2-level LoDTensor: - y.lod = [[0, 2, 4], [0, 2, 5, 6]] + y.lod = [[2, 2], [2, 2, 1, 1]] y.data = [[1.1], [2.1], [3.1], [4.1], [5.1], [6.1]] y.dims = [6, 1] then we get a 2-level LoDTensor: - out.lod = [[0, 2, 4], [0, 2, 5, 6]] + out.lod = [[2, 2], [2, 2, 1, 1]] out.data = [[1.0], [2.0], [3.0], [4.0], [5.0], [6.0]] out.dims = [6, 1] Args: x (Variable): Input variable which could be a Tensor or LodTensor. - y (Variable|None): If provided, output's LoD would be derived from y. + y (Variable|None): If provided, output's LoD would be derived + from :attr:`y`. target_lod (list|tuple|None): One level LoD which should be considered - as target LoD when y not provided. + as target LoD when :attr:`y` not provided. Returns: - Variable: Output variable with LoD specified by this operator. + Variable: Output variable with LoD specified by this layer. Raises: - ValueError: If y and target_lod are both None. + ValueError: If :attr:`y` and :attr:`target_lod` are both None. Examples: .. code-block:: python @@ -3582,9 +4596,7 @@ def lrn(input, n=5, k=1.0, alpha=1e-4, beta=0.75, name=None): .. math:: - Output(i, x, y) = Input(i, x, y) / \left( - k + \alpha \sum\limits^{\min(C, c + n/2)}_{j = \max(0, c - n/2)} - (Input(j, x, y))^2 \right)^{\beta} + Output(i, x, y) = Input(i, x, y) / \\left(k + \\alpha \\sum\\limits^{\\min(C, c + n/2)}_{j = \\max(0, c - n/2)}(Input(j, x, y))^2\\right)^{\\beta} In the above equation: @@ -3613,7 +4625,8 @@ def lrn(input, n=5, k=1.0, alpha=1e-4, beta=0.75, name=None): Examples: .. code-block:: python - data = fluid.layers.data(name="data", shape=[3, 112, 112], dtype="float32") + data = fluid.layers.data( + name="data", shape=[3, 112, 112], dtype="float32") lrn = fluid.layers.lrn(input=data) """ helper = LayerHelper('lrn', **locals()) @@ -3767,37 +4780,25 @@ def label_smooth(label, return smooth_label +@templatedoc() def roi_pool(input, rois, pooled_height=1, pooled_width=1, spatial_scale=1.0): """ - Region of interest pooling (also known as RoI pooling) is to perform - is to perform max pooling on inputs of nonuniform sizes to obtain - fixed-size feature maps (e.g. 7*7). - The operator has three steps: - 1. Dividing each region proposal into equal-sized sections with - the pooled_width and pooled_height - 2. 
Finding the largest value in each section
-    3. Copying these max values to the output buffer
+    ${comment}

    Args:
-        input (Variable): The input for ROI pooling.
-        rois (Variable): ROIs (Regions of Interest) to pool over. It should
-                         be a 2-D one level LoTensor of shape [num_rois, 4].
-                         The layout is [x1, y1, x2, y2], where (x1, y1)
-                         is the top left coordinates, and (x2, y2) is the
-                         bottom right coordinates. The num_rois is the
-                         total number of ROIs in this batch data.
-        pooled_height (integer): The pooled output height. Default: 1
-        pooled_width (integer): The pooled output width. Default: 1
-        spatial_scale (float): Multiplicative spatial scale factor. To
-                               translate ROI coords from their input scale
-                               to the scale used when pooling. Default: 1.0
+        input (Variable): ${x_comment}
+        rois (Variable): ROIs (Regions of Interest) to pool over.
+        pooled_height (integer): ${pooled_height_comment} Default: 1
+        pooled_width (integer): ${pooled_width_comment} Default: 1
+        spatial_scale (float): ${spatial_scale_comment} Default: 1.0

    Returns:
-        pool_out (Variable): The output is a 4-D tensor of the shape
-                             (num_rois, channels, pooled_h, pooled_w).
+        Variable: ${out_comment}.

    Examples:
-        pool_out = fluid.layers.roi_pool(input=x, rois=rois, 7, 7, 1.0)
+        .. code-block:: python
+
+            pool_out = fluid.layers.roi_pool(
+                input=x, rois=rois, pooled_height=7, pooled_width=7,
+                spatial_scale=1.0)
    """
    helper = LayerHelper('roi_pool', **locals())
    dtype = helper.input_dtype()
@@ -3815,3 +4816,548 @@ def roi_pool(input, rois, pooled_height=1, pooled_width=1, spatial_scale=1.0):
            "spatial_scale": spatial_scale
        })
    return pool_out
+
+
+def dice_loss(input, label, epsilon=0.00001):
+    """
+    Dice loss for comparing the similarity of two batches of data, usually
+    used for binary image segmentation, i.e. when labels are binary.
+    The dice loss can be defined as the following equation:
+
+    .. math::
+
+        dice\_loss &= 1 - \\frac{2 * intersection\_area}{total\_area} \\\\
+                   &= \\frac{(total\_area - intersection\_area) - intersection\_area}{total\_area} \\\\
+                   &= \\frac{(union\_area - intersection\_area)}{total\_area}
+
+
+    Args:
+        input (Variable): The predictions with rank>=2. The first dimension is
+            batch size, and the last dimension is class number.
+        label (Variable): The ground truth with the same rank as input. The
+            first dimension is batch size, and the last dimension is 1.
+        epsilon (float): The epsilon will be added to the numerator and
+            denominator. If both input and label are empty, it makes sure dice
+            is 1. Default: 0.00001
+
+    Returns:
+        dice_loss (Variable): The dice loss with shape [1].
+
+    Examples:
+        .. code-block:: python
+
+            predictions = fluid.layers.softmax(x)
+            loss = fluid.layers.dice_loss(input=predictions, label=label)
+    """
+    label = one_hot(label, depth=input.shape[-1])
+    reduce_dim = list(range(1, len(input.shape)))
+    inse = reduce_sum(input * label, dim=reduce_dim)
+    dice_denominator = reduce_sum(
+        input, dim=reduce_dim) + reduce_sum(
+            label, dim=reduce_dim)
+    dice_score = 1 - inse * 2 / (dice_denominator + epsilon)
+    return reduce_mean(dice_score)
+
+
+def image_resize(input,
+                 out_shape=None,
+                 scale=None,
+                 name=None,
+                 resample='BILINEAR'):
+    """
+    **Resize a Batch of Images**
+
+    The input must be a tensor of the shape (num_batches, channels, in_h, in_w),
+    and the resizing only applies on the last two dimensions (height and width).
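+
+    For instance (an illustrative case, not taken from the op's own docs):
+    resizing an input of shape (2, 3, 32, 32) with out_shape=[64, 64]
+    produces an output of shape (2, 3, 64, 64), while scale=0.5 would
+    produce (2, 3, 16, 16).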
+
+    Supporting resample methods:
+
+        'BILINEAR' : Bilinear interpolation
+
+    Args:
+        input (Variable): The input tensor of image resize layer,
+            This is a 4-D tensor of the shape
+            (num_batches, channels, in_h, in_w).
+        out_shape(list|tuple|Variable|None): Output shape of image resize
+            layer, the shape is (out_h, out_w).
+            Default: None
+        scale(float|None): The multiplier for the input height or width.
+            At least one of out_shape or scale must be set.
+            And out_shape has a higher priority than scale.
+            Default: None
+        name(str|None): A name for this layer(optional). If set None, the layer
+            will be named automatically.
+        resample(str): The resample method. It can only be 'BILINEAR' currently.
+            Default: 'BILINEAR'
+
+    Returns:
+        Variable: The output is a 4-D tensor of the shape
+        (num_batches, channels, out_h, out_w).
+
+    Examples:
+        .. code-block:: python
+
+            out = fluid.layers.image_resize(input, out_shape=[12, 12])
+    """
+    resample_methods = {'BILINEAR': 'bilinear_interp'}
+    if resample not in resample_methods:
+        raise ValueError(
+            "The 'resample' of image_resize can only be 'BILINEAR' currently.")
+    if out_shape is None and scale is None:
+        raise ValueError("One of out_shape and scale must not be None")
+    helper = LayerHelper('bilinear_interp', **locals())
+    dtype = helper.input_dtype()
+
+    def _is_list_or_turple_(data):
+        return (isinstance(data, list) or isinstance(data, tuple))
+
+    out_h = 0
+    out_w = 0
+    inputs = {"X": input}
+    if out_shape is not None:
+        if not (_is_list_or_turple_(out_shape) and
+                len(out_shape) == 2) and not isinstance(out_shape, Variable):
+            raise ValueError('out_shape should be a list or tuple or variable')
+        if _is_list_or_turple_(out_shape):
+            out_shape = list(map(int, out_shape))
+            out_h = out_shape[0]
+            out_w = out_shape[1]
+        else:
+            inputs['OutSize'] = out_shape
+    else:
+        out_h = int(input.shape[2] * scale)
+        out_w = int(input.shape[3] * scale)
+
+    out = helper.create_tmp_variable(dtype)
+    helper.append_op(
+        type=resample_methods[resample],
+        inputs=inputs,
+        outputs={"Out": out},
+        attrs={"out_h": out_h,
+               "out_w": out_w})
+    return out
+
+
+@templatedoc(op_type="bilinear_interp")
+def resize_bilinear(input, out_shape=None, scale=None, name=None):
+    """
+    ${comment}
+
+    Args:
+        input(${x_type}): ${x_comment}.
+
+        out_shape(${out_size_type}): ${out_size_comment}.
+
+        scale(float|None): The multiplier for the input height or width. At
+            least one of out_shape or scale must be set. And out_shape has
+            a higher priority than scale. Default: None.
+
+        name(str|None): The output variable name.
+
+    Returns:
+        ${out_comment}.
+    """
+
+    return image_resize(input, out_shape, scale, name, 'BILINEAR')
+
+
+def image_resize_short(input, out_short_len, resample='BILINEAR'):
+    """
+    Resize a batch of images. The short edge of the input images will be
+    resized to the given 'out_short_len'. The long edge of the input images
+    will be resized proportionately to keep the images' length-width ratio
+    constant.
+
+    Args:
+        input (Variable): The input tensor of image resize layer,
+            This is a 4-D tensor of the shape
+            (num_batches, channels, in_h, in_w).
+        out_short_len(int): The length of output images' short edge.
+        resample (str): resample method, default: BILINEAR.
+
+    Returns:
+        Variable: The output is a 4-D tensor of the shape
+        (num_batches, channels, out_h, out_w).
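+
+    Examples:
+        .. code-block:: python
+
+            # A minimal usage sketch (not from the original docs): `input` is
+            # assumed to be a 4-D image tensor; the short edge is resized to
+            # 256 and the long edge is scaled proportionately.
+            out = fluid.layers.image_resize_short(input, out_short_len=256)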
+ """ + in_shape = input.shape + if len(in_shape) != 4: + raise ValueError( + "The rank of input must be 4 (num_batches, channels, in_h, in_w).") + hw = in_shape[2:4] + short_idx = hw.index(min(hw)) + long_idx = 1 - short_idx + out_shape = list(hw) + out_shape[short_idx] = out_short_len + out_shape[long_idx] = int( + float(out_shape[long_idx]) * (float(out_short_len) / float(hw[ + short_idx])) + 0.5) + return image_resize(input=input, out_shape=out_shape, resample=resample) + + +def gather(input, index): + """ + **Gather Layer** + + Output is obtained by gathering entries of the outer-most dimension + of X indexed by `index` and concatenate them together. + + .. math:: + + Out = X[Index] + + + .. code-block:: text + + + Given: + + X = [[1, 2], + [3, 4], + [5, 6]] + + Index = [1, 2] + + Then: + + Out = [[3, 4], + [5, 6]] + + Args: + input (Variable): The source input with rank>=1. + index (Variable): The index input with rank=1. + + Returns: + output (Variable): The output is a tensor with the same rank as input. + + Examples: + + .. code-block:: python + + output = fluid.layers.gather(x, index) + """ + helper = LayerHelper('gather', **locals()) + dtype = helper.input_dtype() + out = helper.create_tmp_variable(dtype) + helper.append_op( + type="gather", + inputs={"X": input, + "Index": index}, + outputs={"Out": out}) + return out + + +@templatedoc() +def random_crop(x, shape, seed=None): + """ + ${comment} + + Args: + x(${x_type}): ${x_comment} + shape(${shape_type}): ${shape_comment} + seed(int|${seed_type}|None): ${seed_comment} By default, the seed will + get from `random.randint(-65536, 65535)`. + + Returns: + ${out_comment} + + Examples: + >>> img = fluid.layers.data("img", [3, 256, 256]) + >>> cropped_img = fluid.layers.random_crop(img, shape=[3, 224, 224]) + """ + helper = LayerHelper("random_crop", **locals()) + dtype = x.dtype + out = helper.create_tmp_variable(dtype) + if seed is None: + seed = random.randint(-65536, 65535) + op_attrs = {"shape": shape} + if isinstance(seed, int): + op_attrs["startup_seed"] = seed + seed = helper.create_variable( + name=unique_name.generate("random_crop_seed"), + dtype="int64", + persistable=True) + elif not isinstance(seed, Variable): + raise ValueError("'seed' must be a Variable or an int.") + helper.append_op( + type="random_crop", + inputs={"X": x, + "Seed": seed}, + outputs={"Out": out, + "SeedOut": seed}, + attrs=op_attrs) + return out + + +def log(x): + """ + Calculates the natural log of the given input tensor, element-wise. + + .. math:: + + Out = \\ln(x) + + Args: + x (Variable): Input tensor. + + Returns: + Variable: The natural log of the input tensor computed element-wise. + + Examples: + + .. code-block:: python + + output = fluid.layers.log(x) + """ + helper = LayerHelper('log', **locals()) + dtype = helper.input_dtype(input_param_name='x') + out = helper.create_tmp_variable(dtype) + helper.append_op(type="log", inputs={"X": x}, outputs={"Out": out}) + return out + + +def relu(x): + """ + Relu takes one input data (Tensor) and produces one output data (Tensor) + where the rectified linear function, y = max(0, x), is applied to + the tensor elementwise. + + .. math:: + + Out = \\max(0, x) + + Args: + x (Variable): The input tensor. + + Returns: + Variable: The output tensor with the same shape as input. + + Examples: + + .. 
code-block:: python

+            output = fluid.layers.relu(x)
+    """
+    helper = LayerHelper('relu', **locals())
+    dtype = helper.input_dtype(input_param_name='x')
+    out = helper.create_tmp_variable(dtype)
+    helper.append_op(type="relu", inputs={"X": x}, outputs={"Out": out})
+    return out
+
+
+def mean_iou(input, label, num_classes):
+    """
+    Mean Intersection-Over-Union is a common evaluation metric for
+    semantic image segmentation, which first computes the IOU for each
+    semantic class and then computes the average over classes.
+    IOU is defined as follows:
+
+    .. math::
+
+        IOU = \\frac{true\_positive}{(true\_positive + false\_positive + false\_negative)}.
+
+    The predictions are accumulated in a confusion matrix and mean-IOU
+    is then calculated from it.
+
+
+    Args:
+        input (Variable): A Tensor of prediction results for semantic labels
+            with type int32 or int64.
+        label (Variable): A Tensor of ground truth labels with type int32 or
+            int64. Its shape should be the same as input.
+        num_classes (int): The possible number of labels.
+
+    Returns:
+        mean_iou (Variable): A Tensor representing the mean
+            intersection-over-union with shape [1].
+        out_wrong (Variable): A Tensor with shape [num_classes]. The wrong
+            numbers of each class.
+        out_correct (Variable): A Tensor with shape [num_classes]. The correct
+            numbers of each class.
+
+    Examples:
+
+        .. code-block:: python
+
+            iou, wrongs, corrects = fluid.layers.mean_iou(predict, label, num_classes)
+    """
+    helper = LayerHelper('mean_iou', **locals())
+    dtype = helper.input_dtype()
+    out_mean_iou = helper.create_tmp_variable(dtype='float32')
+    out_wrong = helper.create_tmp_variable(dtype='int32')
+    out_correct = helper.create_tmp_variable(dtype='int32')
+    helper.append_op(
+        type="mean_iou",
+        inputs={"Predictions": input,
+                "Labels": label},
+        outputs={
+            "OutMeanIou": out_mean_iou,
+            "OutWrong": out_wrong,
+            "OutCorrect": out_correct
+        },
+        attrs={"num_classes": num_classes})
+    return out_mean_iou, out_wrong, out_correct
+
+
+def crop(x, shape=None, offsets=None, name=None):
+    """
+    Crop input into output, as specified by offsets and shape.
+
+    .. code-block:: text
+
+        * Case 1:
+            Given
+                X = [[0, 1, 2, 0, 0]
+                     [0, 3, 4, 0, 0]
+                     [0, 0, 0, 0, 0]],
+            and
+                shape = [2, 2],
+                offsets = [0, 1],
+            output is:
+                Out = [[1, 2],
+                       [3, 4]].
+        * Case 2:
+            Given
+                X = [[0, 1, 2, 5, 0]
+                     [0, 3, 4, 6, 0]
+                     [0, 0, 0, 0, 0]],
+            and shape is tensor
+                shape = [[0, 0, 0]
+                         [0, 0, 0]]
+            and
+                offsets = [0, 1],
+
+            output is:
+                Out = [[1, 2, 5],
+                       [3, 4, 6]].
+
+    Args:
+        x (Variable): The input tensor variable.
+        shape (Variable|list/tuple of integers): The output shape is specified
+            by `shape`, which can be a Variable or a list/tuple of integers.
+            If a tensor Variable, its rank must be the same as `x`. This way
+            is suitable for the case that the output shape may be changed each
+            iteration. If a list/tuple of integers, its length must be the same
+            as the rank of `x`.
+        offsets (Variable|list/tuple of integers|None): Specifies the cropping
+            offsets at each dimension. It can be a Variable or a list/tuple
+            of integers. If a tensor Variable, its rank must be the same as
+            `x`. This way is suitable for the case that the offsets may be
+            changed each iteration. If a list/tuple of integers, its length
+            must be the same as the rank of `x`. If None, the offsets are 0
+            at each dimension.
+        name(str|None): A name for this layer(optional). If set None, the layer
+            will be named automatically.
+
+    Returns:
+        Variable: The cropped tensor variable.
+
+    Raises:
+        ValueError: If shape is not a list, tuple or Variable.
+
+    Examples:
+
+        .. code-block:: python
+
+            x = fluid.layers.data(name="x", shape=[3, 5], dtype="float32")
+            y = fluid.layers.data(name="y", shape=[2, 3], dtype="float32")
+            crop = fluid.layers.crop(x, shape=y)
+
+            # or
+            z = fluid.layers.data(name="z", shape=[3, 5], dtype="float32")
+            crop = fluid.layers.crop(z, shape=[2, 3])
+
+    """
+    helper = LayerHelper('crop', **locals())
+
+    if not (isinstance(shape, list) or isinstance(shape, tuple) or \
+            isinstance(shape, Variable)):
+        raise ValueError("The shape should be a list, tuple or Variable.")
+
+    if offsets is None:
+        offsets = [0] * len(x.shape)
+
+    out = helper.create_tmp_variable(x.dtype)
+    ipts = {'X': x}
+    attrs = {}
+    if isinstance(shape, Variable):
+        ipts['Y'] = shape
+    else:
+        attrs['shape'] = shape
+    if isinstance(offsets, Variable):
+        ipts['Offsets'] = offsets
+    else:
+        attrs['offsets'] = offsets
+
+    helper.append_op(
+        type='crop',
+        inputs=ipts,
+        outputs={'Out': out},
+        attrs=None if len(attrs) == 0 else attrs)
+    return out
+
+
+def rank_loss(label, left, right, name=None):
+    """
+    **Rank loss layer for RankNet**
+
+    RankNet(http://icml.cc/2015/wp-content/uploads/2015/06/icml_ranking.pdf)
+    is a pairwise ranking model with a training sample consisting of a pair
+    of documents, A and B. Label P indicates whether A is ranked higher than B
+    or not:
+
+    P = {0, 1} or {0, 0.5, 1}, where 0.5 means that there is no information
+    about the rank of the input pair.
+
+    Rank loss layer takes three inputs: left (o_i), right (o_j) and
+    label (P_{i,j}). The inputs respectively represent RankNet's output scores
+    for documents A and B and the value of label P. The following equation
+    computes rank loss C_{i,j} from the inputs:
+
+    $$
+      C_{i,j} = -\tilde{P_{ij}} * o_{i,j} + \log(1 + e^{o_{i,j}}) \\
+      o_{i,j} =  o_i - o_j  \\
+      \tilde{P_{i,j}} = \left \{0, 0.5, 1 \right \} \ or \ \left \{0, 1 \right \}
+    $$
+
+    Rank loss layer takes batch inputs with size batch_size (batch_size >= 1).
+
+    Args:
+        label (Variable): Indicates whether A is ranked higher than B or not.
+        left (Variable): RankNet's output score for doc A.
+        right (Variable): RankNet's output score for doc B.
+        name(str|None): A name for this layer(optional). If set None, the layer
+            will be named automatically.
+
+    Returns:
+        Variable: The value of rank loss.
+
+    Raises:
+        ValueError: If any of label, left, and right is not a Variable.
+
+    Examples:
+
+        ..
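code-block:: python
+
+            # A numpy sketch of the formula above (an illustration added for
+            # this doc; o_i, o_j and p are hypothetical score/label arrays):
+            import numpy as np
+            o_i, o_j, p = np.array([2.0]), np.array([1.0]), np.array([1.0])
+            o_ij = o_i - o_j
+            c_ij = -p * o_ij + np.log(1.0 + np.exp(o_ij))
+
+        ..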
code-block:: python + + label = fluid.layers.data(name="label", shape=[4, 1], dtype="float32") + left = fluid.layers.data(name="left", shape=[4, 1], dtype="float32") + right = fluid.layers.data(name="right", shape=[4, 1], dtype="float32") + out = fluid.layers.rank_loss(label, left, right) + + + """ + helper = LayerHelper('rank_loss', **locals()) + + if not (isinstance(label, Variable)): + raise ValueError("The label should be a Variable") + + if not (isinstance(left, Variable)): + raise ValueError("The left should be a Variable") + + if not (isinstance(right, Variable)): + raise ValueError("The right should be a Variable") + + out = helper.create_tmp_variable("float32") + + helper.append_op( + type='rank_loss', + inputs={"Label": label, + "Left": left, + "Right": right}, + outputs={'Out': out}) + return out diff --git a/python/paddle/fluid/layers/ops.py b/python/paddle/fluid/layers/ops.py index a9fe25744c..f70c7f2258 100644 --- a/python/paddle/fluid/layers/ops.py +++ b/python/paddle/fluid/layers/ops.py @@ -11,13 +11,12 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -from layer_function_generator import generate_layer_fn +from .layer_function_generator import generate_layer_fn __activations__ = [ 'sigmoid', 'logsigmoid', 'exp', - 'relu', 'tanh', 'tanh_shrink', 'softshrink', @@ -29,7 +28,6 @@ __activations__ = [ 'sin', 'round', 'reciprocal', - 'log', 'square', 'softplus', 'softsign', @@ -40,8 +38,6 @@ __activations__ = [ 'relu6', 'pow', 'stanh', - 'hard_shrink', - 'thresholded_relu', 'hard_sigmoid', 'swish', ] @@ -64,14 +60,100 @@ __all__ = [ 'logical_or', 'logical_xor', 'logical_not', - 'uniform_random', 'uniform_random_batch_size_like', 'gaussian_random', 'gaussian_random_batch_size_like', - 'cumsum', 'scatter', 'sum', + 'slice', + 'shape', + 'maxout', ] + __activations__ for _OP in set(__all__): globals()[_OP] = generate_layer_fn(_OP) + +__all__ += ["uniform_random"] + +_uniform_random_ = generate_layer_fn('uniform_random') + + +def uniform_random(shape, dtype=None, min=None, max=None, seed=None): + kwargs = dict() + for name in locals(): + val = locals()[name] + if val is not None: + kwargs[name] = val + return _uniform_random_(**kwargs) + + +uniform_random.__doc__ = _uniform_random_.__doc__ + """ +Examples: + + >>> result = fluid.layers.uniform_random(shape=[32, 784]) +""" + +__all__ += ['hard_shrink'] + +_hard_shrink_ = generate_layer_fn('hard_shrink') + + +def hard_shrink(x, threshold=None): + kwargs = dict() + for name in locals(): + val = locals()[name] + if val is not None: + kwargs[name] = val + return _hard_shrink_(**kwargs) + + +hard_shrink.__doc__ = _hard_shrink_.__doc__ + """ +Examples: + + >>> data = fluid.layers.data(name="input", shape=[784]) + >>> result = fluid.layers.hard_shrink(x=data, threshold=0.3) +""" + +__all__ += ['cumsum'] + +_cum_sum_ = generate_layer_fn('cumsum') + + +def cumsum(x, axis=None, exclusive=None, reverse=None): + kwargs = dict() + for name in locals(): + val = locals()[name] + if val is not None: + kwargs[name] = val + + return _cum_sum_(**kwargs) + + +cumsum.__doc__ = _cum_sum_.__doc__ + """ +Examples: + + >>> data = fluid.layers.data(name="input", shape=[32, 784]) + >>> result = fluid.layers.cumsum(data, axis=0) +""" + +__all__ += ['thresholded_relu'] + +_thresholded_relu_ = generate_layer_fn('thresholded_relu') + + +def thresholded_relu(x, threshold=None): + kwargs = dict() + for name in locals(): + val = 
locals()[name]
+        if val is not None:
+            kwargs[name] = val
+
+    return _thresholded_relu_(**kwargs)
+
+
+thresholded_relu.__doc__ = _thresholded_relu_.__doc__ + """
+Examples:
+
+    >>> data = fluid.layers.data(name="input", shape=[1])
+    >>> result = fluid.layers.thresholded_relu(data, threshold=0.4)
+"""
diff --git a/python/paddle/fluid/layers/tensor.py b/python/paddle/fluid/layers/tensor.py
index 4be0dc6a6b..b93d721c12 100644
--- a/python/paddle/fluid/layers/tensor.py
+++ b/python/paddle/fluid/layers/tensor.py
@@ -6,7 +6,7 @@
 #
 #     http://www.apache.org/licenses/LICENSE-2.0
 #
-# Unless required by applicable law or agreed to in writing, software
+# Unless required by applicable law or agreed to in writing, software
 # distributed under the License is distributed on an "AS IS" BASIS,
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
@@ -18,6 +18,7 @@ from ..framework import convert_np_dtype_to_dtype_
 from ..framework import Variable
 from ..initializer import Constant, force_init_on_cpu
 from ..core import VarDesc
+from .layer_function_generator import templatedoc
 import numpy

 __all__ = [
@@ -30,12 +31,34 @@ __all__ = [
     'assign',
     'fill_constant_batch_size_like',
     'fill_constant',
+    'argmin',
+    'argmax',
+    'argsort',
     'ones',
     'zeros',
+    'reverse',
 ]


 def create_tensor(dtype, name=None, persistable=False):
+    """
+    Create a variable, which will hold a LoDTensor with data type dtype.
+
+    Args:
+        dtype(string): 'float32'|'int32'|..., the data type of the
+            created tensor.
+        name(string): The name of the created tensor, if not set,
+            the name will be a random unique one.
+        persistable(bool): Set the persistable flag of the created tensor.
+
+    Returns:
+        Variable: The tensor variable storing the created tensor.
+
+    Examples:
+        .. code-block:: python
+
+          tensor = fluid.layers.create_tensor(dtype='float32')
+    """
     helper = LayerHelper("create_tensor", **locals())
     return helper.create_variable(
         name=helper.name, dtype=dtype, persistable=persistable)
@@ -48,7 +71,12 @@ def create_parameter(shape,
                      is_bias=False,
                      default_initializer=None):
     """
-    Create a parameter
+    Create a parameter. The parameter is a learnable variable, which can have
+    a gradient and can be optimized.
+
+    NOTE: this is a very low-level API. It is useful when you create
+    operators by yourself, instead of using layers.
+
     Args:
         shape(list[int]): shape of the parameter
         dtype(string): element type of the parameter
@@ -60,7 +88,12 @@ def create_parameter(shape,
         default_initializer(Initializer): initializer for the parameter

     Returns:
-        Parameter: the created parameter
+        the created parameter.
+
+    Examples:
+        >>> W = fluid.layers.create_parameter(shape=[784, 200], dtype='float32')
+        >>> data = fluid.layers.data(name="img", shape=[64, 784], append_batch_size=False)
+        >>> hidden = fluid.layers.matmul(x=data, y=W)
     """
     helper = LayerHelper("create_parameter", **locals())
     if attr is None:
@@ -76,16 +109,29 @@ def create_global_var(shape,
                       force_cpu=False,
                       name=None):
     """
-    Create a global variable. such as global_step
+    Create a new variable in the global block(block 0).
+
     Args:
         shape(list[int]): shape of the variable
-        value(float): the value of the variable
-        dtype(string): element type of the parameter
-        persistable(bool): if this variable is persistable
-        force_cpu(bool): force this variable to be on CPU
+        value(float): the value of the variable. The newly created
+                      variable will be filled with it.
+ dtype(string): data type of the variable + persistable(bool): if this variable is persistable. + Default: False + force_cpu(bool): force this variable to be on CPU. + Default: False + name(str|None): The name of the variable. If set to None the variable + name will be generated automatically. + Default: None Returns: Variable: the created Variable + + Examples: + .. code-block:: python + + var = fluid.create_global_var(shape=[2,3], value=1.0, dtype='float32', + persistable=True, force_cpu=True, name='new_var') """ helper = LayerHelper("global_var", **locals()) var = helper.create_global_variable( @@ -98,8 +144,21 @@ def create_global_var(shape, def cast(x, dtype): """ - This function takes in the input with input_dtype - and casts it to the output_dtype as the output. + This layer takes in the Variable :attr:`x` with :attr:`x.dtype` and casts + it to the output with :attr:`dtype`. + + Args: + x (Variable): The input Variable for casting. + dtype(np.dtype|core.VarDesc.VarType|str): Data type of the output Variable. + + Returns: + Variable: The output Variable after casting. + + Examples: + .. code-block:: python + + data = fluid.layers.data(name='x', shape=[13], dtype='float32') + result = fluid.layers.cast(x=data, dtype='float64') """ helper = LayerHelper('cast', **locals()) out = helper.create_tmp_variable(dtype=dtype) @@ -112,7 +171,7 @@ def cast(x, dtype): return out -def concat(input, axis=0): +def concat(input, axis=0, name=None): """ **Concat** @@ -122,13 +181,16 @@ def concat(input, axis=0): Args: input(list): List of tensors to be concatenated axis(int): Integer axis along which the tensors will be concatenated + name(str|None): A name for this layer(optional). If set None, the layer + will be named automatically. Returns: Variable: Output variable of the concatenation Examples: .. code-block:: python - out = fluid.layers.concat(input=[Efirst, Esecond, Ethird, Efourth]) + + out = fluid.layers.concat(input=[Efirst, Esecond, Ethird, Efourth]) """ helper = LayerHelper('concat', **locals()) out = helper.create_tmp_variable(dtype=helper.input_dtype()) @@ -141,19 +203,21 @@ def concat(input, axis=0): def sums(input, out=None): - """This function performs the sum operation on the input and returns the + """ + This function performs the sum operation on the input and returns the result as the output. Args: input (Variable|list): The input tensor that has the elements that need to be summed up. + out (Variable|None): Output parameter. The sum result. + Default: None Returns: - Variable: The tensor type variable that has the sum of input - written to it. + Variable: the sum of input. The same as the argument 'out' Examples: - .. code-block::python + .. 
code-block:: python tmp = fluid.layers.zeros(shape=[10], dtype='int32') i = fluid.layers.fill_constant(shape=[1], dtype='int64', value=10) @@ -167,11 +231,15 @@ def sums(input, out=None): helper = LayerHelper('sum', **locals()) if out is None: out = helper.create_tmp_variable(dtype=helper.input_dtype()) - helper.append_op(type='sum', inputs={'X': input}, outputs={'Out': out}) + helper.append_op( + type='sum', + inputs={'X': input}, + outputs={'Out': out}, + attrs={'use_mkldnn': False}) return out -def assign(input, output): +def assign(input, output=None): """ **Assign** @@ -179,18 +247,21 @@ def assign(input, output): Args: input(Variable|numpy.ndarray): The source variable - output(Variable): The destination variable + output(Variable|None): The destination variable Returns: Variable: The destination variable that was supplied as the *output*. Examples: .. code-block:: python + out = fluid.layers.create_tensor(dtype='float32') hidden = fluid.layers.fc(input=data, size=10) fluid.layers.assign(hidden, out) """ helper = LayerHelper('assign', **locals()) + if output is None: + output = helper.create_tmp_variable(dtype=input.dtype) if isinstance(input, Variable): helper.append_op( type='assign', inputs={'X': [input]}, outputs={'Out': [output]}) @@ -264,6 +335,7 @@ def fill_constant(shape, dtype, value, force_cpu=False, out=None): return out +@templatedoc() def fill_constant_batch_size_like(input, shape, dtype, @@ -271,30 +343,28 @@ def fill_constant_batch_size_like(input, input_dim_idx=0, output_dim_idx=0): """ - **fill_constant_batch_size_like** - - This function creates a tensor of specified *shape*, *dtype* and batch size, - and initializes this with a constant supplied in *value*. The batch size is - obtained from the `input` tensor. + ${comment} It also sets *stop_gradient* to True. + >>> data = fluid.layers.fill_constant_batch_size_like( + >>> input=like, shape=[1], value=0, dtype='int64') + Args: - input(Variable): Tensor whose dimensions will be used to get batch size - shape(tuple|list|None): Shape of output tensor - dtype(np.dtype|core.VarDesc.VarType|str): Data type of output tensor - value(float): Constant value to initialize the output tensor - input_dim_idx(int): Index of input's batch size dimension - output_dim_idx(int): Index of output's batch size dimension + input(${input_type}): ${input_comment}. - Returns: - Variable: The tensor variable storing the output + shape(${shape_type}): ${shape_comment}. - Examples: - .. code-block:: python + dtype(${dtype_type}): ${dtype_comment}. + + value(${value_type}): ${value_comment}. + + input_dim_idx(${input_dim_idx_type}): ${input_dim_idx_comment}. + + output_dim_idx(${output_dim_idx_type}): ${output_dim_idx_comment}. - data = fluid.layers.fill_constant_batch_size_like( - input=like, shape=[1], value=0, dtype='int64') + Returns: + ${out_comment}. """ helper = LayerHelper("fill_constant_batch_size_like", **locals()) out = helper.create_tmp_variable(dtype=dtype) @@ -313,6 +383,120 @@ def fill_constant_batch_size_like(input, return out +def argmin(x, axis=0): + """ + **argmin** + + This function computes the indices of the min elements + of the input tensor's element along the provided axis. + + Args: + x(Variable): The input to compute the indices of + the min elements. + axis(int): Axis to compute indices along. + + Returns: + Variable: The tensor variable storing the output + + Examples: + .. 
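code-block:: python
+
+              # The same semantics with numpy (an illustration added for
+              # this doc; numpy and the values are assumptions):
+              import numpy as np
+              x_np = np.array([[7, 2, 5], [3, 9, 1]])
+              np.argmin(x_np, axis=0)   # [1, 0, 1]
+              np.argmin(x_np, axis=-1)  # [1, 2]
+
+        ..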
code-block:: python
+
+              out = fluid.layers.argmin(x=x, axis=0)
+              out = fluid.layers.argmin(x=x, axis=-1)
+    """
+    helper = LayerHelper("arg_min", **locals())
+    out = helper.create_tmp_variable(VarDesc.VarType.INT64)
+    helper.append_op(
+        type='arg_min',
+        inputs={'X': x},
+        outputs={'Out': [out]},
+        attrs={'axis': axis})
+    return out
+
+
+def argmax(x, axis=0):
+    """
+    **argmax**
+
+    This function computes the indices of the max elements
+    of the input tensor's element along the provided axis.
+
+    Args:
+        x(Variable): The input to compute the indices of
+                     the max elements.
+        axis(int): Axis to compute indices along.
+
+    Returns:
+        Variable: The tensor variable storing the output
+
+    Examples:
+        .. code-block:: python
+
+              out = fluid.layers.argmax(x=x, axis=0)
+              out = fluid.layers.argmax(x=x, axis=-1)
+    """
+    helper = LayerHelper("arg_max", **locals())
+    out = helper.create_tmp_variable(VarDesc.VarType.INT64)
+    helper.append_op(
+        type='arg_max',
+        inputs={'X': x},
+        outputs={'Out': [out]},
+        attrs={'axis': axis})
+    return out
+
+
+def argsort(input, axis=-1, name=None):
+    """
+    Performs sorting on the input Variable along the given axis, and outputs
+    a sorted data Variable and its corresponding index Variable with the same
+    shape as :attr:`input`.
+
+    .. code-block:: text
+
+        For example, the given axis is -1 and the input Variable
+
+            input = [[0.15849551, 0.45865775, 0.8563702 ],
+                     [0.12070083, 0.28766365, 0.18776911]],
+
+        after argsort, the sorted Variable becomes
+
+            out = [[0.15849551, 0.45865775, 0.8563702 ],
+                   [0.12070083, 0.18776911, 0.28766365]],
+
+        and the sorted indices along the given axis turn out to be
+
+            indices = [[0, 1, 2],
+                       [0, 2, 1]]
+
+    Args:
+        input(Variable): The input Variable for sorting.
+        axis(int): The axis along which to sort the input Variable. When
+                   :attr:`axis` < 0, the actual axis will be :attr:`axis` +
+                   rank(:attr:`input`). Default -1, the last dimension.
+        name(str|None): (optional) A name for this layer. If set None, the
+                   layer will be named automatically.
+
+    Returns:
+        tuple: A tuple of sorted data Variable and the sorted indices.
+
+    Examples:
+        .. code-block:: python
+
+            input = fluid.layers.data(name="input", shape=[2, 3], dtype="float32")
+            out, indices = fluid.layers.argsort(input, axis=0)
+    """
+    helper = LayerHelper("argsort", **locals())
+    out = helper.create_tmp_variable(dtype=input.dtype, stop_gradient=True)
+    ids = helper.create_tmp_variable(VarDesc.VarType.INT64, stop_gradient=True)
+    helper.append_op(
+        type='argsort',
+        inputs={'X': input},
+        outputs={'Out': out,
+                 'Indices': ids},
+        attrs={'axis': axis})
+    return out, ids
+
+
 def ones(shape, dtype, force_cpu=False):
     """
     **ones**
@@ -347,11 +531,12 @@ def zeros(shape, dtype, force_cpu=False):
     It also sets *stop_gradient* to True.

     Args:
-        shape(tuple|list|None): Shape of output tensor
-        dtype(np.dtype|core.VarDesc.VarType|str): Data type of output tensor
+        shape(tuple|list|None): Shape of output tensor.
+        dtype(np.dtype|core.VarDesc.VarType|str): Data type of output tensor.
+        force_cpu(bool, default False): Whether to make output stay on CPU.

     Returns:
-        Variable: The tensor variable storing the output
+        Variable: The tensor variable storing the output.

     Examples:
         .. code-block:: python
@@ -361,6 +546,40 @@ def zeros(shape, dtype, force_cpu=False):
     return fill_constant(value=0.0, **locals())


+def reverse(x, axis):
+    """
+    **reverse**
+
+    This function reverses the input 'x' along the given axes.
+
+    Args:
+        x(Variable): the input to be reversed.
+        axis(int|tuple|list): The axis (or axes) along which the order of
+                    elements is reversed. If it is a tuple or a list,
+                    reversing will be applied to each axis in the tuple
+                    or list.
+
+    Returns:
+        Variable: The reversed tensor.
+
+    Examples:
+        .. code-block:: python
+
+            out = fluid.layers.reverse(x=x, axis=0)
+            # or:
+            out = fluid.layers.reverse(x=x, axis=[0, 1])
+    """
+    if isinstance(axis, int):
+        axis = [axis]
+    helper = LayerHelper("reverse", **locals())
+    out = helper.create_tmp_variable(dtype=x.dtype)
+    helper.append_op(
+        type='reverse',
+        inputs={'Input': x},
+        outputs={'Out': [out]},
+        attrs={'axis': axis})
+    return out
+
+
 def save(x, file_path, overwrite=True):
     """
     Saves a variable as a file.
@@ -368,9 +587,9 @@
     Args:
         x(variable): The Tensor/LoDTensor to be saved.
         file_path(str): The file path where the variable will be saved.
-        overwrite(bool): Whether or not cover the given file when it has already
-            existed. If it's set 'False' and the file is existed, a runtime
-            error will be thrown.
+        overwrite(bool): Whether or not to overwrite the given file if it
+            already exists. If set to 'False' and the file exists, a runtime
+            error will be thrown.
     """
     helper = LayerHelper("save", **locals())
     helper.append_op(
@@ -386,11 +605,27 @@
     Saves a list of variables into a single file.

     Args:
-        x(list): A list of Tensor/LoDTensor to be saved together in a single file.
+        x(list): A list of Tensor/LoDTensor variables to be saved together in
+            a single file.
         file_path(str): The file path where variables will be saved.
-        overwrite(bool): Whether or not cover the given file when it has already
-            existed. If it's set 'False' and the file is existed, a runtime
-            error will be thrown.
+        overwrite(bool): Whether or not to overwrite the given file if it
+            already exists. If set to 'False' and the file exists, a runtime
+            error will be thrown.
+
+    Returns:
+        There is no return value.
+
+    Examples:
+
+        .. code-block:: python
+
+            v1 = fluid.layers.data(name="data",
+                                   shape=(4, 6),
+                                   dtype="float32")
+            v2 = fluid.layers.data(name="data",
+                                   shape=(6, 8, 4),
+                                   dtype="float32")
+            fluid.layers.save_combine([v1, v2], file_path="output")
     """
     helper = LayerHelper("save_combine", **locals())
     helper.append_op(
@@ -401,22 +636,6 @@ def save_combine(x, file_path, overwrite=True):
                "overwrite": overwrite})


-def load(out, file_path):
-    """
-    Loads a variable from a given file.
-
-    Args:
-        out(variable): The variable to be read from the disk file.
-        file_path(str): The path of the disk file.
-    """
-    helper = LayerHelper("load", **locals())
-    helper.append_op(
-        type="load",
-        inputs={},
-        output={"Out": out},
-        args={"file_path": file_path})
-
-
 def load_combine(out, file_path):
     """
     Loads a list of variables from a single file.
diff --git a/python/paddle/fluid/lod_tensor.py b/python/paddle/fluid/lod_tensor.py
new file mode 100644
index 0000000000..53c33616f5
--- /dev/null
+++ b/python/paddle/fluid/lod_tensor.py
@@ -0,0 +1,134 @@
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from . import core +import numpy as np + +__all__ = ['create_lod_tensor', 'create_random_int_lodtensor'] + + +def create_lod_tensor(data, recursive_seq_lens, place): + """ + Create a lod tensor from a numpy array, a list, or an existing lod tensor. + + Create a lod tensor by doing the following: + + 1. Check that the length-based level of detail (LoD) also known as + recursive_sequence_lengths of the input is valid. + + 2. Convert recursive_sequence_lengths to a offset-based LoD. + + 3. Copy the data from a numpy array, a list or a existing lod tensor to + CPU or GPU device (based on input place). + + 4. Set the level of detail (LoD) using the offset-based LoD. + + Examples: + + Suppose we want LoDTensor to hold data for sequences of word, where each + word is represented by an integer. If we want to create a LoDTensor to + represent two sentences, one of 2 words, and one of 3 words. + + Then :code:`data` can be a numpy array of integers with shape (5, 1). + :code:`recursive_seq_lens` will be [[2, 3]], indicating the length(# of words) in each + sentence. This length-based :code:`recursive_seq_lens` [[2, 3]] will be converted to + offset-based LoD [[0, 2, 5]] inside the function call. + + Please reference :ref:`api_guide_low_level_lod_tensor` for more details + regarding LoD. + + Args: + data(numpy.ndarray|list|LoDTensor): a numpy array or a LoDTensor or a + list holding the data to be copied. + recursive_seq_lens(list): a list of lists indicating the length-based level of detail + info specified by the user. + place(Place): CPU or GPU place indicating where the data in the new + LoDTensor will be stored. + + Returns: + A fluid LoDTensor object with tensor data and recursive_seq_lens info. + """ + if isinstance(data, core.LoDTensor): + return create_lod_tensor(np.array(data), recursive_seq_lens, place) + elif isinstance(data, list): + # When input data is a list, it only deal with the case where the base element + # is an index of shape [1] and dtype int64 (e.g., word id). Hence, the generated + # LoDTensor will be of shape [n, 1] and dtype int64, where `n` is the total number + # of words or other indexes in the sequence. + new_recursive_seq_lens = [] + for seq in data: + new_recursive_seq_lens.append(len(seq)) + assert [ + new_recursive_seq_lens + ] == recursive_seq_lens, "data and recursive_seq_lens do not match" + flattened_data = np.concatenate(data, axis=0).astype("int64") + flattened_data = flattened_data.reshape([len(flattened_data), 1]) + return create_lod_tensor(flattened_data, recursive_seq_lens, place) + elif isinstance(data, np.ndarray): + tensor = core.LoDTensor() + tensor.set(data, place) + tensor.set_recursive_sequence_lengths(recursive_seq_lens) + assert tensor.has_valid_recursive_sequence_lengths( + ), "the provided lod info is invalid" + return tensor + else: + raise TypeError( + "data should be either a LoDTensor, a Numpy array or a list") + + +def create_random_int_lodtensor(recursive_seq_lens, base_shape, place, low, + high): + """ + Create a LoDTensor containing random integers. + + This function is frequently used in the book examples. 
So we revised it + based on the new create_lod_tensor API and put it here in the lod_tensor + module to simplify the code. + + The function does the following: + + 1. Calculate the overall shape of the LoDTensor based on the length-based + :code:`recursive_seq_lens` input and the shape of the basic element in + :code:`base_shape`. + + 2. Create a numpy array of this shape. + + 3. Create the LoDTensor using create_lod_tensor API. + + Suppose we want LoDTensor to hold data for sequences of word, where each + word is represented by an integer. If we want to create a LoDTensor to + represent two sentences, one of 2 words, and one of 3 words. Then + 'base_shape' is [1], input length-based 'recursive_seq_lens' is [[2, 3]]. + Then the overall shape of the LoDTensor would be [5, 1], holding 5 words + for two sentences. + + Args: + recursive_seq_lens(list): a list of lists indicating the length-based + level of detail info specified by the user. + base_shape(list): the shape of the basic element to be held by the + LoDTensor. + place(Place): CPU or GPU place indicating where the data in the new + LoDTensor will be stored. + low(int): the lower bound of the random integers. + high(int): the upper bound of the random integers. + + Returns: + A fluid LoDTensor object with tensor data and recursive_seq_lens info. + """ + assert isinstance(base_shape, list), "base_shape should be a list" + # append the total number of basic elements to the front of its shape + overall_shape = [sum(recursive_seq_lens[-1])] + base_shape + # the range of integer data elements is [low, high] + data = np.random.random_integers(low, high, overall_shape).astype("int64") + return create_lod_tensor(data, recursive_seq_lens, place) diff --git a/python/paddle/fluid/metrics.py b/python/paddle/fluid/metrics.py index bb9c6fdc60..cd89345227 100644 --- a/python/paddle/fluid/metrics.py +++ b/python/paddle/fluid/metrics.py @@ -23,6 +23,8 @@ import warnings __all__ = [ 'MetricBase', 'CompositeMetric', + 'Precision', + 'Recall', 'Accuracy', 'ChunkEvaluator', 'EditDistance', @@ -46,40 +48,41 @@ def _is_number_or_matrix_(var): class MetricBase(object): """ - Base Class for all evaluators + Base Class for all Metrics. + MetricBase define a group of interfaces for the + model evaluation methods. Metrics accumulate metric states between + consecutive minibatches, at every minibatch, use update + interface to add current minibatch value to global states. + Use eval to compute accumative metric value from last reset() + or from scratch on. + If you need to custom a new metric, please inherit from MetricBase and + custom implementation. Args: - name(str): The name of evaluator. such as, "accuracy". Used for generate - temporary variable name. - Interface: - Note(*) : the states is the attributes who not has _ prefix. - - get_config(): print current states and configuration - reset(): clear the states. If the Metrics states type is not (int, float, np.ndarray), - Please override this method. - update(): update states at every minibatch - eval(): get metric evaluation in numpy type. + name(str): The name of metric instance. such as, "accuracy". + It needed if you want to distinct different metrics in a model. + """ - def __init__(self, name, **kwargs): + def __init__(self, name): self._name = str(name) if name != None else self.__class__.__name__ - self._kwargs = kwargs if kwargs != None else dict() - self.reset() def __str__(self): return self._name def reset(self): """ - states is the attributes who not has _ prefix. - reset the states of metrics. 
+ reset clear the states of metrics. By default, the states + are the members who do not has _ prefix, reset set them to inital states. + If you violate the implicit name rule, please also custom the reset + interface. """ states = { attr: value - for attr, value in self.__dict__.iteritems() + for attr, value in list(self.__dict__.items()) if not attr.startswith("_") } - for attr, value in states.iteritems(): + for attr, value in list(states.items()): if isinstance(value, int): setattr(self, attr, 0) elif isinstance(value, float): @@ -90,61 +93,231 @@ class MetricBase(object): setattr(self, attr, None) def get_config(self): + """ + Get the metric and current states. + The states are the members who do not has "_" prefix. + + Args: + None + + Returns: + dict: a dict of metric and states + """ states = { attr: value - for attr, value in self.__dict__.iteritems() + for attr, value in list(self.__dict__.items()) if not attr.startswith("_") } - config = copy.deepcopy(self._kwargs) + config = {} config.update({"name": self._name, "states": copy.deepcopy(states)}) return config - def update(self): - raise NotImplementedError() + def update(self, preds, labels): + """ + Updates the metric states at every minibatch. + One user can compute the minibatch metric via pure Python, or + via a c++ operator. + + Args: + preds(numpy.array): the predictions of current minibatch + labels(numpy.array): the labels of current minibatch, if the label is one-hot + or soft-label, should custom the corresponding update rule. + """ + raise NotImplementedError( + "Should not use it directly, please extend it.") def eval(self): - raise NotImplementedError() + """ + Evalute the current metrics based the accumulated states. + + Returns: + float|list(float)|numpy.array: the metrics via Python. + """ + raise NotImplementedError( + "Should not use it directly, please extend it.") class CompositeMetric(MetricBase): """ - Compute multiple metrics in each minibatch. + Composite multiple metrics in one instance. for example, merge F1, accuracy, recall into one Metric. + + Examples: + .. code-block:: python + + labels = fluid.layers.data(name="data", shape=[1], dtype="int32") + data = fluid.layers.data(name="data", shape=[32, 32], dtype="int32") + pred = fluid.layers.fc(input=data, size=1000, act="tanh") + comp = fluid.metrics.CompositeMetric() + acc = fluid.metrics.Precision() + recall = fluid.metrics.Recall() + comp.add_metric(acc) + comp.add_metric(recall) + for pass in range(PASSES): + comp.reset() + for data in train_reader(): + loss, preds, labels = exe.run(fetch_list=[cost, preds, labels]) + comp.update(preds=preds, labels=labels) + numpy_acc, numpy_recall = comp.eval() """ - def __init__(self, name=None, **kwargs): - super(CompositeMetric, self).__init__(name, kwargs) + def __init__(self, name=None): + super(CompositeMetric, self).__init__(name) self._metrics = [] def add_metric(self, metric): + """ + add one metric instance to CompositeMetric. + + Args: + metric: a instance of MetricBase. + """ if not isinstance(metric, MetricBase): raise ValueError("SubMetric should be inherit from MetricBase.") self._metrics.append(metric) + def update(self, preds, labels): + """ + Update every metrics in sequence. + + Args: + preds(numpy.array): the predictions of current minibatch + labels(numpy.array): the labels of current minibatch, if the label is one-hot + or soft-label, should custom the corresponding update rule. 
+        """
+        for m in self._metrics:
+            m.update(preds, labels)
+
     def eval(self):
+        """
+        Evaluate every metric in sequence.
+
+        Returns:
+            list(float|numpy.array): a list of metric values in Python.
+        """
         ans = []
         for m in self._metrics:
             ans.append(m.eval())
         return ans


+class Precision(MetricBase):
+    """
+    Precision (also called positive predictive value) is the fraction of
+    relevant instances among the retrieved instances.
+    https://en.wikipedia.org/wiki/Evaluation_of_binary_classifiers
+
+    Note that Precision is different from Accuracy in binary classifiers.
+    accuracy = true positive / total instances
+    precision = true positive / all predicted positive instances
+
+    Examples:
+        .. code-block:: python
+
+            metric = fluid.metrics.Precision()
+            for pass_id in range(PASSES):
+                metric.reset()
+                for data in train_reader():
+                    loss, preds, labels = exe.run(fetch_list=[cost, preds, labels])
+                    metric.update(preds=preds, labels=labels)
+                numpy_precision = metric.eval()
+    """
+
+    def __init__(self, name=None):
+        super(Precision, self).__init__(name)
+        self.tp = 0  # true positive
+        self.fp = 0  # false positive
+
+    def update(self, preds, labels):
+        if not _is_numpy_(preds):
+            raise ValueError("The 'preds' must be a numpy ndarray.")
+        if not _is_numpy_(labels):
+            raise ValueError("The 'labels' must be a numpy ndarray.")
+        sample_num = labels.shape[0]
+        for i in range(sample_num):
+            pred = preds[i].astype("int32")
+            label = labels[i]
+            if pred == 1:
+                if pred == label:
+                    self.tp += 1
+                else:
+                    self.fp += 1
+
+    def eval(self):
+        ap = self.tp + self.fp
+        return float(self.tp) / ap if ap != 0 else .0
+
+
+class Recall(MetricBase):
+    """
+    Recall (also known as sensitivity) is the fraction of
+    relevant instances that have been retrieved over the
+    total amount of relevant instances.
+
+    https://en.wikipedia.org/wiki/Precision_and_recall
+
+    Examples:
+        .. code-block:: python
+
+            metric = fluid.metrics.Recall()
+            for pass_id in range(PASSES):
+                metric.reset()
+                for data in train_reader():
+                    loss, preds, labels = exe.run(fetch_list=[cost, preds, labels])
+                    metric.update(preds=preds, labels=labels)
+                numpy_recall = metric.eval()
+    """
+
+    def __init__(self, name=None):
+        super(Recall, self).__init__(name)
+        self.tp = 0  # true positive
+        self.fn = 0  # false negative
+
+    def update(self, preds, labels):
+        if not _is_numpy_(preds):
+            raise ValueError("The 'preds' must be a numpy ndarray.")
+        if not _is_numpy_(labels):
+            raise ValueError("The 'labels' must be a numpy ndarray.")
+        sample_num = labels.shape[0]
+        for i in range(sample_num):
+            pred = preds[i].astype("int32")
+            label = labels[i]
+            if label == 1:
+                if pred == label:
+                    self.tp += 1
+                else:
+                    self.fn += 1
+
+    def eval(self):
+        recall = self.tp + self.fn
+        return float(self.tp) / recall if recall != 0 else .0
+
+
 class Accuracy(MetricBase):
     """
     Accumulate the accuracy from minibatches and compute
     the average accuracy for every pass.
+    https://en.wikipedia.org/wiki/Accuracy_and_precision

     Args:
        name: the metrics name

-    Example:
-        minibatch_accuracy = fluid.layers.accuracy(pred, label)
-        accuracy_evaluator = fluid.metrics.Accuracy()
-        for epoch in PASS_NUM:
-            accuracy_evaluator.reset()
-            for data in batches:
-                loss = exe.run(fetch_list=[cost, minibatch_accuracy])
-            accuracy_evaluator.update(value=minibatch_accuracy, weight=batches)
-            accuracy = accuracy_evaluator.eval()
+    Examples:
+        ..
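code-block:: python
+
+            # How the weighted average is accumulated, in plain Python
+            # (an illustration added for this doc; the numbers are
+            # hypothetical):
+            value, weight = 0.0, 0.0
+            for batch_acc, batch_size in [(0.5, 100), (0.9, 100)]:
+                value += batch_acc * batch_size
+                weight += batch_size
+            avg_acc = value / weight  # 0.7
+
+        ..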
code-block:: python + + labels = fluid.layers.data(name="data", shape=[1], dtype="int32") + data = fluid.layers.data(name="data", shape=[32, 32], dtype="int32") + pred = fluid.layers.fc(input=data, size=1000, act="tanh") + minibatch_accuracy = fluid.layers.accuracy(pred, label) + accuracy_evaluator = fluid.metrics.Accuracy() + for pass in range(PASSES): + accuracy_evaluator.reset() + for data in train_reader(): + batch_size = data[0] + loss = exe.run(fetch_list=[cost, minibatch_accuracy]) + accuracy_evaluator.update(value=minibatch_accuracy, weight=batch_size) + numpy_acc = accuracy_evaluator.eval() """ def __init__(self, name=None): @@ -153,6 +326,13 @@ class Accuracy(MetricBase): self.weight = .0 def update(self, value, weight): + """ + Update minibatch states. + + Args: + value(float|numpy.array): accuracy of one minibatch. + weight(int|float): batch size. + """ if not _is_number_or_matrix_(value): raise ValueError( "The 'value' must be a number(int, float) or a numpy ndarray.") @@ -163,9 +343,8 @@ class Accuracy(MetricBase): def eval(self): if self.weight == 0: - raise ValueError( - "There is no data in Accuracy Metrics. Please check layers.accuracy output has added to Accuracy." - ) + raise ValueError("There is no data in Accuracy Metrics. \ + Please check layers.accuracy output has added to Accuracy.") return self.value / self.weight @@ -174,6 +353,25 @@ class ChunkEvaluator(MetricBase): Accumulate counter numbers output by chunk_eval from mini-batches and compute the precision recall and F1-score using the accumulated counter numbers. + For some basics of chunking, please refer to + 'Chunking with Support Vector Machines '. + ChunkEvalEvaluator computes the precision, recall, and F1-score of chunk detection, + and supports IOB, IOE, IOBES and IO (also known as plain) tagging schemes. + + Examples: + .. code-block:: python + + labels = fluid.layers.data(name="data", shape=[1], dtype="int32") + data = fluid.layers.data(name="data", shape=[32, 32], dtype="int32") + pred = fluid.layers.fc(input=data, size=1000, act="tanh") + precision, recall, f1_score, num_infer_chunks, num_label_chunks, num_correct_chunks = layers.chunk_eval( + input=pred, + label=label) + metric = fluid.metrics.ChunkEvaluator() + for data in train_reader(): + loss, preds, labels = exe.run(fetch_list=[cost, preds, labels]) + metric.update(num_infer_chunks, num_label_chunks, num_correct_chunks) + numpy_precision, numpy_recall, numpy_f1 = metric.eval() """ def __init__(self, name=None): @@ -183,9 +381,17 @@ class ChunkEvaluator(MetricBase): self.num_correct_chunks = 0 def update(self, num_infer_chunks, num_label_chunks, num_correct_chunks): + """ + Update the states based on the layers.chunk_eval() ouputs. + Args: + num_infer_chunks(int|numpy.array): The number of chunks in Inference on the given minibatch. + num_label_chunks(int|numpy.array): The number of chunks in Label on the given mini-batch. + num_correct_chunks(int|float|numpy.array): The number of chunks both in Inference and Label on the + given mini-batch. + """ if not _is_number_or_matrix_(num_infer_chunks): raise ValueError( - "The 'num_infer_chunks' must be a number(int, float) or a numpy ndarray." + "The 'num_infer_chunks' must be a number(int) or a numpy ndarray." 
) if not _is_number_or_matrix_(num_label_chunks): raise ValueError( @@ -212,21 +418,28 @@ class ChunkEvaluator(MetricBase): class EditDistance(MetricBase): """ + Edit distance is a way of quantifying how dissimilar two strings + (e.g., words) are to one another by counting the minimum number + of operations required to transform one string into the other. + Refer to https://en.wikipedia.org/wiki/Edit_distance + Accumulate edit distance sum and sequence number from mini-batches and compute the average edit_distance and instance error of all batches. Args: name: the metrics name - Example: - edit_distance_metrics = fluid.layers.edit_distance(input, label) - distance_evaluator = fluid.metrics.EditDistance() - for epoch in PASS_NUM: - distance_evaluator.reset() - for data in batches: - loss = exe.run(fetch_list=[cost] + list(edit_distance_metrics)) - distance_evaluator.update(*edit_distance_metrics) - distance, instance_error = distance_evaluator.eval() + Examples: + .. code-block:: python + + distances, seq_num = fluid.layers.edit_distance(input, label) + distance_evaluator = fluid.metrics.EditDistance() + for epoch in PASS_NUM: + distance_evaluator.reset() + for data in batches: + loss = exe.run(fetch_list=[cost] + list(edit_distance_metrics)) + distance_evaluator.update(distances, seq_num) + distance, instance_error = distance_evaluator.eval() In the above example: 'distance' is the average of the edit distance in a pass. @@ -264,16 +477,38 @@ class EditDistance(MetricBase): class DetectionMAP(MetricBase): """ Calculate the detection mean average precision (mAP). - - TODO (Dang Qingqing): update the following doc. - The general steps are as follows: - 1. calculate the true positive and false positive according to the input - of detection and labels. - 2. calculate mAP value, support two versions: '11 point' and 'integral'. - + mAP is the metric to measure the accuracy of object detectors + like Faster R-CNN, SSD, etc. + It is the average of the maximum precisions at different recall values. Please get more information from the following articles: https://sanchom.wordpress.com/tag/average-precision/ + https://arxiv.org/abs/1512.02325 + + The general steps are as follows: + + 1. calculate the true positive and false positive according to the input + of detection and labels. + 2. calculate mAP value, support two versions: '11 point' and 'integral'. + + Examples: + .. code-block:: python + + pred = fluid.layers.fc(input=data, size=1000, act="tanh") + batch_map = layers.detection_map( + input, + label, + class_num, + background_label, + overlap_threshold=overlap_threshold, + evaluate_difficult=evaluate_difficult, + ap_version=ap_version) + metric = fluid.metrics.DetectionMAP() + for data in train_reader(): + loss, preds, labels = exe.run(fetch_list=[cost, batch_map]) + batch_size = data[0] + metric.update(value=batch_map, weight=batch_size) + numpy_map = metric.eval() """ def __init__(self, name=None): @@ -302,17 +537,18 @@ class DetectionMAP(MetricBase): class Auc(MetricBase): """ - Auc Metrics which adapts to binary classification. - Need to note that auc metrics compute the value via Python natively. + Auc metric adapts to the binary classification. + Refer to https://en.wikipedia.org/wiki/Receiver_operating_characteristic#Area_under_the_curve + Need to note that auc metric compute the value via Python natively. If you concern the speed, please use the fluid.layers.auc instead. 
The `auc` function creates four local variables, `true_positives`, - `true_negatives`, `false_positives` and `false_negatives` that are used to - compute the AUC. To discretize the AUC curve, a linearly spaced set of - thresholds is used to compute pairs of recall and precision values. The area - under the ROC-curve is therefore computed using the height of the recall - values by the false positive rate, while the area under the PR-curve is the - computed using the height of the precision values by the recall. + `true_negatives`, `false_positives` and `false_negatives` that are used to + compute the AUC. To discretize the AUC curve, a linearly spaced set of + thresholds is used to compute pairs of recall and precision values. The area + under the ROC-curve is therefore computed using the height of the recall + values by the false positive rate, while the area under the PR-curve is the + computed using the height of the precision values by the recall. Args: name: metric name @@ -322,22 +558,32 @@ class Auc(MetricBase): curve. "NOTE: only implement the ROC curve type via Python now." + + Examples: + .. code-block:: python + + pred = fluid.layers.fc(input=data, size=1000, act="tanh") + metric = fluid.metrics.Auc() + for data in train_reader(): + loss, preds, labels = exe.run(fetch_list=[cost, preds, labels]) + metric.update(preds, labels) + numpy_auc = metric.eval() """ def __init__(self, name, curve='ROC', num_thresholds=200): - super(MetricBase, self).__init__(name, curve, num_thresholds) + super(Auc, self).__init__(name=name) self._curve = curve self._num_thresholds = num_thresholds self._epsilon = 1e-6 - self.tp_list = np.ndarray((num_thresholds, )) - self.fn_list = np.ndarray((num_thresholds, )) - self.tn_list = np.ndarray((num_thresholds, )) - self.fp_list = np.ndarray((num_thresholds, )) + self.tp_list = np.zeros((num_thresholds, )) + self.fn_list = np.zeros((num_thresholds, )) + self.tn_list = np.zeros((num_thresholds, )) + self.fp_list = np.zeros((num_thresholds, )) - def update(self, labels, predictions, axis=1): + def update(self, preds, labels): if not _is_numpy_(labels): raise ValueError("The 'labels' must be a numpy ndarray.") - if not _is_numpy_(predictions): + if not _is_numpy_(preds): raise ValueError("The 'predictions' must be a numpy ndarray.") kepsilon = 1e-7 # to account for floating point imprecisions @@ -345,17 +591,17 @@ class Auc(MetricBase): for i in range(self._num_thresholds - 2)] thresholds = [0.0 - kepsilon] + thresholds + [1.0 + kepsilon] - # caculate TP, FN, TN, FP count + # calculate TP, FN, TN, FP count for idx_thresh, thresh in enumerate(thresholds): tp, fn, tn, fp = 0, 0, 0, 0 for i, lbl in enumerate(labels): if lbl: - if predictions[i, 0] >= thresh: + if preds[i, 1] >= thresh: tp += 1 else: fn += 1 else: - if predictions[i, 0] >= thresh: + if preds[i, 1] >= thresh: fp += 1 else: tn += 1 diff --git a/python/paddle/fluid/net_drawer.py b/python/paddle/fluid/net_drawer.py index 73946a0721..623a7d3fd0 100644 --- a/python/paddle/fluid/net_drawer.py +++ b/python/paddle/fluid/net_drawer.py @@ -24,7 +24,7 @@ logger = logging.getLogger(__name__) logger.setLevel(logging.INFO) try: - from graphviz import Digraph + from .graphviz import Digraph except ImportError: logger.info( 'Cannot import graphviz, which is required for drawing a network. 
This ' @@ -77,7 +77,7 @@ def parse_graph(program, graph, var_dict, **kwargs): # fill the known variables for block in program.blocks: for var in block.vars: - if not var_dict.has_key(var): + if var not in var_dict: var_dict[var] = "Feed" temp_id = 0 @@ -93,17 +93,17 @@ def parse_graph(program, graph, var_dict, **kwargs): var_dict[arg] = op.type for e in op.inputs: for arg in e.arguments: - if var_dict.has_key(arg): + if arg in var_dict: graph.edge(**draw_edge(var_dict, op, e, arg)) break # only plot the first block def draw_graph(startup_program, main_program, **kwargs): - if kwargs.has_key("graph_attr"): + if "graph_attr" in kwargs: GRAPH_STYLE.update(kwargs[graph_attr]) - if kwargs.has_key("node_attr"): + if "node_attr" in kwargs: OP_STYLE.update(kwargs[node_attr]) - if kwargs.has_key("edge_attr"): + if "edge_attr" in kwargs: VAR_STYLE.update(kwargs[edge_attr]) graph_id = unique_id() diff --git a/python/paddle/fluid/nets.py b/python/paddle/fluid/nets.py index bbedf6fde0..08480671d8 100644 --- a/python/paddle/fluid/nets.py +++ b/python/paddle/fluid/nets.py @@ -11,7 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -import layers +from . import layers __all__ = [ "simple_img_conv_pool", @@ -26,16 +26,87 @@ def simple_img_conv_pool(input, filter_size, pool_size, pool_stride, - act, - param_attr=None, + pool_padding=0, pool_type='max', + global_pooling=False, + conv_stride=1, + conv_padding=0, + conv_dilation=1, + conv_groups=1, + param_attr=None, + bias_attr=None, + act=None, use_cudnn=True, use_mkldnn=False): + """ + The simple_img_conv_pool is composed with one Convolution2d and one Pool2d. + + Args: + input (Variable): The input image with [N, C, H, W] format. + num_filters(int): The number of filter. It is as same as the output + feature channel. + filter_size (int|list|tuple): The filter size. If filter_size is a list or + tuple, it must contain two integers, (filter_size_H, filter_size_W). Otherwise, + the filter_size_H = filter_size_W = filter_size. + pool_size (int|list|tuple): The pooling size of Pool2d layer. If pool_size + is a list or tuple, it must contain two integers, (pool_size_H, pool_size_W). + Otherwise, the pool_size_H = pool_size_W = pool_size. + pool_stride (int|list|tuple): The pooling stride of Pool2d layer. If pool_stride + is a list or tuple, it must contain two integers, (pooling_stride_H, pooling_stride_W). + Otherwise, the pooling_stride_H = pooling_stride_W = pool_stride. + pool_padding (int|list|tuple): The padding of Pool2d layer. If pool_padding is a list or + tuple, it must contain two integers, (pool_padding_H, pool_padding_W). + Otherwise, the pool_padding_H = pool_padding_W = pool_padding. Default 0. + pool_type (str): Pooling type can be :math:`max` for max-pooling and :math:`avg` for + average-pooling. Default :math:`max`. + global_pooling (bool): Whether to use the global pooling. If global_pooling = true, + pool_size and pool_padding while be ignored. Default False + conv_stride (int|list|tuple): The stride size of the Conv2d Layer. If stride is a + list or tuple, it must contain two integers, (conv_stride_H, conv_stride_W). Otherwise, + the conv_stride_H = conv_stride_W = conv_stride. Default: conv_stride = 1. + conv_padding (int|list|tuple): The padding size of the Conv2d Layer. If padding is + a list or tuple, it must contain two integers, (conv_padding_H, conv_padding_W). 
+ Otherwise, the conv_padding_H = conv_padding_W = conv_padding. Default: conv_padding = 0. + conv_dilation (int|list|tuple): The dilation size of the Conv2d Layer. If dilation is + a list or tuple, it must contain two integers, (conv_dilation_H, conv_dilation_W). + Otherwise, the conv_dilation_H = conv_dilation_W = conv_dilation. Default: conv_dilation = 1. + conv_groups (int): The groups number of the Conv2d Layer. According to grouped + convolution in Alex Krizhevsky's Deep CNN paper: when group=2, + the first half of the filters is only connected to the first half + of the input channels, while the second half of the filters is only + connected to the second half of the input channels. Default: groups=1 + param_attr (ParamAttr): The parameters to the Conv2d Layer. Default: None + bias_attr (ParamAttr): Bias parameter for the Conv2d layer. Default: None + act (str): Activation type for Conv2d. Default: None + use_cudnn (bool): Use cudnn kernel or not, it is valid only when the cudnn + library is installed. Default: True + use_mkldnn (bool): Use mkldnn kernels or not, it is valid only when compiled + with mkldnn library. Default: False + + Return: + Variable: The result of input after Convolution2d and Pool2d. + + Examples: + .. code-block:: python + + img = fluid.layers.data(name='img', shape=[1, 28, 28], dtype='float32') + conv_pool = fluid.nets.simple_img_conv_pool(input=img, + filter_size=5, + num_filters=20, + pool_size=2, + pool_stride=2, + act="relu") + """ conv_out = layers.conv2d( input=input, num_filters=num_filters, filter_size=filter_size, + stride=conv_stride, + padding=conv_padding, + dilation=conv_dilation, + groups=conv_groups, param_attr=param_attr, + bias_attr=bias_attr, act=act, use_cudnn=use_cudnn, use_mkldnn=use_mkldnn) @@ -45,6 +116,8 @@ def simple_img_conv_pool(input, pool_size=pool_size, pool_type=pool_type, pool_stride=pool_stride, + pool_padding=pool_padding, + global_pooling=global_pooling, use_cudnn=use_cudnn, use_mkldnn=use_mkldnn) return pool_out @@ -60,11 +133,65 @@ def img_conv_group(input, conv_with_batchnorm=False, conv_batchnorm_drop_rate=0.0, pool_stride=1, - pool_type=None, + pool_type="max", use_cudnn=True, use_mkldnn=False): """ - Image Convolution Group, Used for vgg net. + The Image Convolution Group is composed of Convolution2d, BatchNorm, DropOut, + and Pool2d. According to the input arguments, img_conv_group will do serials of + computation for Input using Convolution2d, BatchNorm, DropOut, and pass the last + result to Pool2d. + + Args: + input (Variable): The input image with [N, C, H, W] format. + conv_num_filter(list|tuple): Indicates the numbers of filter of this group. + pool_size (int|list|tuple): The pooling size of Pool2d Layer. If pool_size + is a list or tuple, it must contain two integers, (pool_size_H, pool_size_W). + Otherwise, the pool_size_H = pool_size_W = pool_size. + conv_padding (int|list|tuple): The padding size of the Conv2d Layer. If padding is + a list or tuple, its length must be equal to the length of conv_num_filter. + Otherwise the conv_padding of all Conv2d Layers are the same. Default 1. + conv_filter_size (int|list|tuple): The filter size. If filter_size is a list or + tuple, its length must be equal to the length of conv_num_filter. + Otherwise the conv_filter_size of all Conv2d Layers are the same. Default 3. + conv_act (str): Activation type for Conv2d Layer that is not followed by BatchNorm. + Default: None. + param_attr (ParamAttr): The parameters to the Conv2d Layer. 
Default: None + conv_with_batchnorm (bool|list): Indicates whether to use BatchNorm after Conv2d Layer. + If conv_with_batchnorm is a list, its length must be equal to the length of + conv_num_filter. Otherwise, conv_with_batchnorm indicates whether all the + Conv2d Layer follows a BatchNorm. Default False. + conv_batchnorm_drop_rate (float|list): Indicates the drop_rate of Dropout Layer + after BatchNorm. If conv_batchnorm_drop_rate is a list, its length must be + equal to the length of conv_num_filter. Otherwise, drop_rate of all Dropout + Layers is conv_batchnorm_drop_rate. Default 0.0. + pool_stride (int|list|tuple): The pooling stride of Pool2d layer. If pool_stride + is a list or tuple, it must contain two integers, (pooling_stride_H, + pooling_stride_W). Otherwise, the pooling_stride_H = pooling_stride_W = pool_stride. + Default 1. + pool_type (str): Pooling type can be :math:`max` for max-pooling and :math:`avg` for + average-pooling. Default :math:`max`. + use_cudnn (bool): Use cudnn kernel or not, it is valid only when the cudnn + library is installed. Default: True + use_mkldnn (bool): Use mkldnn kernels or not, it is valid only when compiled + with mkldnn library. Default: False + + Return: + Variable: The final result after serial computation using Convolution2d, + BatchNorm, DropOut, and Pool2d. + + Examples: + .. code-block:: python + + img = fluid.layers.data(name='img', shape=[1, 28, 28], dtype='float32') + conv_pool = fluid.nets.img_conv_group(input=img, + num_channels=3, + conv_padding=1, + conv_num_filter=[3, 3], + conv_filter_size=3, + conv_act="relu", + pool_size=2, + pool_stride=2) """ tmp = input assert isinstance(conv_num_filter, list) or \ @@ -74,6 +201,7 @@ def img_conv_group(input, if not hasattr(obj, '__len__'): return [obj] * len(conv_num_filter) else: + assert len(obj) == len(conv_num_filter) return obj conv_padding = __extend_list__(conv_padding) @@ -82,7 +210,7 @@ def img_conv_group(input, conv_with_batchnorm = __extend_list__(conv_with_batchnorm) conv_batchnorm_drop_rate = __extend_list__(conv_batchnorm_drop_rate) - for i in xrange(len(conv_num_filter)): + for i in range(len(conv_num_filter)): local_conv_act = conv_act if conv_with_batchnorm[i]: local_conv_act = None @@ -119,6 +247,39 @@ def sequence_conv_pool(input, param_attr=None, act="sigmoid", pool_type="max"): + """ + The sequence_conv_pool is composed with Sequence Convolution and Pooling. + + Args: + input (Variable): The input of sequence_conv, which supports variable-time + length input sequence. The underlying of input is a matrix with shape + (T, N), where T is the total time steps in this mini-batch and N is + the input_hidden_size + num_filters(int): The number of filter. + filter_size (int): The filter size. + param_attr (ParamAttr): The parameters to the Sequence_conv Layer. Default: None. + act (str): Activation type for Sequence_conv Layer. Default: "sigmoid". + pool_type (str): Pooling type can be :math:`max` for max-pooling, :math:`average` for + average-pooling, :math:`sum` for sum-pooling, :math:`sqrt` for sqrt-pooling. + Default :math:`max`. + + Return: + Variable: The final result after Sequence Convolution and Pooling. + + Examples: + .. 
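code-block:: python
+
+            # What the helper composes, sketched with the two underlying
+            # layers (an illustration added for this doc; `emb` is a
+            # hypothetical input variable):
+            conv_out = fluid.layers.sequence_conv(
+                input=emb, num_filters=512, filter_size=3, act="tanh")
+            pool_out = fluid.layers.sequence_pool(input=conv_out, pool_type="sqrt")
+
+        ..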
code-block:: python
+
+            input_dim = len(word_dict)
+            emb_dim = 128
+            hid_dim = 512
+            data = fluid.layers.data(name="words", shape=[1], dtype="int64", lod_level=1)
+            emb = fluid.layers.embedding(input=data, size=[input_dim, emb_dim], is_sparse=True)
+            seq_conv = fluid.nets.sequence_conv_pool(input=emb,
+                                                     num_filters=hid_dim,
+                                                     filter_size=3,
+                                                     act="tanh",
+                                                     pool_type="sqrt")
+    """
     conv_out = layers.sequence_conv(
         input=input,
         num_filters=num_filters,
@@ -132,9 +293,9 @@ def sequence_conv_pool(input,

 def glu(input, dim=-1):
     """
-    The gated linear unit composed by split, sigmoid activation and elementwise
-    multiplication. Specifically, Split the input into two equal sized parts
-    :math:`a` and :math:`b` along the given dimension and then compute as
+    The Gated Linear Units(GLU) composed by split, sigmoid activation and element-wise
+    multiplication. Specifically, Split the input into two equal sized parts,
+    :math:`a` and :math:`b`, along the given dimension and then compute as
     following:

         .. math::
@@ -147,16 +308,16 @@ def glu(input, dim=-1):
     Args:
         input (Variable): The input variable which is a Tensor or LoDTensor.
         dim (int): The dimension along which to split. If :math:`dim < 0`, the
-            dimension to split along is :math:`rank(input) + dim`.
+            dimension to split along is :math:`rank(input) + dim`. Default -1.

     Returns:
-        Variable: The Tensor variable with half the size of input.
+        Variable: Variable with half the size of input.

     Examples:
         .. code-block:: python

-            # x is a Tensor variable with shape [3, 6, 9]
-            fluid.nets.glu(input=x, dim=1)  # shape of output: [3, 3, 9]
+            data = fluid.layers.data(name="words", shape=[3, 6, 9], dtype="float32")
+            output = fluid.nets.glu(input=data, dim=1)  # shape of output: [3, 3, 9]
     """

     a, b = layers.split(input, num_or_sections=2, dim=dim)
@@ -189,40 +350,48 @@ def scaled_dot_product_attention(queries,
        `_.

     Args:
-
         queries (Variable): The input variable which should be a 3-D Tensor.
         keys (Variable): The input variable which should be a 3-D Tensor.
         values (Variable): The input variable which should be a 3-D Tensor.
         num_heads (int): Head number to compute the scaled dot product
-            attention. Default value is 1.
+            attention. Default: 1.
         dropout_rate (float): The dropout rate to drop the attention weight.
-            Default value is 0.
+            Default: 0.0.

     Returns:
-
-        Variable: A 3-D Tensor computed by multi-head scaled dot product \
-            attention.
+        Variable: A 3-D Tensor computed by multi-head scaled dot product\
+                  attention.

     Raises:
-
         ValueError: If input queries, keys, values are not 3-D Tensors.

-    NOTE:
+    NOTES:
         1. When num_heads > 1, three linear projections are learned respectively
-        to map input queries, keys and values into queries', keys' and values'.
-        queries', keys' and values' have the same shapes with queries, keys
-        and values.
-
-        1. When num_heads == 1, scaled_dot_product_attention has no learnable
-        parameters.
+           to map input queries, keys and values into queries', keys' and values'.
+           queries', keys' and values' have the same shapes with queries, keys
+           and values.
+        2. When num_heads == 1, scaled_dot_product_attention has no learnable
+           parameters.

     Examples:
        ..
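code-block:: python
+
+            # The underlying computation for a single head, sketched with
+            # numpy (an illustration added for this doc; no projections,
+            # no dropout):
+            import numpy as np
+            q = np.random.rand(5, 9)
+            k = np.random.rand(6, 9)
+            v = np.random.rand(6, 10)
+            scores = q.dot(k.T) / np.sqrt(k.shape[-1])
+            weights = np.exp(scores) / np.exp(scores).sum(axis=-1, keepdims=True)
+            context = weights.dot(v)  # shape (5, 10)
+
       ..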
code-block:: python - # Suppose q, k, v are Tensors with the following shape: - # q: [3, 5, 9], k: [3, 6, 9], v: [3, 6, 10] - - contexts = fluid.nets.scaled_dot_product_attention(q, k, v) + queries = fluid.layers.data(name="queries", + shape=[3, 5, 9], + dtype="float32", + append_batch_size=False) + queries.stop_gradient = False + keys = fluid.layers.data(name="keys", + shape=[3, 6, 9], + dtype="float32", + append_batch_size=False) + keys.stop_gradient = False + values = fluid.layers.data(name="values", + shape=[3, 6, 10], + dtype="float32", + append_batch_size=False) + values.stop_gradient = False + contexts = fluid.nets.scaled_dot_product_attention(queries, keys, values) contexts.shape # [3, 5, 10] """ if not (len(queries.shape) == len(keys.shape) == len(values.shape) == 3): @@ -319,10 +488,11 @@ def scaled_dot_product_attention(queries, trans_x = layers.transpose(x, perm=[0, 2, 1, 3]) return layers.reshape( x=trans_x, - shape=map(int, [ - trans_x.shape[0], trans_x.shape[1], - trans_x.shape[2] * trans_x.shape[3] - ])) + shape=list( + map(int, [ + trans_x.shape[0], trans_x.shape[1], trans_x.shape[2] * + trans_x.shape[3] + ]))) q, k, v = __compute_qkv(queries, keys, values, num_heads) diff --git a/python/paddle/fluid/op.py b/python/paddle/fluid/op.py index 0b76e94157..93f021a360 100644 --- a/python/paddle/fluid/op.py +++ b/python/paddle/fluid/op.py @@ -12,6 +12,8 @@ # See the License for the specific language governing permissions and # limitations under the License. +import six + import paddle.fluid.core as core import paddle.fluid.proto.framework_pb2 as framework_pb2 @@ -24,13 +26,13 @@ def get_all_op_protos(): protostrs = core.get_all_op_protos() ret_values = [] for pbstr in protostrs: - op_proto = framework_pb2.OpProto.FromString(str(pbstr)) + op_proto = framework_pb2.OpProto.FromString(six.binary_type(pbstr)) ret_values.append(op_proto) return ret_values def is_str(s): - return isinstance(s, str) or isinstance(s, unicode) + return isinstance(s, six.string_types) class OpDescCreationMethod(object): @@ -189,7 +191,7 @@ class OperatorFactory(object): return self.get_op_info(t).method(**kwargs) def types(self): - return self.op_methods.keys() + return list(self.op_methods.keys()) def get_op_info(self, t): if t not in self.op_methods: @@ -197,13 +199,13 @@ class OperatorFactory(object): return self.op_methods.get(t) def get_op_input_names(self, type): - return map(lambda x: x[0], self.get_op_info(type).inputs) + return [x[0] for x in self.get_op_info(type).inputs] def get_op_inputs(self, type): return self.get_op_info(type).inputs def get_op_output_names(self, type): - return map(lambda x: x[0], self.get_op_info(type).outputs) + return [x[0] for x in self.get_op_info(type).outputs] def get_op_outputs(self, type): return self.get_op_info(type).outputs diff --git a/python/paddle/fluid/optimizer.py b/python/paddle/fluid/optimizer.py index 0a314ddfd7..a07325f46a 100644 --- a/python/paddle/fluid/optimizer.py +++ b/python/paddle/fluid/optimizer.py @@ -13,23 +13,23 @@ # limitations under the License. import re from collections import defaultdict -from paddle.fluid.framework import Program -import framework -import layers -from backward import append_backward -from framework import program_guard -import unique_name -from initializer import Constant -from layer_helper import LayerHelper -from regularizer import append_regularization_ops -from clip import append_gradient_clip_ops, error_clip_callback +from paddle.fluid.framework import Program, Variable +from . import framework +from . 
import layers +from .backward import append_backward +from .framework import program_guard +from . import unique_name +from .initializer import Constant +from .layer_helper import LayerHelper +from .regularizer import append_regularization_ops +from .clip import append_gradient_clip_ops, error_clip_callback from contextlib import contextmanager __all__ = [ - 'SGD', 'Momentum', 'Adagrad', 'Adam', 'Adamax', 'DecayedAdagrad', + 'SGD', 'Momentum', 'Adagrad', 'Adam', 'Adamax', 'DecayedAdagrad', 'Ftrl', 'SGDOptimizer', 'MomentumOptimizer', 'AdagradOptimizer', 'AdamOptimizer', - 'AdamaxOptimizer', 'DecayedAdagradOptimizer', 'Adadelta', 'ModelAverage', - 'Optimizer' + 'AdamaxOptimizer', 'DecayedAdagradOptimizer', 'RMSPropOptimizer', + 'FtrlOptimizer', 'Adadelta', 'ModelAverage', 'Optimizer' ] @@ -41,12 +41,17 @@ class Optimizer(object): but need to use one of it's implementation. """ - def __init__(self, learning_rate, regularization=None): + def __init__(self, + learning_rate, + regularization=None, + LARS_weight_decay=0.0): if not isinstance(learning_rate, float) and \ not isinstance(learning_rate, framework.Variable): raise TypeError("learning rate should be float or Variable") self.regularization = regularization self._learning_rate = learning_rate + # the learning rate type should be inferred from loss + self._dtype = None # each program should have a independent learning rate # program -> Variable(learning_rate) self._learning_rate_map = dict() @@ -59,9 +64,10 @@ class Optimizer(object): # {accum_name : { paramter_name : accumulator_for_parameter, ...}, ...} self._accumulators = defaultdict(lambda: dict()) self.helper = None + self._LARS_weight_decay = LARS_weight_decay def _create_global_learning_rate(self): - lr = self.global_learning_rate() + lr = self._global_learning_rate() if isinstance(lr, framework.Variable): return @@ -77,10 +83,10 @@ class Optimizer(object): name=unique_name.generate("learning_rate"), shape=[1], value=float(self._learning_rate), - dtype='float32', + dtype='float32' if self._dtype is None else self._dtype, persistable=True) - def global_learning_rate(self, program=None): + def _global_learning_rate(self, program=None): """ get global decayed learning rate :return: @@ -98,10 +104,15 @@ class Optimizer(object): # create learning rate variable for every parameter param = param_and_grad[0] param_lr = param.optimize_attr['learning_rate'] - if param_lr == 1.0: - return self.global_learning_rate() + if isinstance(param_lr, Variable): + # param learning rate has been updated (LARS) + return param_lr else: - return self.global_learning_rate() * param_lr + if param_lr == 1.0: + return self._global_learning_rate() + else: + return self._global_learning_rate() * param_lr def _create_accumulators(self, block, parameters): """Create all accumulators needed by the parameters @@ -112,7 +123,7 @@ """ pass - def _finish_update(self, block): + def _finish_update(self, block, parameters_and_grads): """Finish any custom updates needed before completing an optimization step @@ -121,7 +132,7 @@ parameters: list of parameter variables for the optimizer Returns: - list of finish ops or None + None """ pass @@ -174,22 +185,22 @@ class Optimizer(object): format(name, param.name)) return self._accumulators[name][param.name]
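The LARS_weight_decay argument added above, together with the append_LARS call in _create_optimization_pass below, rescales each parameter's learning rate layer by layer. A minimal sketch of enabling it through the SGD alias; avg_cost is assumed to be a scalar loss Variable from an existing network, and the decay value is illustrative, not tuned:

.. code-block:: python

    import paddle.fluid as fluid

    # avg_cost is assumed to be a scalar loss Variable built elsewhere.
    # A positive LARS_weight_decay makes the optimizer rewrite each
    # parameter's learning rate as a Variable via layers.append_LARS
    # before the per-parameter optimize ops are appended.
    optimizer = fluid.optimizer.SGD(learning_rate=0.1,
                                    LARS_weight_decay=0.0005)
    optimizer.minimize(avg_cost)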
- def create_optimization_pass(self, - parameters_and_grads, - loss, - startup_program=None): + def _create_optimization_pass(self, + parameters_and_grads, + loss, + startup_program=None): """Add optimization operators to update gradients to variables. Args: - loss: the target that this optimization is for. - parameters_and_grads: a list of (variable, gradient) pair to update. + loss(Variable): the target that this optimization is for. + parameters_and_grads(list(tuple(Variable, Variable))): + a list of (variable, gradient) pairs to update. Returns: return_op_list: a list of operators that will complete one step of optimization. This will include parameter update ops, global step update ops and any other custom ops required by subclasses to manage their internal state. - :param startup_program: """ # This is a default implementation of create_optimization_pass that # can be shared by most optimizers. This implementation assumes that @@ -200,6 +211,7 @@ # Create any accumulators program = loss.block.program + self._dtype = loss.dtype with program_guard(program, startup_program): global_block = framework.default_main_program().global_block() start = len(global_block.ops) self._create_accumulators(loss.block, [p[0] for p in parameters_and_grads]) self._create_global_learning_rate() + if self._LARS_weight_decay > 0.0: + layers.append_LARS(parameters_and_grads, + self._global_learning_rate(), + self._LARS_weight_decay) optimize_ops = [] for param_and_grad in parameters_and_grads: - if param_and_grad[0].trainable is True and param_and_grad[ - 1] is not None: - optimize_op = self._append_optimize_op(loss.block, - param_and_grad) - optimize_ops.append(optimize_op) + if param_and_grad[1] is None: + continue + with param_and_grad[0].block.program.optimized_guard( + param_and_grad): + if param_and_grad[0].trainable is True: + optimize_op = self._append_optimize_op(loss.block, + param_and_grad) + optimize_ops.append(optimize_op) # Get custom finish ops for subclasses # FIXME: Need to fix this once we figure out how to handle dependencies - self._finish_update(loss.block) + self._finish_update(loss.block, parameters_and_grads) end = len(global_block.ops) - return global_block.slice_ops(start, end) + return global_block._slice_ops(start, end) def minimize(self, loss, @@ -244,13 +263,28 @@ class Optimizer(object): params_grads = append_regularization_ops(params_grads, self.regularization) - optimize_ops = self.create_optimization_pass(params_grads, loss, - startup_program) + optimize_ops = self._create_optimization_pass(params_grads, loss, + startup_program) return optimize_ops, params_grads class SGDOptimizer(Optimizer): - """ Simple SGD optimizer without any state. + """ + Optimizer of the stochastic gradient descent algorithm. + + .. math:: + + param\_out = param - learning\_rate * grad + + Args: + learning_rate (float|Variable): the learning rate used to update parameters. \ + Can be a float value or a Variable with one float value as data element. + + Examples: + .. code-block:: python + + sgd_optimizer = fluid.optimizer.SGD(learning_rate=0.2) + sgd_optimizer.minimize(cost) """ def __init__(self, learning_rate, **kwargs): @@ -276,7 +310,37 @@ class SGDOptimizer(Optimizer): class MomentumOptimizer(Optimizer): - """Simple Momentum optimizer with velocity state + """ + + Simple Momentum optimizer with velocity state + + This optimizer has a flag for Nesterov Momentum. + + The update equations are as follows: + + ..
math:: + + & velocity = mu * velocity + gradient + + & if (use\_nesterov): + + &\quad param = param - (gradient + mu * velocity) * learning\_rate + + & else: + + &\quad param = param - learning\_rate * velocity + + Args: + learning_rate (float|Variable): the learning rate used to update parameters. \ + Can be a float value or a Variable with one float value as data element. + momentum (float): momentum factor + use_nesterov (bool): enables Nesterov momentum + + Examples: + .. code-block:: python + + optimizer = fluid.optimizer.Momentum(learning_rate=0.2, momentum=0.1) + optimizer.minimize(cost) """ _velocity_acc_str = "velocity" @@ -320,7 +384,32 @@ class MomentumOptimizer(Optimizer): class AdagradOptimizer(Optimizer): - """Simple Adagrad optimizer with moment state + """ + **Adaptive Gradient Algorithm (Adagrad)** + + The update is done as follows: + + .. math:: + + moment\_out &= moment + grad * grad + + param\_out &= param - \\frac{learning\_rate * grad}{\sqrt{moment\_out} + \epsilon} + + The original paper(http://www.jmlr.org/papers/volume12/duchi11a/duchi11a.pdf) + does not have the epsilon attribute. It is added here in our implementation + as also proposed here: http://cs231n.github.io/neural-networks-3/#ada + for numerical stability to avoid the division by zero error. + + Args: + learning_rate (float|Variable): the learning rate used to update parameters. \ + Can be a float value or a Variable with one float value as data element. + epsilon (float): a small float value for numerical stability. + + Examples: + .. code-block:: python + + optimizer = fluid.optimizer.Adagrad(learning_rate=0.2) + optimizer.minimize(cost) """ _moment_acc_str = "moment" @@ -361,10 +450,45 @@ class AdagradOptimizer(Optimizer): class AdamOptimizer(Optimizer): - """Implements the Adam Optimizer + """ + This implements the Adam optimizer from Section 2 of the Adam + paper : https://arxiv.org/abs/1412.6980. + Adam is a first-order gradient-based optimization method based on + adaptive estimates of lower-order moments. + + Adam updates: + + .. math:: + + t & = t + 1 + + moment\_1\_out & = {\\beta}_1 * moment\_1 + (1 - {\\beta}_1) * grad + + moment\_2\_out & = {\\beta}_2 * moment\_2 + (1 - {\\beta}_2) * grad * grad + + learning\_rate & = learning\_rate * \\ + \\frac{\sqrt{1 - {\\beta}_2^t}}{1 - {\\beta}_1^t} + + param\_out & = param - learning\_rate * \\frac{moment\_1}{\sqrt{moment\_2} + \epsilon} + + Args: + learning_rate (float|Variable): the learning rate used to update parameters. \ + Can be a float value or a Variable with one float value as data element. + beta1 (float): The exponential decay rate for the 1st moment estimates. + beta2 (float): The exponential decay rate for the 2nd moment estimates. + epsilon (float): a small float value for numerical stability. + + Examples: + .. 
code-block:: python + + optimizer = fluid.optimizer.Adam(learning_rate=0.2) + optimizer.minimize(cost) + """ _moment1_acc_str = "moment1" _moment2_acc_str = "moment2" + _beta1_pow_acc_str = "beta1_pow_acc" + _beta2_pow_acc_str = "beta2_pow_acc" def __init__(self, learning_rate=0.001, @@ -386,32 +510,22 @@ class AdamOptimizer(Optimizer): def _create_accumulators(self, block, parameters): assert isinstance(block, framework.Block) - main_block = block.program.global_block() - # Create beta1 and beta2 power tensors - beta_shape = [1] - self._beta1_pow_acc = self.helper.create_global_variable( - name=unique_name.generate('beta1_pow_acc'), - dtype='float32', - shape=beta_shape, - lod_level=0, - persistable=True) - self.helper.set_variable_initializer( - self._beta1_pow_acc, initializer=Constant(self._beta1)) - - self._beta2_pow_acc = self.helper.create_global_variable( - name=unique_name.generate('beta2_pow_acc'), - dtype='float32', - shape=beta_shape, - lod_level=0, - persistable=True) - - self.helper.set_variable_initializer( - self._beta2_pow_acc, initializer=Constant(self._beta2)) - # Create accumulator tensors for first and second moments for p in parameters: self._add_accumulator(self._moment1_acc_str, p) self._add_accumulator(self._moment2_acc_str, p) + self._add_accumulator( + name=self._beta1_pow_acc_str, + param=p, + dtype='float32', + fill_value=self._beta1, + shape=[1]) + self._add_accumulator( + name=self._beta2_pow_acc_str, + param=p, + dtype='float32', + fill_value=self._beta2, + shape=[1]) def _append_optimize_op(self, block, param_and_grad): assert isinstance(block, framework.Block) @@ -420,6 +534,11 @@ class AdamOptimizer(Optimizer): param_and_grad[0]) moment2 = self._get_accumulator(self._moment2_acc_str, param_and_grad[0]) + beta1_pow_acc = self._get_accumulator(self._beta1_pow_acc_str, + param_and_grad[0]) + beta2_pow_acc = self._get_accumulator(self._beta2_pow_acc_str, + param_and_grad[0]) + # create the adam optimize op adam_op = block.append_op( type=self.type, @@ -429,8 +548,8 @@ class AdamOptimizer(Optimizer): "LearningRate": self._create_param_lr(param_and_grad), "Moment1": moment1, "Moment2": moment2, - "Beta1Pow": self._beta1_pow_acc, - "Beta2Pow": self._beta2_pow_acc + "Beta1Pow": beta1_pow_acc, + "Beta2Pow": beta2_pow_acc }, outputs={ "ParamOut": param_and_grad[0], @@ -445,31 +564,73 @@ class AdamOptimizer(Optimizer): return adam_op - def _finish_update(self, block): + def _finish_update(self, block, param_and_grads): """Update Beta1 and Beta2 Power accumulators """ assert isinstance(block, framework.Block) main_block = block.program.global_block() - scale_beta1 = main_block.append_op( - type="scale", - inputs={"X": self._beta1_pow_acc}, - outputs={"Out": self._beta1_pow_acc}, - attrs={"scale": self._beta1}) + for param, grad in param_and_grads: + if grad is None: + continue + with param.block.program.optimized_guard([param, grad]): + beta1_pow_acc = self._get_accumulator(self._beta1_pow_acc_str, + param) + beta2_pow_acc = self._get_accumulator(self._beta2_pow_acc_str, + param) + main_block.append_op( + type="scale", + inputs={"X": beta1_pow_acc}, + outputs={"Out": beta1_pow_acc}, + attrs={"scale": self._beta1}) + + main_block.append_op( + type="scale", + inputs={"X": beta2_pow_acc}, + outputs={"Out": beta2_pow_acc}, + attrs={"scale": self._beta2}) - scale_beta2 = main_block.append_op( - type="scale", - inputs={"X": self._beta2_pow_acc}, - outputs={"Out": self._beta2_pow_acc}, - attrs={"scale": self._beta2}) - return [scale_beta1, scale_beta2] +class 
AdamaxOptimizer(Optimizer): + """ + We implement the Adamax optimizer from Section 7 of the Adam + paper: https://arxiv.org/abs/1412.6980. Adamax is a variant of the + Adam algorithm based on the infinity norm. + Adamax updates: -class AdamaxOptimizer(Optimizer): - """Implements the Adamax Optimizer + .. math:: + + t & = t + 1 + + moment\_out & = {\\beta}_1 * moment + (1 - {\\beta}_1) * grad + + inf\_norm\_out & = max({\\beta}_2 * inf\_norm + \epsilon, |grad|) + + learning\_rate & = \\frac{learning\_rate}{1 - {\\beta}_1^t} + + param\_out & = param - learning\_rate * \\frac{moment\_out}{inf\_norm\_out} + + + The original paper does not have an epsilon attribute. + However, it is added here for numerical stability to prevent the + division by 0 error. + + Args: + learning_rate (float|Variable): the learning rate used to update parameters. \ + Can be a float value or a Variable with one float value as data element. + beta1 (float): The exponential decay rate for the 1st moment estimates. + beta2 (float): The exponential decay rate for the 2nd moment estimates. + epsilon (float): a small float value for numerical stability. + + Examples: + .. code-block:: python + + optimizer = fluid.optimizer.Adamax(learning_rate=0.2) + optimizer.minimize(cost) """ _moment_acc_str = "moment" _inf_norm_acc_str = "inf_norm" + _beta1_pow_acc_str = "beta1_pow_acc" def __init__(self, learning_rate=0.001, @@ -489,21 +650,16 @@ class AdamaxOptimizer(Optimizer): self._epsilon = epsilon def _create_accumulators(self, block, parameters): - # Create beta1 power accumulator tensor - beta_shape = [1] - self._beta1_pow_acc = self.helper.create_global_variable( - name=unique_name.generate('beta1_pow_acc'), - dtype='float32', - shape=beta_shape, - lod_level=0, - persistable=True) - self.helper.set_variable_initializer( - self._beta1_pow_acc, initializer=Constant(self._beta1)) - # Create accumulator tensors for first moment and infinity norm for p in parameters: self._add_accumulator(self._moment_acc_str, p) self._add_accumulator(self._inf_norm_acc_str, p) + self._add_accumulator( + name=self._beta1_pow_acc_str, + param=p, + dtype='float32', + fill_value=self._beta1, + shape=[1]) def _append_optimize_op(self, block, param_and_grad): assert isinstance(block, framework.Block) @@ -511,6 +667,8 @@ class AdamaxOptimizer(Optimizer): moment = self._get_accumulator(self._moment_acc_str, param_and_grad[0]) inf_norm = self._get_accumulator(self._inf_norm_acc_str, param_and_grad[0]) + beta1_pow_acc = self._get_accumulator(self._beta1_pow_acc_str, + param_and_grad[0]) # create the adamax optimize op adamax_op = block.append_op( type=self.type, @@ -520,7 +678,7 @@ class AdamaxOptimizer(Optimizer): "LearningRate": self._create_param_lr(param_and_grad), "Moment": moment, "InfNorm": inf_norm, - "Beta1Pow": self._beta1_pow_acc + "Beta1Pow": beta1_pow_acc }, outputs={ "ParamOut": param_and_grad[0], @@ -535,22 +693,53 @@ class AdamaxOptimizer(Optimizer): return adamax_op - def _finish_update(self, block): + def _finish_update(self, block, parameters_and_grads): """Update Beta1 Power accumulator """ assert isinstance(block, framework.Block) main_block = block.program.global_block() - scale_beta1 = main_block.append_op( - type="scale", - inputs={"X": self._beta1_pow_acc}, - outputs={"Out": self._beta1_pow_acc}, - attrs={"scale": self._beta1}) - - return [scale_beta1] + for param, grad in parameters_and_grads: + if grad is None: + continue + with param.block.program.optimized_guard([param, grad]): + beta1_pow_acc = 
self._get_accumulator(self._beta1_pow_acc_str, + param) + main_block.append_op( + type="scale", + inputs={"X": beta1_pow_acc}, + outputs={"Out": beta1_pow_acc}, + attrs={"scale": self._beta1}) class DecayedAdagradOptimizer(Optimizer): - """Simple Decayed Adagrad optimizer with moment state + """ + **Decayed Adagrad Optimizer** + + The original paper: http://www.jmlr.org/papers/volume12/duchi11a/duchi11a.pdf + + The update is done as follows: + + .. math:: + + moment\_out & = decay * moment + (1 - decay) * grad * grad + + param\_out & = param - \\frac{learning\_rate * grad}{\sqrt{moment\_out} + \epsilon} + + The original paper + does not have an epsilon attribute. It is added here for numerical + stability to avoid the division by zero error. + + Args: + learning_rate (float|Variable): the learning rate used to update parameters. \ + Can be a float value or a Variable with one float value as data element. + decay (float): decay rate. + epsilon (float): a small float value for numerical stability. + + Examples: + .. code-block:: python + + optimizer = fluid.optimizer.DecayedAdagrad(learning_rate=0.2) + optimizer.minimize(cost) """ _moment_acc_str = "moment" @@ -596,6 +785,7 @@ class DecayedAdagradOptimizer(Optimizer): class AdadeltaOptimizer(Optimizer): """ **Adadelta Optimizer** + Simple Adadelta optimizer with average squared grad state and average squared update state. The details of adadelta please refer to this @@ -610,7 +800,7 @@ class AdadeltaOptimizer(Optimizer): E(dx_t^2) &= \\rho * E(dx_{t-1}^2) + (1-\\rho) * (-g*learning\\_rate)^2 Args: - learning_rate(float): global leraning rate + learning_rate(float): global learning rate rho(float): rho in equation epsilon(float): epsilon in equation @@ -685,37 +875,37 @@ class RMSPropOptimizer(Optimizer): .. math:: - r(w, t) & = \\rho r(w, t-1) + (1 - \\rho)(\\nabla Q_{i}(w))^2 \\\\ + r(w, t) & = \\rho r(w, t-1) + (1 - \\rho)(\\nabla Q_{i}(w))^2 w & = w - \\frac{\\eta} {\\sqrt{r(w,t) + \\epsilon}} \\nabla Q_{i}(w) The first equation calculates moving average of the squared gradient for - each weight. Then dividing the gradient by :math: `sqrt{v(w,t)}`. + each weight. Then the gradient is divided by :math:`\\sqrt{r(w,t)}`. In some cases, adding a momentum term :math: `\\beta` is beneficial. In our implementation, Nesterov momentum is used: .. math:: - r(w, t) & = \\rho r(w, t-1) + (1 - \\rho)(\\nabla Q_{i}(w))^2 \\\\ + r(w, t) & = \\rho r(w, t-1) + (1 - \\rho)(\\nabla Q_{i}(w))^2 v(w, t) & = \\beta v(w, t-1) + \\frac{\\eta} {\\sqrt{v(w,t) + \\epsilon}} \\nabla Q_{i}(w) w & = w - v(w, t) - where, :math: `\\rho` is a hyperparameter and typical values are 0.9, 0.95 + where, :math:`\\rho` is a hyperparameter and typical values are 0.9, 0.95 and so on. :math: `beta` is the momentum term. :math: `\\epsilon` is a smoothing term to avoid division by zero, usually set somewhere in range from 1e-4 to 1e-8. Args: - learning_rate(float): global leraning rate. + learning_rate(float): global learning rate. rho(float): rho is :math: `\\rho` in equation, set 0.95 by default. epsilon(float): :math: `\\epsilon` in equation is smoothing term to avoid division by zero, set 1e-6 by default. - momentum(float): :math: `\\beta` in equation is the momentum term, + momentum(float): :math:`\\beta` in equation is the momentum term, set 0.0 by default.
Raises: @@ -792,6 +982,113 @@ class RMSPropOptimizer(Optimizer): return rmsprop_op +class FtrlOptimizer(Optimizer): + """ + FTRL (Follow The Regularized Leader) Optimizer. + + The paper that proposed Follow The Regularized Leader (FTRL): + (https://www.eecs.tufts.edu/~dsculley/papers/ad-click-prediction.pdf) + + .. math:: + + &new\_accum = squared\_accum + grad^2 + + &if (lr\_power == -0.5): + + &\quad linear\_accum += grad - \\frac{\\sqrt{new\_accum} - \\sqrt{squared\_accum}}{learning\_rate * param} + + &else: + + &\quad linear\_accum += grad - \\frac{new\_accum^{-lr\_power} - accum^{-lr\_power}}{learning\_rate * param} + + + &x = l1 * sign(linear\_accum) - linear\_accum + + &if (lr\_power == -0.5): + + &\quad y = \\frac{\\sqrt{new\_accum}}{learning\_rate} + (2 * l2) + + &\quad pre\_shrink = \\frac{x}{y} + + &\quad param = (abs(linear\_accum) > l1).select(pre\_shrink, 0.0) + + &else: + + &\quad y = \\frac{new\_accum^{-lr\_power}}{learning\_rate} + (2 * l2) + + &\quad pre\_shrink = \\frac{x}{y} + + &\quad param = (abs(linear\_accum) > l1).select(pre\_shrink, 0.0) + + &squared\_accum += grad^2 + + Args: + learning_rate (float|Variable): global learning rate. + l1 (float): the L1 regularization strength. Default 0.0. + l2 (float): the L2 regularization strength. Default 0.0. + lr_power (float): the power applied to the per-coordinate learning + rate. Default -0.5. + + Raises: + ValueError: If learning_rate is None. + + Examples: + .. code-block:: python + + optimizer = fluid.optimizer.Ftrl(0.0001) + _, params_grads = optimizer.minimize(cost) + """ + + _squared_acc_str = "squared" + _linear_acc_str = "linear" + + def __init__(self, learning_rate, l1=0.0, l2=0.0, lr_power=-0.5, **kwargs): + super(FtrlOptimizer, self).__init__( + learning_rate=learning_rate, **kwargs) + if learning_rate is None: + raise ValueError("learning_rate is not set.") + + self.type = "ftrl" + self._l1 = l1 + self._l2 = l2 + self._lr_power = lr_power + + def _create_accumulators(self, block, parameters): + if not isinstance(block, framework.Block): + raise TypeError("block is not instance of framework.Block.") + + for p in parameters: + self._add_accumulator(self._squared_acc_str, p) + self._add_accumulator(self._linear_acc_str, p) + + def _append_optimize_op(self, block, param_and_grad): + if not isinstance(block, framework.Block): + raise TypeError("block is not instance of framework.Block.") + + squared_acc = self._get_accumulator(self._squared_acc_str, + param_and_grad[0]) + linear_acc = self._get_accumulator(self._linear_acc_str, + param_and_grad[0]) + ftrl_op = block.append_op( + type=self.type, + inputs={ + "Param": param_and_grad[0], + "Grad": param_and_grad[1], + "SquaredAccumulator": squared_acc, + "LinearAccumulator": linear_acc, + "LearningRate": self._create_param_lr(param_and_grad), + }, + outputs={ + "ParamOut": param_and_grad[0], + "SquaredAccumOut": squared_acc, + "LinearAccumOut": linear_acc + }, + attrs={"l1": self._l1, + "l2": self._l2, + "lr_power": self._lr_power}) + + return ftrl_op
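The short example in the FtrlOptimizer docstring above leaves the regularization arguments at their defaults; a slightly fuller sketch, again assuming avg_cost is a scalar loss Variable and using illustrative, untuned values:

.. code-block:: python

    import paddle.fluid as fluid

    # avg_cost is assumed to be a scalar loss Variable built elsewhere.
    optimizer = fluid.optimizer.Ftrl(
        learning_rate=0.01,
        l1=0.001,       # L1 regularization strength
        l2=0.001,       # L2 regularization strength
        lr_power=-0.5)  # power applied to the per-coordinate learning rate
    _, params_grads = optimizer.minimize(avg_cost)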
+ + # We short the class name, since users will use the optimizer with the package # name. The sample code: # @@ -808,6 +1105,7 @@ Adamax = AdamaxOptimizer DecayedAdagrad = DecayedAdagradOptimizer Adadelta = AdadeltaOptimizer RMSProp = RMSPropOptimizer +Ftrl = FtrlOptimizer class ModelAverage(Optimizer): """ @@ -821,15 +1119,16 @@ class ModelAverage(Optimizer): Args: average_window_rate: The rate of average window. - params_grads: A list of parameter-grad variable pairs. min_average_window: The minimum size of average window. max_average_window: The maximum size of average window. Examples: - ... + + .. code-block:: python + + optimizer = fluid.optimizer.Momentum() - _, params_grads = optimizer.minimize(cost) - model_average = fluid.optimizer.ModelAverage(params_grads, 0.15, + optimizer.minimize(cost) + model_average = fluid.optimizer.ModelAverage(0.15, min_average_window=10000, max_average_window=20000) for pass_id in range(args.pass_num): @@ -843,7 +1142,6 @@ class ModelAverage(Optimizer): def __init__(self, average_window_rate, - params_grads=None, min_average_window=10000, max_average_window=10000, **kwargs): @@ -852,24 +1150,22 @@ self.min_average_window = min_average_window self.max_average_window = max_average_window - self.params_grads = [] if params_grads is None else params_grads - params = {} - for param, grad in self.params_grads: - if param.do_model_average != False: - params[param.name] = (param, grad) + self.params_grads = [] for param in framework.default_main_program().global_block( ).all_parameters(): - if param.name not in params and param.do_model_average != False: + if param.do_model_average != False: grad = param.block.create_var( name=unique_name.generate(".".join([param.name, 'tmp'])), dtype=param.dtype, persistable=False, stop_gradient=True) - params[param.name] = (param, grad) - self.params_grads = params.values() + self.params_grads.append((param, grad)) for param, grad in self.params_grads: - self._append_average_accumulate_op(param) + if grad is None: + continue + with param.block.program.optimized_guard([param, grad]): + self._append_average_accumulate_op(param) self.apply_program = Program() block = self.apply_program.global_block() @@ -884,29 +1180,31 @@ self._add_average_restore_op(block, param_grad) def _add_average_apply_op(self, block, param_grad): - param = block.clone_variable(param_grad[0]) - grad = block.clone_variable(param_grad[1]) - sum_1 = block.clone_variable(self._get_accumulator('sum_1', param)) - sum_2 = block.clone_variable(self._get_accumulator('sum_2', param)) - sum_3 = block.clone_variable(self._get_accumulator('sum_3', param)) - num_accumulates = block.clone_variable( + param = block._clone_variable(param_grad[0]) + grad = block._clone_variable(param_grad[1]) + sum_1 = block._clone_variable(self._get_accumulator('sum_1', param)) + sum_2 = block._clone_variable(self._get_accumulator('sum_2', param)) + sum_3 = block._clone_variable(self._get_accumulator('sum_3', param)) + num_accumulates = block._clone_variable( self._get_accumulator('num_accumulates', param)) - old_num_accumulates = block.clone_variable( + old_num_accumulates = block._clone_variable( self._get_accumulator('old_num_accumulates', param)) - num_updates = block.clone_variable( + num_updates = block._clone_variable( self._get_accumulator('num_updates', param)) # backup param value to grad layers.assign(input=param, output=grad) # param = (sum_1 + sum_2 + sum_3) / (num_accumulates + old_num_accumulates) tmp = layers.sum(x=[num_accumulates, old_num_accumulates]) sum = layers.sum(x=[sum_1, sum_2, sum_3]) - tmp = layers.cast(x=tmp, dtype='float32') - sum = layers.cast(x=sum, dtype='float32') + tmp = layers.cast( + x=tmp, dtype='float32' if self._dtype is None else self._dtype) + sum = layers.cast( + x=sum, dtype='float32' if self._dtype is None else self._dtype) layers.elementwise_div(x=sum, y=tmp, out=param) def _add_average_restore_op(self, block, param_grad): - param = block.clone_variable(param_grad[0]) - grad =
block._clone_variable(param_grad[1]) layers.assign(input=grad, output=param) def _append_average_accumulate_op(self, param): diff --git a/python/paddle/fluid/parallel_executor.py b/python/paddle/fluid/parallel_executor.py index 6b80b007e9..2a3555ebdd 100644 --- a/python/paddle/fluid/parallel_executor.py +++ b/python/paddle/fluid/parallel_executor.py @@ -12,128 +12,156 @@ # See the License for the specific language governing permissions and # limitations under the License. -import core +from __future__ import print_function import multiprocessing -import framework -import executor +from . import core +from . import framework +from . import executor import warnings import sys +import os -__all__ = ['ParallelExecutor'] +__all__ = ['ParallelExecutor', 'ExecutionStrategy', 'BuildStrategy'] + +ExecutionStrategy = core.ParallelExecutor.ExecutionStrategy +BuildStrategy = core.ParallelExecutor.BuildStrategy class ParallelExecutor(object): + """ + ParallelExecutor can run program in parallel. + + Args: + use_cuda (bool): Whether to use CUDA or not. + loss_name (str): The loss name must be set in training. Default None. + main_program (Program): The program that needs to run; if not provided, + then default_main_program will be used. Default None. + share_vars_from(ParallelExecutor): If provided, it will share variables + from the specified ParallelExecutor. Default None. + num_trainers(int): If greater than 1, NCCL will be initialized with + multiple ranks of nodes; each node should have the same number of GPUs. + Distributed training will be enabled then. Default 1. + trainer_id(int): Must be used together with num_trainers. trainer_id is the + "rank" of the current node and starts from 0. Default 0. + + Returns: + ParallelExecutor: The initialized ParallelExecutor object. + + Raises: + TypeError: If share_vars_from is provided, but not ParallelExecutor object. + + Examples: + .. code-block:: python + + train_exe = fluid.ParallelExecutor(use_cuda=True, loss_name=loss.name) + test_exe = fluid.ParallelExecutor(use_cuda=True, + main_program=test_program, + share_vars_from=train_exe) + + train_loss, = train_exe.run([loss.name], feed=feed_dict) + test_loss, = test_exe.run([loss.name], feed=feed_dict) + """
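The deprecation message in __init__ below tells users to construct ExecutionStrategy and BuildStrategy objects instead of passing num_threads or allow_op_delay, but does not show the new style end to end. A minimal sketch, assuming loss is a scalar Variable from an existing network; CPU_NUM only matters for CPU execution:

.. code-block:: python

    import os
    import paddle.fluid as fluid
    from paddle.fluid.parallel_executor import ExecutionStrategy, BuildStrategy

    # On CPU, the number of places is taken from the CPU_NUM environment
    # variable, falling back to multiprocessing.cpu_count().
    os.environ['CPU_NUM'] = '4'

    exec_strategy = ExecutionStrategy()
    exec_strategy.num_threads = 8  # replaces the old num_threads argument

    build_strategy = BuildStrategy()

    # loss is assumed to be a scalar Variable from an existing network.
    pe = fluid.ParallelExecutor(use_cuda=False,
                                loss_name=loss.name,
                                exec_strategy=exec_strategy,
                                build_strategy=build_strategy)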
+ def __init__(self, use_cuda, loss_name=None, main_program=None, - num_threads=None, - allow_op_delay=False, share_vars_from=None, - use_default_grad_scale=True): - """ - ParallelExecutor can run program in parallel. - - Args: - use_cuda(bool): Whether to use CUDA or not. - loss_name(str, default None): The loss name must set in training. - main_program(Program, default None): The program that need to run, - if not provided, then default_main_program will be used. - num_threads(int, default None): How many threads are used for - training. - allow_op_delay(bool, default False): Whether to delay and buffer - some operators together for scheduling or not, which may - improve performance in some cases, default False. - share_vars_from(ParallelExecutor, default None): If provied, - it will share variables from the specified ParallelExecutor. - use_default_grad_scale(bool, default True): If set True, a default - scale value equal to `1./device_count` would be multiplied to - gradients of each device and scaled gradients would be - aggregated. Otherwise, a customized scale value should be fed - to the network. - - Returns: - A ParallelExecutor object. - - Raises: - TypeError: If share_vars_from is provided, but not ParallelExecutor - object. - - Examples: - .. code-block:: python - - train_exe = fluid.ParallelExecutor( - use_cuda=True, loss_name=loss.name) - test_exe = fluid.ParallelExecutor( - use_cuda=True, - main_program=test_program, - share_vars_from=train_exe) - - train_loss, = train_exe.run([loss.name], feed=feed_dict) - test_loss, = test_exe.run([loss.name], feed=feed_dict) - """ + exec_strategy=None, + build_strategy=None, + num_trainers=1, + trainer_id=0, + **kwargs): + if len(kwargs) != 0: + err_msg = "" + for key in kwargs: + if key in dir(ExecutionStrategy): + err_msg += \ + "Setting {0} by constructor is deprecated. Use " \ + "strategy=ExecutionStrategy(); strategy.{0}=xxx; " \ + "pe=ParallelExecutor(exec_strategy=strategy) " \ + "instead.\n ".format(key) + elif key in dir(BuildStrategy): + err_msg += \ + "Setting {0} by constructor is deprecated. Use " \ + "strategy=BuildStrategy(); See help(" \ + "paddle.fluid.ParallelExecutor.BuildStrategy) \n".format( + key) + else: + err_msg += "Setting {0} by constructor is deprecated. Use strategy.\n".format( + key) + raise ValueError(err_msg) self._places = [] self._act_places = [] if use_cuda: - for i in xrange(core.get_cuda_device_count()): + for i in range(core.get_cuda_device_count()): p = core.Place() self._act_places.append(core.CUDAPlace(i)) p.set_place(self._act_places[-1]) self._places.append(p) else: - for i in xrange(multiprocessing.cpu_count()): + cpu_num = int( + os.environ.get('CPU_NUM', multiprocessing.cpu_count())) + for i in range(cpu_num): p = core.Place() self._act_places.append(core.CPUPlace()) p.set_place(self._act_places[-1]) self._places.append(p) assert self._places, "no place for execution" - if num_threads is None: + if exec_strategy is None: + exec_strategy = ExecutionStrategy() + exec_strategy.use_cuda = use_cuda + + if exec_strategy.num_threads == 0: if use_cuda: # Experiments on se-resnext shows that too many threads hurt # performance. Worth tunning for other models in the future. - num_threads = len(self._places) * 2 + exec_strategy.num_threads = len(self._places) * 4 else: - num_threads = min( - len(self._places) * 2, multiprocessing.cpu_count()) + cpu_num = int( + os.environ.get('CPU_NUM', multiprocessing.cpu_count())) + exec_strategy.num_threads = cpu_num * 2 + + if build_strategy is None: + build_strategy = BuildStrategy() main = main_program main = main if main else framework.default_main_program() scope = executor.global_scope() + # FIXME(Yancey1989): it's a temporary approach to determine whether the + # program is distributed; if so, call self._bcast_params() at the end of each mini-batch.
+ self.is_dist = True if "recv" in [ + op.type for op in main.global_block().ops + ] else False if share_vars_from and not isinstance(share_vars_from, ParallelExecutor): raise TypeError("share_vars_from must be ParallelExecutor.") + local_scopes = share_vars_from.executor.local_scopes( ) if share_vars_from else [] self.persistable_vars = [ - v.name - for v in filter( - lambda var: var.persistable and var.type != core.VarDesc.VarType.RAW, - main.list_vars()) + v.name for v in [ + var for var in main.list_vars() + if var.persistable and var.type != core.VarDesc.VarType.RAW + ] ] self.executor = core.ParallelExecutor( - num_threads, - True if use_cuda else False, # use_event self._places, set([ p.name for p in main.global_block().iter_parameters() if not p.stop_gradient ]), - set(self.persistable_vars), - main.desc, - loss_name if loss_name else '', - scope, - local_scopes, - allow_op_delay, - use_default_grad_scale) - + set(self.persistable_vars), main.desc, loss_name + if loss_name else '', scope, local_scopes, exec_strategy, + build_strategy, num_trainers, trainer_id) self.scope = scope - def run(self, fetch_list, feed=None, feed_dict=None): + def run(self, fetch_list, feed=None, feed_dict=None, return_numpy=True): """ Run a parallel executor with fetch_list. @@ -143,12 +171,14 @@ element in the list will be copied to each device directly. For example, if the feed is a dict: + >>> exe = ParallelExecutor() >>> # the image will be splitted into devices. If there is two devices >>> # each device will process an image with shape (24, 1, 28, 28) >>> exe.run(feed={'image': numpy.random.random(size=(48, 1, 28, 28))}) For example, if the feed is a list: + >>> exe = ParallelExecutor() >>> # each device will process each element in the list. >>> # the 1st device will process an image with shape (48, 1, 28, 28) @@ -159,22 +189,48 @@ >>> {"image": numpy.random.random(size=(32, 1, 28, 28))}, >>> ]) - Args: fetch_list(list): The fetched variable names feed(list|dict|None): The feed variables. If the feed is a dict, tensors in that dict will be splitted into each devices. If the feed is a list, each element of the list will be copied - to each device. + to each device. Default None. feed_dict: Alias for feed parameter, for backward compatibility. - This parameter is deprecated. + This parameter has been deprecated. Default None. + return_numpy(bool): Whether to convert the fetched tensors to numpy arrays. + Default: True. + + Returns: + List: The fetched result list. + + Raises: + ValueError: If the feed is a list, but its length is not equal to the + length of active places, or its elements are not dicts. + + NOTES: + 1. If the feed's type is dict, the number of data samples fed to + ParallelExecutor must be bigger than the number of active places. Otherwise, + an exception will be thrown from the C++ side. Special attention should be + paid to check whether the last batch of the dataset is bigger + than the number of active places. + 2. If there is more than one active place, the fetch result for each + variable is a list, and each element of this list is the value of the + variable on the respective active place. - Returns: fetched result list. + Examples: + .. code-block:: python + pe = fluid.ParallelExecutor(use_cuda=use_cuda, + loss_name=avg_cost.name, + main_program=fluid.default_main_program()) + loss = pe.run(feed=feeder.feed(cur_batch), + fetch_list=[avg_cost.name]) """ if feed is None and feed_dict is not None: feed = feed_dict - print >> sys.stderr, "`feed_dict` is deprecated.
Please use `feed=`" + print( + "`feed_dict` is deprecated. Please use `feed=`", + file=sys.stderr) if isinstance(feed, dict): feed_tensor_dict = dict() @@ -215,10 +271,21 @@ class ParallelExecutor(object): fetch_var_name = '@FETCHED_VAR_NAME@' self.executor.run(fetch_list, fetch_var_name) arr = self.scope.find_var(fetch_var_name).get_lod_tensor_array() + + if self.is_dist: + self._bcast_params() + + if return_numpy: + return executor.as_numpy(arr) + return [arr[i] for i in range(len(arr))] - def bcast_params(self): - self.executor.bcast_params(set(self.persistable_vars)) + def _bcast_params(self): + """ + Broadcast the parameters to other devices. It is used during + distributed training. + """ + self.executor._bcast_params(set(self.persistable_vars)) @property def device_count(self): diff --git a/python/paddle/fluid/param_attr.py b/python/paddle/fluid/param_attr.py index 1c6970441b..afae577656 100644 --- a/python/paddle/fluid/param_attr.py +++ b/python/paddle/fluid/param_attr.py @@ -12,8 +12,10 @@ # See the License for the specific language governing permissions and # limitations under the License. -from initializer import Initializer, Xavier, Constant -from regularizer import WeightDecayRegularizer +import six + +from .initializer import Initializer, Xavier, Constant +from .regularizer import WeightDecayRegularizer __all__ = [ 'ParamAttr', @@ -22,6 +24,35 @@ __all__ = [ class ParamAttr(object): + """ + Parameter attributes object. To fine-tuning network training process, user + can set parameter's attributes to control training details. Such as learning rate, + regularization, trainable, do_model_average and the method to initialize param. + + + Args: + name(str): The parameter's name. Default None. + initializer(Initializer): The method to initial this parameter. Default None. + learning_rate(float): The parameter's learning rate. The learning rate when + optimize is :math:`global\_lr * parameter\_lr * scheduler\_factor`. + Default 1.0. + regularizer(WeightDecayRegularizer): Regularization factor. Default None. + trainable(bool): Whether this parameter is trainable. Default True. + gradient_clip(BaseGradientClipAttr): The method to clip this parameter's + gradient. Default None. + do_model_average(bool): Whether this parameter should do model average. + Default False. + + Examples: + .. code-block:: python + + w_param_attrs = fluid.ParamAttr(name="fc_weight", + learning_rate=0.5, + regularizer=fluid.L2Decay(1.0), + trainable=True) + y_predict = fluid.layers.fc(input=x, size=10, param_attr=w_param_attrs) + """ + def __init__(self, name=None, initializer=None, @@ -29,7 +60,7 @@ class ParamAttr(object): regularizer=None, trainable=True, gradient_clip=None, - do_model_average=None): + do_model_average=False): self.name = name self.initializer = initializer self.learning_rate = learning_rate @@ -38,7 +69,17 @@ class ParamAttr(object): self.gradient_clip = gradient_clip self.model_average = do_model_average - def set_default_initializer(self, initializer): + def _set_default_initializer(self, initializer): + """ + Set the default initializer, the initializer should be Constant, + Uniform, Normal, Xavier, MSRA. + + Args: + initializer(Initializer): the initializer to set. 
+ + Returns: + None + """ if initializer is None: if self.initializer is None: raise ValueError("ParamAttr.initializer is not set") @@ -49,32 +90,73 @@ class ParamAttr(object): self.initializer = initializer - def set_default_param_initializer(self): - self.set_default_initializer(Xavier()) + def _set_default_param_initializer(self): + """ + Set the default initializer for the parameter with Xavier. + + Args: + None. + + Returns: + None. + """ + self._set_default_initializer(Xavier()) - def set_default_bias_initializer(self): - self.set_default_initializer(Constant(0.0)) + def _set_default_bias_initializer(self): + """ + Set the default initializer for the bias with Constant(0.0). + + Args: + None. + + Returns: + None. + """ + self._set_default_initializer(Constant(0.0)) @staticmethod - def to_attr(arg): + def _to_attr(arg): + """ + Create ParamAttr[s]. + + Args: + arg: Arguments to initialize ParamAttr[s]. arg's type can be + str, Initializer, float, WeightDecayRegularizer, BaseGradientClipAttr, + bool, ParamAttr, or a list of the above types. + + Returns: + ParamAttr[s]: ParamAttr[s] initialized with arg. + + Raises: + TypeError: If arg cannot be used to initialize a ParamAttr. + """ if arg is None: return ParamAttr() elif isinstance(arg, list) or isinstance(arg, tuple): - return [ParamAttr.to_attr(a) for a in arg] + return [ParamAttr._to_attr(a) for a in arg] elif isinstance(arg, ParamAttr): return arg - elif isinstance(arg, str) or isinstance(arg, unicode): + elif isinstance(arg, six.string_types): return ParamAttr(name=arg) elif isinstance(arg, Initializer): return ParamAttr(initializer=arg) elif isinstance(arg, WeightDecayRegularizer): return ParamAttr(regularizer=arg) elif isinstance(arg, bool): - return ParamAttr.to_attr(None) if arg else False + return ParamAttr._to_attr(None) if arg else False else: raise TypeError("{0} cast to ParamAttr".format(type(arg))) - def to_kwargs(self, with_initializer=False): + def _to_kwargs(self, with_initializer=False): + """ + Returns the attributes of this parameter. + + Args: + with_initializer(bool): Whether to add initializer attr. + + Returns: + Parameter attributes(map): The attributes of this parameter. + """ kwargs = { 'name': self.name, 'optimize_attr': { @@ -92,9 +174,27 @@ class ParamAttr(object): class WeightNormParamAttr(ParamAttr): """ - Used for weight normalization. Any field in ParamAttr can also be set here. - Besides, an extra field dim can be set to indicate the dimension except - which to normalize. + Used for weight Norm. Weight Norm is a reparameterization of the weight vectors + in a neural network that decouples the length of those weight vectors from + their direction. Weight Norm has been implemented as discussed in this + paper: `Weight Normalization: A Simple Reparameterization to Accelerate + Training of Deep Neural Networks + `_. + + Args: + dim(int): The dimension except which the weight is normalized. Default None. + kwargs: Any field in ParamAttr. Default None. + + Examples: + .. code-block:: python + + data = fluid.layers.data(name="data", shape=[3, 32, 32], dtype="float32") + fc = fluid.layers.fc(input=data, + size=1000, + param_attr=WeightNormParamAttr( + dim=None, + name='weight_norm_param')) + """ # List to record the parameters reparameterized by weight normalization.
# If these parameters are treated as Variable rather than Parameter, diff --git a/python/paddle/fluid/profiler.py b/python/paddle/fluid/profiler.py index 04fd05cc33..01983a8303 100644 --- a/python/paddle/fluid/profiler.py +++ b/python/paddle/fluid/profiler.py @@ -12,11 +12,14 @@ # See the License for the specific language governing permissions and # limitations under the License. -import core +from . import core from contextlib import contextmanager import os -__all__ = ['cuda_profiler', 'reset_profiler', 'profiler'] +__all__ = [ + 'cuda_profiler', 'reset_profiler', 'profiler', 'start_profiler', + 'stop_profiler' +] NVPROF_CONFIG = [ "gpustarttimestamp", @@ -39,6 +42,9 @@ def cuda_profiler(output_file, output_mode=None, config=None): counters/options for profiling by `config` argument. The default config is ['gpustarttimestamp', 'gpustarttimestamp', 'gridsize3d', 'threadblocksize', 'streamid', 'enableonstart 0', 'conckerneltrace']. + Then users can use the NVIDIA Visual Profiler + (https://developer.nvidia.com/nvidia-visual-profiler) tool to load + this output file to visualize results. Args: output_file (string) : The output file name, the result will be @@ -47,6 +53,33 @@ Comma separated values format. It should be 'kvp' or 'csv'. config (list of string) : The profiler options and counters can refer to "Compute Command Line Profiler User Guide". + + Raises: + ValueError: If `output_mode` is not in ['kvp', 'csv']. + + Examples: + + .. code-block:: python + + import numpy as np + import paddle.fluid as fluid + import paddle.fluid.profiler as profiler + + epoc = 8 + dshape = [4, 3, 28, 28] + data = fluid.layers.data(name='data', shape=[3, 28, 28], dtype='float32') + conv = fluid.layers.conv2d(data, 20, 3, stride=[1, 1], padding=[1, 1]) + + place = fluid.CUDAPlace(0) + exe = fluid.Executor(place) + exe.run(fluid.default_startup_program()) + + output_file = 'cuda_profiler.txt' + with profiler.cuda_profiler(output_file, 'csv') as nvprof: + for i in range(epoc): + input = np.random.random(dshape).astype('float32') + exe.run(fluid.default_main_program(), feed={'data': input}) + # then use NVIDIA Visual Profiler (nvvp) to load this output file + # to visualize results. """ if output_mode is None: output_mode = 'csv' @@ -66,26 +99,73 @@ def reset_profiler(): - """The profiler clear interface. - reset_profiler will clear the previous time record. + """ + Clear the previous time record. This interface does not work for + `fluid.profiler.cuda_profiler`, it only works for + `fluid.profiler.start_profiler`, `fluid.profiler.stop_profiler`, + and `fluid.profiler.profiler`. + + Examples: + + .. code-block:: python + + import paddle.fluid.profiler as profiler + with profiler.profiler('CPU', 'total', '/tmp/profile'): + for iter in range(10): + if iter == 2: + profiler.reset_profiler() + # ... """ core.reset_profiler() -@contextmanager -def profiler(state, sorted_key=None, profile_path='/tmp/profile'): - """The profiler interface. - Different from cuda_profiler, this profiler can be used to profile both CPU - and GPU program. By defalut, it records the CPU and GPU operator kernels, - if you want to profile other program, you can refer the profiling tutorial - to add more records. +def start_profiler(state): + """ + Enable the profiler. Users can use `fluid.profiler.start_profiler` and + `fluid.profiler.stop_profiler` to profile a code fragment, as an + alternative to the `fluid.profiler.profiler` interface.
+ + Args: + state (string) : The profiling state, which should be 'CPU', 'GPU' + or 'All'. 'CPU' means only profile CPU. 'GPU' means profiling + GPU as well. 'All' also generates timeline. + + Raises: + ValueError: If `state` is not in ['CPU', 'GPU', 'All']. + + Examples: + + .. code-block:: python + + import paddle.fluid.profiler as profiler + + profiler.start_profiler('GPU') + for iter in range(10): + if iter == 2: + profiler.reset_profiler() + # execute each iteration + profiler.stop_profiler('total', '/tmp/profile') + """ + if core.is_profiler_enabled(): + return + if state not in ['CPU', 'GPU', "All"]: + raise ValueError("The state must be 'CPU' or 'GPU' or 'All'.") + if state == "GPU": + prof_state = core.ProfilerState.kCUDA + elif state == "CPU": + prof_state = core.ProfilerState.kCPU + else: + prof_state = core.ProfilerState.kAll + core.enable_profiler(prof_state) + + +def stop_profiler(sorted_key=None, profile_path='/tmp/profile'): + """ + Stop the profiler. Users can use `fluid.profiler.start_profiler` and + `fluid.profiler.stop_profiler` to profile a code fragment, as an + alternative to the `fluid.profiler.profiler` interface. Args: - state (string) : The profiling state, which should be 'CPU' or 'GPU', - telling the profiler to use CPU timer or GPU timer for profiling. - Although users may have already specified the execution place - (CPUPlace/CUDAPlace) in the begining, for flexibility the profiler - would not inherit this place. sorted_key (string) : If None, the profiling results will be printed in the order of first end time of events. Otherwise, the profiling results will be sorted by the this flag. This flag should be one @@ -97,18 +177,26 @@ The `ave` means sorting by the average execution time. profile_path (string) : If state == 'All', it will write a profile proto output file. - """ - if state not in ['CPU', 'GPU', "All"]: - raise ValueError("The state must be 'CPU' or 'GPU' or 'All'.") - if state == "GPU": - prof_state = core.ProfilerState.kCUDA - elif state == "CPU": - prof_state = core.ProfilerState.kCPU - else: - prof_state = core.ProfilerState.kAll - core.enable_profiler(prof_state) - yield + Raises: + ValueError: If `sorted_key` is not in + ['calls', 'total', 'max', 'min', 'ave']. + + Examples: + + .. code-block:: python + + import paddle.fluid.profiler as profiler + + profiler.start_profiler('GPU') + for iter in range(10): + if iter == 2: + profiler.reset_profiler() + # execute each iteration + profiler.stop_profiler('total', '/tmp/profile') + """ + if not core.is_profiler_enabled(): + return sorted_key = 'default' if sorted_key is None else sorted_key if sorted_key not in ['default', 'calls', 'total', 'max', 'min', 'ave']: raise ValueError("The sorted_key must be None or in 'calls', 'total', " @@ -124,3 +212,58 @@ # TODO(qingqing) : redirect C++ ostream to Python stream. # with core.ostream_redirect(stdout=True, stderr=True): core.disable_profiler(key_map[sorted_key], profile_path) + + +@contextmanager +def profiler(state, sorted_key=None, profile_path='/tmp/profile'): + """The profiler interface. + Different from cuda_profiler, this profiler can be used to profile both CPU + and GPU programs. By default, it records the CPU and GPU operator kernels; + if you want to profile other programs, you can refer to the profiling tutorial + to add more records in C++ code. + + If the state == 'All', a profile proto file will be written to + `profile_path`.
This file records timeline information during the execution. + Then users can visualize this file to see the timeline; please refer to + https://github.com/PaddlePaddle/Paddle/blob/develop/doc/fluid/howto/optimization/timeline.md + + Args: + state (string) : The profiling state, which should be 'CPU' or 'GPU', + telling the profiler to use CPU timer or GPU timer for profiling. + Although users may have already specified the execution place + (CPUPlace/CUDAPlace) in the beginning, for flexibility the profiler + would not inherit this place. + sorted_key (string) : If None, the profiling results will be printed + in the order of first end time of events. Otherwise, the profiling + results will be sorted by this flag. This flag should be one + of 'calls', 'total', 'max', 'min' or 'ave'. + The `calls` means sorting by the number of calls. + The `total` means sorting by the total execution time. + The `max` means sorting by the maximum execution time. + The `min` means sorting by the minimum execution time. + The `ave` means sorting by the average execution time. + profile_path (string) : If state == 'All', it will write a profile + proto output file. + + Raises: + ValueError: If `state` is not in ['CPU', 'GPU', 'All']. If `sorted_key` is + not in ['calls', 'total', 'max', 'min', 'ave']. + + Examples: + + .. code-block:: python + + import paddle.fluid.profiler as profiler + + with profiler.profiler('All', 'total', '/tmp/profile') as prof: + for pass_id in range(pass_num): + for batch_id, data in enumerate(train_reader()): + exe.run(fluid.default_main_program(), + feed=feeder.feed(data), + fetch_list=[], + use_program_cache=True) + # ... + """ + start_profiler(state) + yield + stop_profiler(sorted_key, profile_path) diff --git a/python/paddle/fluid/recordio_writer.py b/python/paddle/fluid/recordio_writer.py index 5accaacd53..93b38ad3fa 100644 --- a/python/paddle/fluid/recordio_writer.py +++ b/python/paddle/fluid/recordio_writer.py @@ -12,10 +12,12 @@ # See the License for the specific language governing permissions and # limitations under the License. -import core +import os import contextlib - -__all__ = ['convert_reader_to_recordio_file'] +from . import core +__all__ = [ + 'convert_reader_to_recordio_file', 'convert_reader_to_recordio_files' +] @contextlib.contextmanager @@ -34,6 +36,45 @@ def convert_reader_to_recordio_file( compressor=core.RecordIOWriter.Compressor.Snappy, max_num_records=1000, feed_order=None): + """ + Convert a Python Reader to a recordio file. + + Please see :ref:`api_guide_python_reader` and :ref:`api_guide_reader_op` for + details. + + Examples: + + >>> import paddle.fluid as fluid + >>> import paddle.dataset.mnist as mnist + >>> import paddle + >>> + >>> tmp_program = fluid.Program() + >>> with fluid.program_guard(tmp_program): + >>> img = fluid.layers.data(name='img', shape=[784]) + >>> label = fluid.layers.data(name='label', shape=[1], dtype='int64') + >>> feeder = fluid.DataFeeder(feed_list=[img, label], place=fluid.CPUPlace()) + >>> # mnist.recordio will be generated in current directory + >>> fluid.recordio_writer.convert_reader_to_recordio_file( + >>> filename="mnist.recordio", + >>> reader_creator=paddle.batch(mnist.train(), batch_size=32), + >>> feeder=feeder) + + Args: + filename(str): The recordio filename. + reader_creator(callable): The Python Reader Creator. See + :ref:`api_guide_python_reader`. + feeder(DataFeeder): The DataFeeder instance.
diff --git a/python/paddle/fluid/recordio_writer.py b/python/paddle/fluid/recordio_writer.py
index 5accaacd53..93b38ad3fa 100644
--- a/python/paddle/fluid/recordio_writer.py
+++ b/python/paddle/fluid/recordio_writer.py
@@ -12,10 +12,12 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-import core
+import os
 import contextlib
-
-__all__ = ['convert_reader_to_recordio_file']
+from . import core
+__all__ = [
+    'convert_reader_to_recordio_file', 'convert_reader_to_recordio_files'
+]


 @contextlib.contextmanager
@@ -34,6 +36,45 @@ def convert_reader_to_recordio_file(
         compressor=core.RecordIOWriter.Compressor.Snappy,
         max_num_records=1000,
         feed_order=None):
+    """
+    Convert a Python Reader to a recordio file.
+
+    Please see :ref:`api_guide_python_reader` and :ref:`api_guide_reader_op`
+    for details.
+
+    Examples:
+
+        >>> import paddle.fluid as fluid
+        >>> import paddle.dataset.mnist as mnist
+        >>> import paddle
+        >>>
+        >>> tmp_program = fluid.Program()
+        >>> with fluid.program_guard(tmp_program):
+        >>>     img = fluid.layers.data(name='img', shape=[784])
+        >>>     label = fluid.layers.data(name='label', shape=[1], dtype='int64')
+        >>> feeder = fluid.DataFeeder(feed_list=[img, label], place=fluid.CPUPlace())
+        >>> # mnist.recordio will be generated in current directory
+        >>> fluid.recordio_writer.convert_reader_to_recordio_file(
+        >>>                     filename="mnist.recordio",
+        >>>                     reader_creator=paddle.batch(mnist.train(), batch_size=32),
+        >>>                     feeder=feeder)
+
+    Args:
+        filename(str): The recordio filename.
+        reader_creator(callable): The Python Reader Creator. See
+            :ref:`api_guide_python_reader`.
+        feeder(DataFeeder): The DataFeeder instance. Used to convert
+            :code:`reader_creator` to :code:`lod_tensor`.
+        compressor: Must be one of fluid.core.RecordIOWriter.Compressor.Snappy
+            or fluid.core.RecordIOWriter.Compressor.NoCompress. :code:`Snappy`
+            is used by default.
+        max_num_records(int): Maximum number of records in one chunk. Each
+            record is one return value of the reader function.
+        feed_order(list): The order of variable names that the reader returns.
+
+    Returns:
+        int: the number of records saved.
+    """
     if feed_order is None:
         feed_order = feeder.feed_names
     counter = 0
@@ -46,3 +87,47 @@ def convert_reader_to_recordio_file(
         writer.complete_append_tensor()
         counter += 1
     return counter
+
+
+def convert_reader_to_recordio_files(
+        filename,
+        batch_per_file,
+        reader_creator,
+        feeder,
+        compressor=core.RecordIOWriter.Compressor.Snappy,
+        max_num_records=1000,
+        feed_order=None):
+    """
+    Convert a Python reader to many recordio files.
+
+    This API is basically the same as :code:`convert_reader_to_recordio_file`,
+    except that it creates many recordio files. Each file contains at
+    most :code:`batch_per_file` records.
+
+    Please refer to
+    :ref:`api_fluid_recordio_writer_convert_reader_to_recordio_file` for more
+    details.
+    """
+    if feed_order is None:
+        feed_order = feeder.feed_names
+    f_name, f_ext = os.path.splitext(filename)
+    assert (f_ext == ".recordio")
+
+    lines = []
+    f_idx = 0
+    counter = 0
+    for idx, batch in enumerate(reader_creator()):
+        lines.append(batch)
+        if idx >= batch_per_file and idx % batch_per_file == 0:
+            filename = "%s-%05d%s" % (f_name, f_idx, f_ext)
+            with create_recordio_writer(filename, compressor,
+                                        max_num_records) as writer:
+                for l in lines:
+                    res = feeder.feed(l)
+                    for each in feed_order:
+                        writer.append_tensor(res[each])
+                    writer.complete_append_tensor()
+                    counter += 1
+                lines = []
+                f_idx += 1
+    return counter
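A matching sketch for :code:`convert_reader_to_recordio_files`, reusing the hypothetical `feeder` and MNIST reader from the doctest above. With :code:`batch_per_file=100`, it writes a series of files named mnist-00000.recordio, mnist-00001.recordio, and so on:

.. code-block:: python

    fluid.recordio_writer.convert_reader_to_recordio_files(
        filename="mnist.recordio",  # must end in ".recordio"
        batch_per_file=100,
        reader_creator=paddle.batch(mnist.train(), batch_size=32),
        feeder=feeder)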
diff --git a/python/paddle/fluid/regularizer.py b/python/paddle/fluid/regularizer.py
index c006bd9a66..6eaac4432d 100644
--- a/python/paddle/fluid/regularizer.py
+++ b/python/paddle/fluid/regularizer.py
@@ -12,13 +12,10 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-import framework
+from . import framework
 from . import core
-__all__ = [
-    'append_regularization_ops', 'WeightDecayRegularizer', 'L1Decay', 'L2Decay',
-    'L1DecayRegularizer', 'L2DecayRegularizer'
-]
+__all__ = ['L1Decay', 'L2Decay', 'L1DecayRegularizer', 'L2DecayRegularizer']


 def append_regularization_ops(parameters_and_grads, regularization=None):
@@ -36,7 +33,8 @@ def append_regularization_ops(parameters_and_grads, regularization=None):
         set. It will be applied with regularizer.

     Returns:
-        list of (parameters, gradients) pair with the regularized gradient
+        list[(Variable, Variable)]: list of (parameter, gradient) \
+        pairs with the regularized gradient

     Raises:
         Exception: Unknown regularization type
@@ -47,27 +45,27 @@ def append_regularization_ops(parameters_and_grads, regularization=None):
         if grad is None:
             params_and_grads.append((param, grad))
             continue
-
-        regularization_term = None
-        if param.regularizer is not None:
-            # Add variable for regularization term in grad block
-            regularization_term = param.regularizer(param, grad, grad.block)
-        elif regularization is not None:
-            regularization_term = regularization(param, grad, grad.block)
-
-        # If no regularization specified, then we don't need to do anything
-        if regularization_term is None:
+        with param.block.program.optimized_guard([param, grad]):
+            regularization_term = None
+            if param.regularizer is not None:
+                # Add variable for regularization term in grad block
+                regularization_term = param.regularizer(param, grad, grad.block)
+            elif regularization is not None:
+                regularization_term = regularization(param, grad, grad.block)
+
+            # If no regularization specified, then we don't need to do anything
+            if regularization_term is None:
+                params_and_grads.append((param, grad))
+                continue
+
+            assert grad.shape == regularization_term.shape
+
+            grad.block.append_op(
+                type='elementwise_add',
+                inputs={"X": grad,
+                        "Y": regularization_term},
+                outputs={"Out": grad})
+            params_and_grads.append((param, grad))
-            continue
-
-        assert grad.shape == regularization_term.shape
-
-        grad.block.append_op(
-            type='elementwise_add',
-            inputs={"X": grad,
-                    "Y": regularization_term},
-            outputs={"Out": grad})
-        params_and_grads.append((param, grad))

     return params_and_grads
@@ -99,6 +97,24 @@ class WeightDecayRegularizer(object):
 class L2DecayRegularizer(WeightDecayRegularizer):
     """Implements the L2 Weight Decay Regularization
+
+    Small values of L2 can help prevent overfitting of the training data.
+
+    .. math::
+
+        L2WeightDecay = reg\_coeff * parameter
+
+    Args:
+        regularization_coeff(float): regularization coefficient
+
+    Examples:
+        .. code-block:: python
+
+            optimizer = fluid.optimizer.Adagrad(
+                learning_rate=1e-4,
+                regularization=fluid.regularizer.L2DecayRegularizer(
+                    regularization_coeff=0.1))
+            optimizer.minimize(avg_cost)
     """

     def __init__(self, regularization_coeff=0.0):
@@ -126,14 +142,20 @@ class L2DecayRegularizer(WeightDecayRegularizer):
                 dtype="float32", shape=param.shape, lod_level=param.lod_level)
         if grad.type == core.VarDesc.VarType.SELECTED_ROWS:
+            idx = block.create_var(
+                dtype="int64",
+                shape=param.shape,
+                type=core.VarDesc.VarType.LOD_TENSOR)
             decay = block.create_var(
                 dtype="float32",
                 shape=param.shape,
                 type=core.VarDesc.VarType.SELECTED_ROWS)
+            block.append_op(
+                type='extract_rows', inputs={'X': grad}, outputs={'Out': idx})
             block.append_op(
                 type='lookup_table',
                 inputs={'W': param,
-                        'Ids': grad},
+                        'Ids': idx},
                 outputs={'Out': decay},
                 attrs={'is_sparse': True})
             param = decay
@@ -153,6 +175,27 @@ class L2DecayRegularizer(WeightDecayRegularizer):
 class L1DecayRegularizer(WeightDecayRegularizer):
     """Implements the L1 Weight Decay Regularization
+
+    L1 regularization encourages sparsity.
+
+    .. math::
+
+        L1WeightDecay = reg\_coeff * sign(parameter)
+
+    Args:
+        regularization_coeff(float): regularization coefficient
+
+    Examples:
+        ..
code-block:: python + + program = fluid.framework.Program() + block = program.global_block() + mul_x = block.create_parameter( + dtype="float32", + shape=[5, 10], + lod_level=0, + name="mul.x", + regularizer=fluid.regularizer.L1DecayRegularizer(0.5)) """ def __init__(self, regularization_coeff=0.0): @@ -179,14 +222,20 @@ class L1DecayRegularizer(WeightDecayRegularizer): dtype="float32", shape=param.shape, lod_level=param.lod_level) if grad.type == core.VarDesc.VarType.SELECTED_ROWS: + idx = block.create_var( + dtype="int64", + shape=param.shape, + type=core.VarDesc.VarType.LOD_TENSOR) decay = block.create_var( dtype="float32", shape=param.shape, type=core.VarDesc.VarType.SELECTED_ROWS) + block.append_op( + type='extract_rows', inputs={'X': grad}, outputs={'Out': idx}) block.append_op( type='lookup_table', inputs={'W': param, - 'Ids': grad}, + 'Ids': idx}, outputs={'Out': decay}, attrs={'is_sparse': True}) diff --git a/python/paddle/fluid/tests/book/CMakeLists.txt b/python/paddle/fluid/tests/book/CMakeLists.txt index 673c965b66..ee734f3c78 100644 --- a/python/paddle/fluid/tests/book/CMakeLists.txt +++ b/python/paddle/fluid/tests/book/CMakeLists.txt @@ -5,3 +5,5 @@ string(REPLACE ".py" "" TEST_OPS "${TEST_OPS}") foreach(src ${TEST_OPS}) py_test(${src} SRCS ${src}.py) endforeach() + +add_subdirectory(high-level-api) diff --git a/python/paddle/fluid/tests/book/high-level-api/CMakeLists.txt b/python/paddle/fluid/tests/book/high-level-api/CMakeLists.txt new file mode 100644 index 0000000000..efa5ee2d06 --- /dev/null +++ b/python/paddle/fluid/tests/book/high-level-api/CMakeLists.txt @@ -0,0 +1,16 @@ +file(GLOB TEST_OPS RELATIVE "${CMAKE_CURRENT_SOURCE_DIR}" "test_*.py") +string(REPLACE ".py" "" TEST_OPS "${TEST_OPS}") + +# default test +foreach(src ${TEST_OPS}) + py_test(${src} SRCS ${src}.py) +endforeach() + +add_subdirectory(fit_a_line) +add_subdirectory(recognize_digits) +add_subdirectory(image_classification) +add_subdirectory(understand_sentiment) +add_subdirectory(label_semantic_roles) +add_subdirectory(word2vec) +add_subdirectory(recommender_system) +add_subdirectory(machine_translation) diff --git a/python/paddle/fluid/tests/book/high-level-api/fit_a_line/CMakeLists.txt b/python/paddle/fluid/tests/book/high-level-api/fit_a_line/CMakeLists.txt new file mode 100644 index 0000000000..673c965b66 --- /dev/null +++ b/python/paddle/fluid/tests/book/high-level-api/fit_a_line/CMakeLists.txt @@ -0,0 +1,7 @@ +file(GLOB TEST_OPS RELATIVE "${CMAKE_CURRENT_SOURCE_DIR}" "test_*.py") +string(REPLACE ".py" "" TEST_OPS "${TEST_OPS}") + +# default test +foreach(src ${TEST_OPS}) + py_test(${src} SRCS ${src}.py) +endforeach() diff --git a/python/paddle/fluid/tests/book/high-level-api/fit_a_line/test_fit_a_line.py b/python/paddle/fluid/tests/book/high-level-api/fit_a_line/test_fit_a_line.py new file mode 100644 index 0000000000..36a1a223cf --- /dev/null +++ b/python/paddle/fluid/tests/book/high-level-api/fit_a_line/test_fit_a_line.py @@ -0,0 +1,133 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +import paddle +import paddle.fluid as fluid +import contextlib +import numpy +import unittest + +# train reader +BATCH_SIZE = 20 + +train_reader = paddle.batch( + paddle.reader.shuffle( + paddle.dataset.uci_housing.train(), buf_size=500), + batch_size=BATCH_SIZE) + +test_reader = paddle.batch( + paddle.reader.shuffle( + paddle.dataset.uci_housing.test(), buf_size=500), + batch_size=BATCH_SIZE) + + +def inference_program(): + x = fluid.layers.data(name='x', shape=[13], dtype='float32') + y_predict = fluid.layers.fc(input=x, size=1, act=None) + return y_predict + + +def train_program(): + y = fluid.layers.data(name='y', shape=[1], dtype='float32') + y_predict = inference_program() + + loss = fluid.layers.square_error_cost(input=y_predict, label=y) + avg_loss = fluid.layers.mean(loss) + + return avg_loss + + +def optimizer_func(): + return fluid.optimizer.SGD(learning_rate=0.001) + + +def train(use_cuda, train_program, params_dirname): + place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() + + trainer = fluid.Trainer( + train_func=train_program, place=place, optimizer_func=optimizer_func) + + def event_handler(event): + if isinstance(event, fluid.EndStepEvent): + if event.step == 10: + test_metrics = trainer.test( + reader=test_reader, feed_order=['x', 'y']) + print(test_metrics) + ''' + ... + ['25.768919467926025'] + ['15.343549569447836'] + ... + ''' + if params_dirname is not None: + trainer.save_params(params_dirname) + trainer.stop() + + trainer.train( + reader=train_reader, + num_epochs=100, + event_handler=event_handler, + feed_order=['x', 'y']) + + +# infer +def infer(use_cuda, inference_program, params_dirname=None): + if params_dirname is None: + return + + place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() + inferencer = fluid.Inferencer( + infer_func=inference_program, param_path=params_dirname, place=place) + + batch_size = 10 + tensor_x = numpy.random.uniform(0, 10, [batch_size, 13]).astype("float32") + + results = inferencer.infer({'x': tensor_x}) + print("infer results: ", results[0]) + + +def main(use_cuda): + if use_cuda and not fluid.core.is_compiled_with_cuda(): + return + + # Directory for saving the trained model + params_dirname = "fit_a_line.inference.model" + + train(use_cuda, train_program, params_dirname) + infer(use_cuda, inference_program, params_dirname) + + +class TestFitALine(unittest.TestCase): + def test_cpu(self): + with self.program_scope_guard(): + with fluid.unique_name.guard(): + main(use_cuda=False) + + def test_cuda(self): + with self.program_scope_guard(): + with fluid.unique_name.guard(): + main(use_cuda=True) + + @contextlib.contextmanager + def program_scope_guard(self): + prog = fluid.Program() + startup_prog = fluid.Program() + scope = fluid.core.Scope() + with fluid.scope_guard(scope): + with fluid.program_guard(prog, startup_prog): + yield + + +if __name__ == '__main__': + unittest.main() diff --git a/python/paddle/fluid/tests/book/high-level-api/image_classification/CMakeLists.txt b/python/paddle/fluid/tests/book/high-level-api/image_classification/CMakeLists.txt new file mode 100644 index 0000000000..673c965b66 --- /dev/null +++ b/python/paddle/fluid/tests/book/high-level-api/image_classification/CMakeLists.txt @@ -0,0 +1,7 @@ +file(GLOB TEST_OPS RELATIVE "${CMAKE_CURRENT_SOURCE_DIR}" "test_*.py") +string(REPLACE ".py" "" TEST_OPS "${TEST_OPS}") + +# default test +foreach(src ${TEST_OPS}) + py_test(${src} SRCS 
${src}.py) +endforeach() diff --git a/python/paddle/fluid/tests/book/high-level-api/image_classification/cifar10_small_test_set.py b/python/paddle/fluid/tests/book/high-level-api/image_classification/cifar10_small_test_set.py new file mode 100644 index 0000000000..9e4c384d92 --- /dev/null +++ b/python/paddle/fluid/tests/book/high-level-api/image_classification/cifar10_small_test_set.py @@ -0,0 +1,83 @@ +# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +CIFAR dataset. + +This module will download dataset from +https://www.cs.toronto.edu/~kriz/cifar.html and parse train/test set into +paddle reader creators. + +The CIFAR-10 dataset consists of 60000 32x32 colour images in 10 classes, +with 6000 images per class. There are 50000 training images and 10000 test +images. + +The CIFAR-100 dataset is just like the CIFAR-10, except it has 100 classes +containing 600 images each. There are 500 training images and 100 testing +images per class. + +""" + +import itertools +import numpy +import paddle.v2.dataset.common +import tarfile +from six.moves import cPickle as pickle +from six.moves import zip + +__all__ = ['train10'] + +URL_PREFIX = 'https://www.cs.toronto.edu/~kriz/' +CIFAR10_URL = URL_PREFIX + 'cifar-10-python.tar.gz' +CIFAR10_MD5 = 'c58f30108f718f92721af3b95e74349a' + + +def reader_creator(filename, sub_name, batch_size=None): + def read_batch(batch): + data = batch['data'] + labels = batch.get('labels', batch.get('fine_labels', None)) + assert labels is not None + for sample, label in zip(data, labels): + yield (sample / 255.0).astype(numpy.float32), int(label) + + def reader(): + with tarfile.open(filename, mode='r') as f: + names = (each_item.name for each_item in f + if sub_name in each_item.name) + + batch_count = 0 + for name in names: + batch = pickle.load(f.extractfile(name)) + for item in read_batch(batch): + if isinstance(batch_size, int) and batch_count > batch_size: + break + batch_count += 1 + yield item + + return reader + + +def train10(batch_size=None): + """ + CIFAR-10 training set creator. + + It returns a reader creator, each sample in the reader is image pixels in + [0, 1] and label in [0, 9]. 
+ + :return: Training reader creator + :rtype: callable + """ + return reader_creator( + paddle.v2.dataset.common.download(CIFAR10_URL, 'cifar', CIFAR10_MD5), + 'data_batch', + batch_size=batch_size) diff --git a/python/paddle/fluid/tests/book/image_classification/notest_image_classification_resnet.py b/python/paddle/fluid/tests/book/high-level-api/image_classification/test_image_classification_resnet.py similarity index 72% rename from python/paddle/fluid/tests/book/image_classification/notest_image_classification_resnet.py rename to python/paddle/fluid/tests/book/high-level-api/image_classification/test_image_classification_resnet.py index 17db38797c..a1f62db093 100644 --- a/python/paddle/fluid/tests/book/image_classification/notest_image_classification_resnet.py +++ b/python/paddle/fluid/tests/book/high-level-api/image_classification/test_image_classification_resnet.py @@ -12,11 +12,10 @@ # See the License for the specific language governing permissions and # limitations under the License. -from __future__ import print_function - import paddle import paddle.fluid as fluid import numpy +import cifar10_small_test_set def resnet_cifar10(input, depth=32): @@ -81,46 +80,53 @@ def train_network(): cost = fluid.layers.cross_entropy(input=predict, label=label) avg_cost = fluid.layers.mean(cost) accuracy = fluid.layers.accuracy(input=predict, label=label) - return avg_cost, accuracy + return [avg_cost, accuracy] + + +def optimizer_func(): + return fluid.optimizer.Adam(learning_rate=0.001) -def train(use_cuda, save_path): +def train(use_cuda, train_program, params_dirname): BATCH_SIZE = 128 EPOCH_NUM = 1 train_reader = paddle.batch( paddle.reader.shuffle( - paddle.dataset.cifar.train10(), buf_size=128 * 10), - batch_size=BATCH_SIZE) + cifar10_small_test_set.train10(batch_size=10), buf_size=128 * 10), + batch_size=BATCH_SIZE, + drop_last=False) test_reader = paddle.batch( - paddle.dataset.cifar.test10(), batch_size=BATCH_SIZE) + paddle.dataset.cifar.test10(), batch_size=BATCH_SIZE, drop_last=False) def event_handler(event): - if isinstance(event, fluid.EndIteration): - if (event.batch_id % 10) == 0: - avg_cost, accuracy = trainer.test(reader=test_reader) + if isinstance(event, fluid.EndStepEvent): + avg_cost, accuracy = trainer.test( + reader=test_reader, feed_order=['pixel', 'label']) - print('BatchID {1:04}, Loss {2:2.2}, Acc {3:2.2}'.format( - event.batch_id + 1, avg_cost, accuracy)) + print('Loss {0:2.2}, Acc {1:2.2}'.format(avg_cost, accuracy)) - if accuracy > 0.01: # Low threshold for speeding up CI - trainer.params.save(save_path) - return + if accuracy > 0.01: # Low threshold for speeding up CI + if params_dirname is not None: + trainer.save_params(params_dirname) + return place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() trainer = fluid.Trainer( - train_network, - optimizer=fluid.optimizer.Adam(learning_rate=0.001), - place=place, - event_handler=event_handler) - trainer.train(train_reader, EPOCH_NUM, event_handler=event_handler) + train_func=train_program, optimizer_func=optimizer_func, place=place) + trainer.train( + reader=train_reader, + num_epochs=EPOCH_NUM, + event_handler=event_handler, + feed_order=['pixel', 'label']) -def infer(use_cuda, save_path): - params = fluid.Params(save_path) + +def infer(use_cuda, inference_program, params_dirname=None): place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() - inferencer = fluid.Inferencer(inference_network, params, place=place) + inferencer = fluid.Inferencer( + infer_func=inference_program, param_path=params_dirname, 
place=place)

     # The input's dimension of conv should be 4-D or 5-D.
     # Use normalized image pixels as input data, which should be in the range
@@ -135,8 +141,16 @@ def main(use_cuda):
     if use_cuda and not fluid.core.is_compiled_with_cuda():
         return
     save_path = "image_classification_resnet.inference.model"
-    train(use_cuda, save_path)
-    infer(use_cuda, save_path)
+
+    train(
+        use_cuda=use_cuda,
+        train_program=train_network,
+        params_dirname=save_path)
+
+    infer(
+        use_cuda=use_cuda,
+        inference_program=inference_network,
+        params_dirname=save_path)


 if __name__ == '__main__':
diff --git a/python/paddle/fluid/tests/book/image_classification/notest_image_classification_vgg.py b/python/paddle/fluid/tests/book/high-level-api/image_classification/test_image_classification_vgg.py
similarity index 67%
rename from python/paddle/fluid/tests/book/image_classification/notest_image_classification_vgg.py
rename to python/paddle/fluid/tests/book/high-level-api/image_classification/test_image_classification_vgg.py
index e83afeed2f..8429551765 100644
--- a/python/paddle/fluid/tests/book/image_classification/notest_image_classification_vgg.py
+++ b/python/paddle/fluid/tests/book/high-level-api/image_classification/test_image_classification_vgg.py
@@ -12,11 +12,10 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-from __future__ import print_function
-
 import paddle
 import paddle.fluid as fluid
 import numpy
+import cifar10_small_test_set


 def vgg16_bn_drop(input):
@@ -60,46 +59,51 @@ def train_network():
     cost = fluid.layers.cross_entropy(input=predict, label=label)
     avg_cost = fluid.layers.mean(cost)
     accuracy = fluid.layers.accuracy(input=predict, label=label)
-    return avg_cost, accuracy
+    return [avg_cost, accuracy]


-def train(use_cuda, save_path):
-    BATCH_SIZE = 128
-    EPOCH_NUM = 1
+def optimizer_func():
+    return fluid.optimizer.Adam(learning_rate=0.001)

+
+def train(use_cuda, train_program, params_dirname):
+    BATCH_SIZE = 128
     train_reader = paddle.batch(
         paddle.reader.shuffle(
-            paddle.dataset.cifar.train10(), buf_size=128 * 10),
-        batch_size=BATCH_SIZE)
+            cifar10_small_test_set.train10(batch_size=10), buf_size=128 * 10),
+        batch_size=BATCH_SIZE,
+        drop_last=False)

     test_reader = paddle.batch(
-        paddle.dataset.cifar.test10(), batch_size=BATCH_SIZE)
+        paddle.dataset.cifar.test10(), batch_size=BATCH_SIZE, drop_last=False)

     def event_handler(event):
-        if isinstance(event, fluid.EndIteration):
-            if (event.batch_id % 10) == 0:
-                avg_cost, accuracy = trainer.test(reader=test_reader)
+        if isinstance(event, fluid.EndStepEvent):
+            avg_cost, accuracy = trainer.test(
+                reader=test_reader, feed_order=['pixel', 'label'])

-                print('BatchID {1:04}, Loss {2:2.2}, Acc {3:2.2}'.format(
-                    event.batch_id + 1, avg_cost, accuracy))
+            print('Loss {0:2.2}, Acc {1:2.2}'.format(avg_cost, accuracy))

-                if accuracy > 0.01:  # Low threshold for speeding up CI
-                    trainer.params.save(save_path)
-                    return
+            if accuracy > 0.01:  # Low threshold for speeding up CI
+                if params_dirname is not None:
+                    trainer.save_params(params_dirname)
+                return

     place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
     trainer = fluid.Trainer(
-        train_network,
-        optimizer=fluid.optimizer.Adam(learning_rate=0.001),
-        place=place,
-        event_handler=event_handler)
-    trainer.train(train_reader, EPOCH_NUM, event_handler=event_handler)
+        train_func=train_program, place=place, optimizer_func=optimizer_func)

+    trainer.train(
+        reader=train_reader,
+        num_epochs=1,
+        event_handler=event_handler,
+        feed_order=['pixel',
'label'])


-def infer(use_cuda, save_path):
-    params = fluid.Params(save_path)
+
+def infer(use_cuda, inference_program, params_dirname=None):
     place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
-    inferencer = fluid.Inferencer(inference_network, params, place=place)
+    inferencer = fluid.Inferencer(
+        infer_func=inference_program, param_path=params_dirname, place=place)

     # The input's dimension of conv should be 4-D or 5-D.
     # Use normalized image pixels as input data, which should be in the range
@@ -114,8 +118,16 @@ def main(use_cuda):
     if use_cuda and not fluid.core.is_compiled_with_cuda():
         return
     save_path = "image_classification_vgg.inference.model"
-    train(use_cuda, save_path)
-    infer(use_cuda, save_path)
+
+    train(
+        use_cuda=use_cuda,
+        train_program=train_network,
+        params_dirname=save_path)
+
+    infer(
+        use_cuda=use_cuda,
+        inference_program=inference_network,
+        params_dirname=save_path)


 if __name__ == '__main__':
diff --git a/python/paddle/fluid/tests/book/high-level-api/label_semantic_roles/CMakeLists.txt b/python/paddle/fluid/tests/book/high-level-api/label_semantic_roles/CMakeLists.txt
new file mode 100644
index 0000000000..673c965b66
--- /dev/null
+++ b/python/paddle/fluid/tests/book/high-level-api/label_semantic_roles/CMakeLists.txt
@@ -0,0 +1,7 @@
+file(GLOB TEST_OPS RELATIVE "${CMAKE_CURRENT_SOURCE_DIR}" "test_*.py")
+string(REPLACE ".py" "" TEST_OPS "${TEST_OPS}")
+
+# default test
+foreach(src ${TEST_OPS})
+    py_test(${src} SRCS ${src}.py)
+endforeach()
diff --git a/python/paddle/fluid/tests/book/high-level-api/label_semantic_roles/test_label_semantic_roles_newapi.py b/python/paddle/fluid/tests/book/high-level-api/label_semantic_roles/test_label_semantic_roles_newapi.py
new file mode 100755
index 0000000000..e3602e2d56
--- /dev/null
+++ b/python/paddle/fluid/tests/book/high-level-api/label_semantic_roles/test_label_semantic_roles_newapi.py
@@ -0,0 +1,264 @@
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
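+
+# This test trains a semantic role labeling model on the CoNLL-05 data
+# set: eight input features (the word, its context words, the predicate
+# and a mark) are embedded, passed through a stack of alternating
+# forward/backward LSTMs, and decoded with a linear chain CRF; see
+# lstm_net() and train_program() below.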
+
+import math
+import sys
+
+import paddle
+import paddle.fluid as fluid
+import numpy as np
+
+WORD_DICT, VERB_DICT, LABEL_DICT = paddle.dataset.conll05.get_dict()
+WORD_DICT_LEN = len(WORD_DICT)
+LABEL_DICT_LEN = len(LABEL_DICT)
+PRED_DICT_LEN = len(VERB_DICT)
+MARK_DICT_LEN = 2
+IS_SPARSE = True
+BATCH_SIZE = 10
+EMBEDDING_NAME = 'emb'
+
+
+def lstm_net():
+    WORD_DIM = 32
+    MARK_DIM = 5
+    HIDDEN_DIM = 512
+    DEPTH = 8
+
+    # Data definitions
+    word = fluid.layers.data(
+        name='word_data', shape=[1], dtype='int64', lod_level=1)
+    predicate = fluid.layers.data(
+        name='verb_data', shape=[1], dtype='int64', lod_level=1)
+    ctx_n2 = fluid.layers.data(
+        name='ctx_n2_data', shape=[1], dtype='int64', lod_level=1)
+    ctx_n1 = fluid.layers.data(
+        name='ctx_n1_data', shape=[1], dtype='int64', lod_level=1)
+    ctx_0 = fluid.layers.data(
+        name='ctx_0_data', shape=[1], dtype='int64', lod_level=1)
+    ctx_p1 = fluid.layers.data(
+        name='ctx_p1_data', shape=[1], dtype='int64', lod_level=1)
+    ctx_p2 = fluid.layers.data(
+        name='ctx_p2_data', shape=[1], dtype='int64', lod_level=1)
+    mark = fluid.layers.data(
+        name='mark_data', shape=[1], dtype='int64', lod_level=1)
+
+    # 8 features
+    predicate_embedding = fluid.layers.embedding(
+        input=predicate,
+        size=[PRED_DICT_LEN, WORD_DIM],
+        dtype='float32',
+        is_sparse=IS_SPARSE,
+        param_attr='vemb')
+
+    mark_embedding = fluid.layers.embedding(
+        input=mark,
+        size=[MARK_DICT_LEN, MARK_DIM],
+        dtype='float32',
+        is_sparse=IS_SPARSE)
+
+    word_input = [word, ctx_n2, ctx_n1, ctx_0, ctx_p1, ctx_p2]
+    emb_layers = [
+        fluid.layers.embedding(
+            size=[WORD_DICT_LEN, WORD_DIM],
+            input=x,
+            param_attr=fluid.ParamAttr(name=EMBEDDING_NAME))
+        for x in word_input
+        #name=EMBEDDING_NAME, trainable=False)) for x in word_input
+    ]
+    emb_layers.append(predicate_embedding)
+    emb_layers.append(mark_embedding)
+
+    hidden_0_layers = [
+        fluid.layers.fc(input=emb, size=HIDDEN_DIM, act='tanh')
+        for emb in emb_layers
+    ]
+
+    hidden_0 = fluid.layers.sums(input=hidden_0_layers)
+
+    lstm_0 = fluid.layers.dynamic_lstm(
+        input=hidden_0,
+        size=HIDDEN_DIM,
+        candidate_activation='relu',
+        gate_activation='sigmoid',
+        cell_activation='sigmoid')
+
+    # stack L-LSTM and R-LSTM with direct edges
+    input_tmp = [hidden_0, lstm_0]
+
+    for i in range(1, DEPTH):
+        mix_hidden = fluid.layers.sums(input=[
+            fluid.layers.fc(input=input_tmp[0], size=HIDDEN_DIM, act='tanh'),
+            fluid.layers.fc(input=input_tmp[1], size=HIDDEN_DIM, act='tanh')
+        ])
+
+        lstm = fluid.layers.dynamic_lstm(
+            input=mix_hidden,
+            size=HIDDEN_DIM,
+            candidate_activation='relu',
+            gate_activation='sigmoid',
+            cell_activation='sigmoid',
+            is_reverse=((i % 2) == 1))
+
+        input_tmp = [mix_hidden, lstm]
+
+    feature_out = fluid.layers.sums(input=[
+        fluid.layers.fc(input=input_tmp[0], size=LABEL_DICT_LEN, act='tanh'),
+        fluid.layers.fc(input=input_tmp[1], size=LABEL_DICT_LEN, act='tanh')
+    ])
+
+    return feature_out
+
+
+def inference_program():
+    predict = lstm_net()
+
+    return predict
+
+
+def train_program():
+    MIX_HIDDEN_LR = 1e-3
+
+    predict = lstm_net()
+    target = fluid.layers.data(
+        name='target', shape=[1], dtype='int64', lod_level=1)
+    crf_cost = fluid.layers.linear_chain_crf(
+        input=predict,
+        label=target,
+        param_attr=fluid.ParamAttr(
+            name='crfw', learning_rate=MIX_HIDDEN_LR))
+    avg_cost = fluid.layers.mean(crf_cost)
+
+    return [avg_cost]
+
+
+def optimize_func():
+    return fluid.optimizer.SGD(learning_rate=fluid.layers.exponential_decay(
+        learning_rate=0.01, decay_steps=100000, decay_rate=0.5, staircase=True))
+
+
+def train(use_cuda, train_program,
params_dirname): + place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() + + trainer = fluid.Trainer( + train_func=train_program, place=place, optimizer_func=optimize_func) + + feed_order = [ + 'word_data', 'ctx_n2_data', 'ctx_n1_data', 'ctx_0_data', 'ctx_p1_data', + 'ctx_p2_data', 'verb_data', 'mark_data', 'target' + ] + + #embedding_param = fluid.global_scope().find_var( + # EMBEDDING_NAME).get_tensor() + #embedding_param.set( + # load_parameter(conll05.get_embedding(), WORD_DICT_LEN, WORD_DIM), + # place) + + def event_handler(event): + if isinstance(event, fluid.EndEpochEvent): + test_reader = paddle.batch( + paddle.dataset.conll05.test(), batch_size=BATCH_SIZE) + avg_cost_set = trainer.test( + reader=test_reader, feed_order=feed_order) + + # get avg cost + avg_cost = np.array(avg_cost_set).mean() + + print("avg_cost: %s" % avg_cost) + + if float(avg_cost) < 100.0: # Large value to increase CI speed + trainer.save_params(params_dirname) + else: + print( + ('BatchID {0}, Test Loss {1:0.2}'.format(event.epoch + 1, + float(avg_cost)))) + if math.isnan(float(avg_cost)): + sys.exit("got NaN loss, training failed.") + + elif isinstance(event, fluid.EndStepEvent): + print("Step {0}, Epoch {1} Metrics {2}".format( + event.step, event.epoch, list(map(np.array, event.metrics)))) + if event.step == 1: # Run 2 iterations to speed CI + trainer.save_params(params_dirname) + trainer.stop() + + train_reader = paddle.batch( + paddle.reader.shuffle( + paddle.dataset.conll05.test(), buf_size=8192), + batch_size=BATCH_SIZE) + trainer.train( + num_epochs=1, + event_handler=event_handler, + reader=train_reader, + feed_order=feed_order) + + +def infer(use_cuda, inference_program, params_dirname): + place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() + inferencer = fluid.Inferencer( + inference_program, param_path=params_dirname, place=place) + + # Setup input by creating LoDTensor to represent sequence of words. + # Here each word is the basic element of the LoDTensor and the shape of + # each word (base_shape) should be [1] since it is simply an index to + # look up for the corresponding word vector. + # Suppose the recursive_sequence_lengths info is set to [[3, 4, 2]], + # which has only one level of detail. Then the created LoDTensor will have only + # one higher level structure (sequence of words, or sentence) than the basic + # element (word). Hence the LoDTensor will hold data for three sentences of + # length 3, 4 and 2, respectively. + # Note that recursive_sequence_lengths should be a list of lists. 
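+    # Concretely: with recursive_seq_lens = [[3, 4, 2]] and base_shape = [1],
+    # every tensor created below has shape [3 + 4 + 2, 1] = [9, 1], i.e.
+    # nine words grouped into three sentences of length 3, 4 and 2.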
+ recursive_seq_lens = [[3, 4, 2]] + base_shape = [1] + # The range of random integers is [low, high] + word = fluid.create_random_int_lodtensor( + recursive_seq_lens, base_shape, place, low=0, high=WORD_DICT_LEN - 1) + ctx_n2 = fluid.create_random_int_lodtensor( + recursive_seq_lens, base_shape, place, low=0, high=WORD_DICT_LEN - 1) + ctx_n1 = fluid.create_random_int_lodtensor( + recursive_seq_lens, base_shape, place, low=0, high=WORD_DICT_LEN - 1) + ctx_0 = fluid.create_random_int_lodtensor( + recursive_seq_lens, base_shape, place, low=0, high=WORD_DICT_LEN - 1) + ctx_p1 = fluid.create_random_int_lodtensor( + recursive_seq_lens, base_shape, place, low=0, high=WORD_DICT_LEN - 1) + ctx_p2 = fluid.create_random_int_lodtensor( + recursive_seq_lens, base_shape, place, low=0, high=WORD_DICT_LEN - 1) + pred = fluid.create_random_int_lodtensor( + recursive_seq_lens, base_shape, place, low=0, high=PRED_DICT_LEN - 1) + mark = fluid.create_random_int_lodtensor( + recursive_seq_lens, base_shape, place, low=0, high=MARK_DICT_LEN - 1) + + results = inferencer.infer( + { + 'word_data': word, + 'ctx_n2_data': ctx_n2, + 'ctx_n1_data': ctx_n1, + 'ctx_0_data': ctx_0, + 'ctx_p1_data': ctx_p1, + 'ctx_p2_data': ctx_p2, + 'verb_data': pred, + 'mark_data': mark + }, + return_numpy=False) + + print("infer results: ", np.array(results[0]).shape) + + +def main(use_cuda): + if use_cuda and not fluid.core.is_compiled_with_cuda(): + return + params_dirname = "label_semantic_roles.inference.model" + train(use_cuda, train_program, params_dirname) + infer(use_cuda, inference_program, params_dirname) + + +if __name__ == '__main__': + for use_cuda in (False, True): + main(use_cuda=use_cuda) diff --git a/python/paddle/fluid/tests/book/high-level-api/machine_translation/CMakeLists.txt b/python/paddle/fluid/tests/book/high-level-api/machine_translation/CMakeLists.txt new file mode 100644 index 0000000000..673c965b66 --- /dev/null +++ b/python/paddle/fluid/tests/book/high-level-api/machine_translation/CMakeLists.txt @@ -0,0 +1,7 @@ +file(GLOB TEST_OPS RELATIVE "${CMAKE_CURRENT_SOURCE_DIR}" "test_*.py") +string(REPLACE ".py" "" TEST_OPS "${TEST_OPS}") + +# default test +foreach(src ${TEST_OPS}) + py_test(${src} SRCS ${src}.py) +endforeach() diff --git a/python/paddle/fluid/tests/book/high-level-api/machine_translation/test_machine_translation.py b/python/paddle/fluid/tests/book/high-level-api/machine_translation/test_machine_translation.py new file mode 100644 index 0000000000..6fb0c85a8b --- /dev/null +++ b/python/paddle/fluid/tests/book/high-level-api/machine_translation/test_machine_translation.py @@ -0,0 +1,322 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
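+
+# This test builds a small encoder-decoder translation model on the
+# WMT14 data set: encoder() encodes the source sentence with an LSTM
+# and takes its last step as the context, train_decoder() unrolls a
+# DynamicRNN over the target words for cross-entropy training, and
+# decode() performs beam search inside a While loop for inference.
+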
+import contextlib + +import numpy as np +import paddle +import paddle.fluid as fluid +import paddle.fluid.framework as framework +import paddle.fluid.layers as pd +from paddle.fluid.executor import Executor +from functools import partial +import unittest +import os + +dict_size = 30000 +source_dict_dim = target_dict_dim = dict_size +hidden_dim = 32 +word_dim = 16 +batch_size = 2 +max_length = 8 +topk_size = 50 +trg_dic_size = 10000 +beam_size = 2 + +decoder_size = hidden_dim + + +def encoder(is_sparse): + # encoder + src_word_id = pd.data( + name="src_word_id", shape=[1], dtype='int64', lod_level=1) + src_embedding = pd.embedding( + input=src_word_id, + size=[dict_size, word_dim], + dtype='float32', + is_sparse=is_sparse, + param_attr=fluid.ParamAttr(name='vemb')) + + fc1 = pd.fc(input=src_embedding, size=hidden_dim * 4, act='tanh') + lstm_hidden0, lstm_0 = pd.dynamic_lstm(input=fc1, size=hidden_dim * 4) + encoder_out = pd.sequence_last_step(input=lstm_hidden0) + return encoder_out + + +def train_decoder(context, is_sparse): + # decoder + trg_language_word = pd.data( + name="target_language_word", shape=[1], dtype='int64', lod_level=1) + trg_embedding = pd.embedding( + input=trg_language_word, + size=[dict_size, word_dim], + dtype='float32', + is_sparse=is_sparse, + param_attr=fluid.ParamAttr(name='vemb')) + + rnn = pd.DynamicRNN() + with rnn.block(): + current_word = rnn.step_input(trg_embedding) + pre_state = rnn.memory(init=context) + current_state = pd.fc(input=[current_word, pre_state], + size=decoder_size, + act='tanh') + + current_score = pd.fc(input=current_state, + size=target_dict_dim, + act='softmax') + rnn.update_memory(pre_state, current_state) + rnn.output(current_score) + + return rnn() + + +def decode(context, is_sparse): + init_state = context + array_len = pd.fill_constant(shape=[1], dtype='int64', value=max_length) + counter = pd.zeros(shape=[1], dtype='int64', force_cpu=True) + + # fill the first element with init_state + state_array = pd.create_array('float32') + pd.array_write(init_state, array=state_array, i=counter) + + # ids, scores as memory + ids_array = pd.create_array('int64') + scores_array = pd.create_array('float32') + + init_ids = pd.data(name="init_ids", shape=[1], dtype="int64", lod_level=2) + init_scores = pd.data( + name="init_scores", shape=[1], dtype="float32", lod_level=2) + + pd.array_write(init_ids, array=ids_array, i=counter) + pd.array_write(init_scores, array=scores_array, i=counter) + + cond = pd.less_than(x=counter, y=array_len) + + while_op = pd.While(cond=cond) + with while_op.block(): + pre_ids = pd.array_read(array=ids_array, i=counter) + pre_state = pd.array_read(array=state_array, i=counter) + pre_score = pd.array_read(array=scores_array, i=counter) + + # expand the lod of pre_state to be the same with pre_score + pre_state_expanded = pd.sequence_expand(pre_state, pre_score) + + pre_ids_emb = pd.embedding( + input=pre_ids, + size=[dict_size, word_dim], + dtype='float32', + is_sparse=is_sparse) + + # use rnn unit to update rnn + current_state = pd.fc(input=[pre_state_expanded, pre_ids_emb], + size=decoder_size, + act='tanh') + current_state_with_lod = pd.lod_reset(x=current_state, y=pre_score) + # use score to do beam search + current_score = pd.fc(input=current_state_with_lod, + size=target_dict_dim, + act='softmax') + topk_scores, topk_indices = pd.topk(current_score, k=beam_size) + # calculate accumulated scores after topk to reduce computation cost + accu_scores = pd.elementwise_add( + x=pd.log(topk_scores), y=pd.reshape( + 
pre_score, shape=[-1]), axis=0) + selected_ids, selected_scores = pd.beam_search( + pre_ids, + pre_score, + topk_indices, + accu_scores, + beam_size, + end_id=10, + level=0) + + pd.increment(x=counter, value=1, in_place=True) + + # update the memories + pd.array_write(current_state, array=state_array, i=counter) + pd.array_write(selected_ids, array=ids_array, i=counter) + pd.array_write(selected_scores, array=scores_array, i=counter) + + # update the break condition: up to the max length or all candidates of + # source sentences have ended. + length_cond = pd.less_than(x=counter, y=array_len) + finish_cond = pd.logical_not(pd.is_empty(x=selected_ids)) + pd.logical_and(x=length_cond, y=finish_cond, out=cond) + + translation_ids, translation_scores = pd.beam_search_decode( + ids=ids_array, scores=scores_array, beam_size=beam_size, end_id=10) + + # return init_ids, init_scores + + return translation_ids, translation_scores + + +def train_program(is_sparse): + context = encoder(is_sparse) + rnn_out = train_decoder(context, is_sparse) + label = pd.data( + name="target_language_next_word", shape=[1], dtype='int64', lod_level=1) + cost = pd.cross_entropy(input=rnn_out, label=label) + avg_cost = pd.mean(cost) + return avg_cost + + +def optimizer_func(): + return fluid.optimizer.Adagrad( + learning_rate=1e-4, + regularization=fluid.regularizer.L2DecayRegularizer( + regularization_coeff=0.1)) + + +def train(use_cuda, is_sparse, is_local=True): + EPOCH_NUM = 1 + + if use_cuda and not fluid.core.is_compiled_with_cuda(): + return + place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() + + train_reader = paddle.batch( + paddle.reader.shuffle( + paddle.dataset.wmt14.train(dict_size), buf_size=1000), + batch_size=batch_size) + + feed_order = [ + 'src_word_id', 'target_language_word', 'target_language_next_word' + ] + + def event_handler(event): + if isinstance(event, fluid.EndStepEvent): + print('pass_id=' + str(event.epoch) + ' batch=' + str(event.step)) + if event.step == 10: + trainer.stop() + + trainer = fluid.Trainer( + train_func=partial(train_program, is_sparse), + place=place, + optimizer_func=optimizer_func) + + trainer.train( + reader=train_reader, + num_epochs=EPOCH_NUM, + event_handler=event_handler, + feed_order=feed_order) + + +def decode_main(use_cuda, is_sparse): + if use_cuda and not fluid.core.is_compiled_with_cuda(): + return + place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() + + context = encoder(is_sparse) + translation_ids, translation_scores = decode(context, is_sparse) + + exe = Executor(place) + exe.run(framework.default_startup_program()) + + init_ids_data = np.array([1 for _ in range(batch_size)], dtype='int64') + init_scores_data = np.array( + [1. 
for _ in range(batch_size)], dtype='float32') + init_ids_data = init_ids_data.reshape((batch_size, 1)) + init_scores_data = init_scores_data.reshape((batch_size, 1)) + init_recursive_seq_lens = [1] * batch_size + init_recursive_seq_lens = [init_recursive_seq_lens, init_recursive_seq_lens] + + init_ids = fluid.create_lod_tensor(init_ids_data, init_recursive_seq_lens, + place) + init_scores = fluid.create_lod_tensor(init_scores_data, + init_recursive_seq_lens, place) + + train_data = paddle.batch( + paddle.reader.shuffle( + paddle.dataset.wmt14.train(dict_size), buf_size=1000), + batch_size=batch_size) + + feed_order = ['src_word_id'] + feed_list = [ + framework.default_main_program().global_block().var(var_name) + for var_name in feed_order + ] + feeder = fluid.DataFeeder(feed_list, place) + + for data in train_data(): + feed_dict = feeder.feed([[x[0]] for x in data]) + feed_dict['init_ids'] = init_ids + feed_dict['init_scores'] = init_scores + + result_ids, result_scores = exe.run( + framework.default_main_program(), + feed=feed_dict, + fetch_list=[translation_ids, translation_scores], + return_numpy=False) + print(result_ids.recursive_sequence_lengths()) + break + + +class TestMachineTranslation(unittest.TestCase): + pass + + +@contextlib.contextmanager +def scope_prog_guard(): + prog = fluid.Program() + startup_prog = fluid.Program() + scope = fluid.core.Scope() + with fluid.scope_guard(scope): + with fluid.program_guard(prog, startup_prog): + yield + + +def inject_test_train(use_cuda, is_sparse): + f_name = 'test_{0}_{1}_train'.format('cuda' if use_cuda else 'cpu', 'sparse' + if is_sparse else 'dense') + + def f(*args): + with scope_prog_guard(): + train(use_cuda, is_sparse) + + setattr(TestMachineTranslation, f_name, f) + + +def inject_test_decode(use_cuda, is_sparse, decorator=None): + f_name = 'test_{0}_{1}_decode'.format('cuda' + if use_cuda else 'cpu', 'sparse' + if is_sparse else 'dense') + + def f(*args): + with scope_prog_guard(): + decode_main(use_cuda, is_sparse) + + if decorator is not None: + f = decorator(f) + + setattr(TestMachineTranslation, f_name, f) + + +for _use_cuda_ in (False, True): + for _is_sparse_ in (False, True): + inject_test_train(_use_cuda_, _is_sparse_) + +for _use_cuda_ in (False, True): + for _is_sparse_ in (False, True): + + _decorator_ = None + if _use_cuda_: + _decorator_ = unittest.skip( + reason='Beam Search does not support CUDA!') + + inject_test_decode( + is_sparse=_is_sparse_, use_cuda=_use_cuda_, decorator=_decorator_) + +if __name__ == '__main__': + unittest.main() diff --git a/python/paddle/fluid/tests/book/high-level-api/recognize_digits/CMakeLists.txt b/python/paddle/fluid/tests/book/high-level-api/recognize_digits/CMakeLists.txt new file mode 100644 index 0000000000..673c965b66 --- /dev/null +++ b/python/paddle/fluid/tests/book/high-level-api/recognize_digits/CMakeLists.txt @@ -0,0 +1,7 @@ +file(GLOB TEST_OPS RELATIVE "${CMAKE_CURRENT_SOURCE_DIR}" "test_*.py") +string(REPLACE ".py" "" TEST_OPS "${TEST_OPS}") + +# default test +foreach(src ${TEST_OPS}) + py_test(${src} SRCS ${src}.py) +endforeach() diff --git a/python/paddle/fluid/tests/book/notest_recognize_digits/notest_recognize_digits_conv.py b/python/paddle/fluid/tests/book/high-level-api/recognize_digits/test_recognize_digits_conv.py similarity index 54% rename from python/paddle/fluid/tests/book/notest_recognize_digits/notest_recognize_digits_conv.py rename to python/paddle/fluid/tests/book/high-level-api/recognize_digits/test_recognize_digits_conv.py index a8282c71f8..898807db6f 
100644 --- a/python/paddle/fluid/tests/book/notest_recognize_digits/notest_recognize_digits_conv.py +++ b/python/paddle/fluid/tests/book/high-level-api/recognize_digits/test_recognize_digits_conv.py @@ -11,9 +11,10 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -from __future__ import print_function + import argparse import paddle.fluid as fluid +import paddle.fluid.core as core import paddle import sys import numpy @@ -21,7 +22,6 @@ import unittest import math import sys import os -import paddle.v2.dataset as dataset BATCH_SIZE = 64 @@ -55,46 +55,62 @@ def train_program(): cost = fluid.layers.cross_entropy(input=predict, label=label) avg_cost = fluid.layers.mean(cost) acc = fluid.layers.accuracy(input=predict, label=label) - return avg_cost, acc + return [avg_cost, acc] + +def optimizer_func(): + return fluid.optimizer.Adam(learning_rate=0.001) -def train(use_cuda, save_dirname): + +def train(use_cuda, train_program, params_dirname): place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() - optimizer = fluid.optimizer.Adam(learning_rate=0.001) - trainer = fluid.Trainer(train_program, place=place, optimizer=optimizer) + trainer = fluid.Trainer( + train_func=train_program, + place=place, + optimizer_func=optimizer_func, + parallel=True) def event_handler(event): - if isinstance(event, fluid.EndIteration): - avg_cost, acc = event.values + if isinstance(event, fluid.EndEpochEvent): + test_reader = paddle.batch( + paddle.dataset.mnist.test(), batch_size=BATCH_SIZE) + avg_cost, acc = trainer.test( + reader=test_reader, feed_order=['img', 'label']) + print("avg_cost: %s" % avg_cost) print("acc : %s" % acc) - if (event.batch_id + 1) % 10 == 0: - test_metrics = trainer.test(reader=dataset.mnist.test()) - avg_cost_set = test_metrics[0] - acc_set = test_metrics[1] - - # get test acc and loss - acc = numpy.array(acc_set).mean() - avg_cost = numpy.array(avg_cost_set).mean() - if float(acc) > 0.2: # Smaller value to increase CI speed - trainer.save_params(save_dirname) - else: - print('BatchID {0}, Test Loss {1:0.2}, Acc {2:0.2}'.format( - event.batch_id + 1, float(avg_cost), float(acc))) - if math.isnan(float(avg_cost)): - sys.exit("got NaN loss, training failed.") + if acc > 0.2: # Smaller value to increase CI speed + trainer.save_params(params_dirname) + else: + print('BatchID {0}, Test Loss {1:0.2}, Acc {2:0.2}'.format( + event.epoch + 1, avg_cost, acc)) + if math.isnan(avg_cost): + sys.exit("got NaN loss, training failed.") + elif isinstance(event, fluid.EndStepEvent): + print( + ("Step {0}, Epoch {1} Metrics {2}".format( + event.step, event.epoch, + list(map(numpy.array, event.metrics))))) + + train_reader = paddle.batch( + paddle.reader.shuffle( + paddle.dataset.mnist.train(), buf_size=500), + batch_size=BATCH_SIZE) trainer.train( - reader=dataset.mnist.train(), num_pass=100, event_handler=event_handler) + num_epochs=1, + event_handler=event_handler, + reader=train_reader, + feed_order=['img', 'label']) -def infer(use_cuda, save_dirname=None): +def infer(use_cuda, inference_program, params_dirname=None): place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() inferencer = fluid.Inferencer( - inference_program, param_path=save_dirname, place=place) + infer_func=inference_program, param_path=params_dirname, place=place) batch_size = 1 tensor_img = numpy.random.uniform(-1.0, 1.0, @@ -106,13 +122,19 @@ def infer(use_cuda, save_dirname=None): def 
main(use_cuda): - save_dirname = "recognize_digits_conv.inference.model" + params_dirname = "recognize_digits_conv.inference.model" # call train() with is_local argument to run distributed train - train(use_cuda=use_cuda, save_dirname=save_dirname) - infer(use_cuda=use_cuda, save_dirname=save_dirname) + train( + use_cuda=use_cuda, + train_program=train_program, + params_dirname=params_dirname) + infer( + use_cuda=use_cuda, + inference_program=inference_program, + params_dirname=params_dirname) if __name__ == '__main__': - for use_cuda in (False, True): - main(use_cuda=use_cuda) + # for use_cuda in (False, True): + main(use_cuda=core.is_compiled_with_cuda()) diff --git a/python/paddle/fluid/tests/book/notest_recognize_digits/notest_recognize_digits_mlp.py b/python/paddle/fluid/tests/book/high-level-api/recognize_digits/test_recognize_digits_mlp.py similarity index 55% rename from python/paddle/fluid/tests/book/notest_recognize_digits/notest_recognize_digits_mlp.py rename to python/paddle/fluid/tests/book/high-level-api/recognize_digits/test_recognize_digits_mlp.py index 3efa931d58..6dd64be315 100644 --- a/python/paddle/fluid/tests/book/notest_recognize_digits/notest_recognize_digits_mlp.py +++ b/python/paddle/fluid/tests/book/high-level-api/recognize_digits/test_recognize_digits_mlp.py @@ -11,7 +11,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -from __future__ import print_function + import argparse import paddle.fluid as fluid import paddle @@ -21,7 +21,6 @@ import unittest import math import sys import os -import paddle.v2.dataset as dataset BATCH_SIZE = 64 @@ -42,46 +41,54 @@ def train_program(): cost = fluid.layers.cross_entropy(input=predict, label=label) avg_cost = fluid.layers.mean(cost) acc = fluid.layers.accuracy(input=predict, label=label) - return avg_cost, acc + return [avg_cost, acc] + + +def optimizer_func(): + return fluid.optimizer.Adam(learning_rate=0.001) -def train(use_cuda, save_dirname): +def train(use_cuda, train_program, params_dirname): place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() - optimizer = fluid.optimizer.Adam(learning_rate=0.001) - trainer = fluid.Trainer(train_program, place=place, optimizer=optimizer) + trainer = fluid.Trainer( + train_func=train_program, place=place, optimizer_func=optimizer_func) def event_handler(event): - if isinstance(event, fluid.EndIteration): - avg_cost, acc = event.values + if isinstance(event, fluid.EndEpochEvent): + test_reader = paddle.batch( + paddle.dataset.mnist.test(), batch_size=BATCH_SIZE) + avg_cost, acc = trainer.test( + reader=test_reader, feed_order=['img', 'label']) + print("avg_cost: %s" % avg_cost) print("acc : %s" % acc) - if (event.batch_id + 1) % 10 == 0: - test_metrics = trainer.test(reader=dataset.mnist.test()) - avg_cost_set = test_metrics[0] - acc_set = test_metrics[1] - - # get test acc and loss - acc = numpy.array(acc_set).mean() - avg_cost = numpy.array(avg_cost_set).mean() - if float(acc) > 0.2: # Smaller value to increase CI speed - trainer.save_params(save_dirname) - else: - print('BatchID {0}, Test Loss {1:0.2}, Acc {2:0.2}'.format( - event.batch_id + 1, float(avg_cost), float(acc))) - if math.isnan(float(avg_cost)): - sys.exit("got NaN loss, training failed.") + if acc > 0.2: # Smaller value to increase CI speed + trainer.save_params(params_dirname) + else: + print('BatchID {0}, Test Loss {1:0.2}, Acc {2:0.2}'.format( + event.epoch + 1, avg_cost, acc)) 
+                if math.isnan(avg_cost):
+                    sys.exit("got NaN loss, training failed.")
+
+    train_reader = paddle.batch(
+        paddle.reader.shuffle(
+            paddle.dataset.mnist.train(), buf_size=500),
+        batch_size=BATCH_SIZE)

     trainer.train(
-        reader=dataset.mnist.train(), num_pass=100, event_handler=event_handler)
+        num_epochs=1,
+        event_handler=event_handler,
+        reader=train_reader,
+        feed_order=['img', 'label'])


-def infer(use_cuda, save_dirname=None):
+def infer(use_cuda, inference_program, params_dirname=None):
     place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()

     inferencer = fluid.Inferencer(
-        inference_program, param_path=save_dirname, place=place)
+        infer_func=inference_program, param_path=params_dirname, place=place)

     batch_size = 1
     tensor_img = numpy.random.uniform(-1.0, 1.0,
@@ -93,13 +100,19 @@ def infer(use_cuda, save_dirname=None):


 def main(use_cuda):
-    save_dirname = "recognize_digits_mlp.inference.model"
+    params_dirname = "recognize_digits_mlp.inference.model"

     # call train() with is_local argument to run distributed train
-    train(use_cuda=use_cuda, save_dirname=save_dirname)
-    infer(use_cuda=use_cuda, save_dirname=save_dirname)
+    train(
+        use_cuda=use_cuda,
+        train_program=train_program,
+        params_dirname=params_dirname)
+    infer(
+        use_cuda=use_cuda,
+        inference_program=inference_program,
+        params_dirname=params_dirname)


 if __name__ == '__main__':
-    for use_cuda in (False, True):
-        main(use_cuda=use_cuda)
+    # for use_cuda in (False, True):
+    main(use_cuda=False)
diff --git a/python/paddle/fluid/tests/book/high-level-api/recommender_system/CMakeLists.txt b/python/paddle/fluid/tests/book/high-level-api/recommender_system/CMakeLists.txt
new file mode 100644
index 0000000000..673c965b66
--- /dev/null
+++ b/python/paddle/fluid/tests/book/high-level-api/recommender_system/CMakeLists.txt
@@ -0,0 +1,7 @@
+file(GLOB TEST_OPS RELATIVE "${CMAKE_CURRENT_SOURCE_DIR}" "test_*.py")
+string(REPLACE ".py" "" TEST_OPS "${TEST_OPS}")
+
+# default test
+foreach(src ${TEST_OPS})
+    py_test(${src} SRCS ${src}.py)
+endforeach()
diff --git a/python/paddle/fluid/tests/book/high-level-api/recommender_system/test_recommender_system_newapi.py b/python/paddle/fluid/tests/book/high-level-api/recommender_system/test_recommender_system_newapi.py
new file mode 100644
index 0000000000..60f3d8e105
--- /dev/null
+++ b/python/paddle/fluid/tests/book/high-level-api/recommender_system/test_recommender_system_newapi.py
@@ -0,0 +1,261 @@
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import math
+import sys
+import numpy as np
+import paddle
+import paddle.fluid as fluid
+import paddle.fluid.layers as layers
+import paddle.fluid.nets as nets
+
+IS_SPARSE = True
+USE_GPU = False
+BATCH_SIZE = 256
+
+
+def get_usr_combined_features():
+    # FIXME(dzh) : old API integer_value(10) may have range check.
+    # currently we don't have a user-configured check.
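+    # Pattern used below: each categorical user attribute (id, gender,
+    # age, job) gets its own embedding table followed by a small fc
+    # layer; the four feature vectors are then concatenated and
+    # projected to a 200-d vector with a tanh fc.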
+ + USR_DICT_SIZE = paddle.dataset.movielens.max_user_id() + 1 + + uid = layers.data(name='user_id', shape=[1], dtype='int64') + + usr_emb = layers.embedding( + input=uid, + dtype='float32', + size=[USR_DICT_SIZE, 32], + param_attr='user_table', + is_sparse=IS_SPARSE) + + usr_fc = layers.fc(input=usr_emb, size=32) + + USR_GENDER_DICT_SIZE = 2 + + usr_gender_id = layers.data(name='gender_id', shape=[1], dtype='int64') + + usr_gender_emb = layers.embedding( + input=usr_gender_id, + size=[USR_GENDER_DICT_SIZE, 16], + param_attr='gender_table', + is_sparse=IS_SPARSE) + + usr_gender_fc = layers.fc(input=usr_gender_emb, size=16) + + USR_AGE_DICT_SIZE = len(paddle.dataset.movielens.age_table) + usr_age_id = layers.data(name='age_id', shape=[1], dtype="int64") + + usr_age_emb = layers.embedding( + input=usr_age_id, + size=[USR_AGE_DICT_SIZE, 16], + is_sparse=IS_SPARSE, + param_attr='age_table') + + usr_age_fc = layers.fc(input=usr_age_emb, size=16) + + USR_JOB_DICT_SIZE = paddle.dataset.movielens.max_job_id() + 1 + usr_job_id = layers.data(name='job_id', shape=[1], dtype="int64") + + usr_job_emb = layers.embedding( + input=usr_job_id, + size=[USR_JOB_DICT_SIZE, 16], + param_attr='job_table', + is_sparse=IS_SPARSE) + + usr_job_fc = layers.fc(input=usr_job_emb, size=16) + + concat_embed = layers.concat( + input=[usr_fc, usr_gender_fc, usr_age_fc, usr_job_fc], axis=1) + + usr_combined_features = layers.fc(input=concat_embed, size=200, act="tanh") + + return usr_combined_features + + +def get_mov_combined_features(): + + MOV_DICT_SIZE = paddle.dataset.movielens.max_movie_id() + 1 + + mov_id = layers.data(name='movie_id', shape=[1], dtype='int64') + + mov_emb = layers.embedding( + input=mov_id, + dtype='float32', + size=[MOV_DICT_SIZE, 32], + param_attr='movie_table', + is_sparse=IS_SPARSE) + + mov_fc = layers.fc(input=mov_emb, size=32) + + CATEGORY_DICT_SIZE = len(paddle.dataset.movielens.movie_categories()) + + category_id = layers.data( + name='category_id', shape=[1], dtype='int64', lod_level=1) + + mov_categories_emb = layers.embedding( + input=category_id, size=[CATEGORY_DICT_SIZE, 32], is_sparse=IS_SPARSE) + + mov_categories_hidden = layers.sequence_pool( + input=mov_categories_emb, pool_type="sum") + + MOV_TITLE_DICT_SIZE = len(paddle.dataset.movielens.get_movie_title_dict()) + + mov_title_id = layers.data( + name='movie_title', shape=[1], dtype='int64', lod_level=1) + + mov_title_emb = layers.embedding( + input=mov_title_id, size=[MOV_TITLE_DICT_SIZE, 32], is_sparse=IS_SPARSE) + + mov_title_conv = nets.sequence_conv_pool( + input=mov_title_emb, + num_filters=32, + filter_size=3, + act="tanh", + pool_type="sum") + + concat_embed = layers.concat( + input=[mov_fc, mov_categories_hidden, mov_title_conv], axis=1) + + # FIXME(dzh) : need tanh operator + mov_combined_features = layers.fc(input=concat_embed, size=200, act="tanh") + + return mov_combined_features + + +def inference_program(): + usr_combined_features = get_usr_combined_features() + mov_combined_features = get_mov_combined_features() + + inference = layers.cos_sim(X=usr_combined_features, Y=mov_combined_features) + scale_infer = layers.scale(x=inference, scale=5.0) + + return scale_infer + + +def train_program(): + + scale_infer = inference_program() + + label = layers.data(name='score', shape=[1], dtype='float32') + square_cost = layers.square_error_cost(input=scale_infer, label=label) + avg_cost = layers.mean(square_cost) + + return [avg_cost, scale_infer] + + +def optimizer_func(): + return fluid.optimizer.SGD(learning_rate=0.2) + + 
+def train(use_cuda, train_program, params_dirname):
+    place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
+
+    trainer = fluid.Trainer(
+        train_func=train_program, place=place, optimizer_func=optimizer_func)
+
+    feed_order = [
+        'user_id', 'gender_id', 'age_id', 'job_id', 'movie_id', 'category_id',
+        'movie_title', 'score'
+    ]
+
+    def event_handler(event):
+        if isinstance(event, fluid.EndStepEvent):
+            test_reader = paddle.batch(
+                paddle.dataset.movielens.test(), batch_size=BATCH_SIZE)
+            avg_cost_set = trainer.test(
+                reader=test_reader, feed_order=feed_order)
+
+            # get avg cost
+            avg_cost = np.array(avg_cost_set).mean()
+
+            print("avg_cost: %s" % avg_cost)
+
+            if float(avg_cost) < 4:  # Smaller value to increase CI speed
+                trainer.save_params(params_dirname)
+                trainer.stop()
+            else:
+                print('Epoch {0}, Test Loss {1:0.2}'.format(event.epoch + 1,
+                                                            float(avg_cost)))
+                if math.isnan(float(avg_cost)):
+                    sys.exit("got NaN loss, training failed.")
+
+    train_reader = paddle.batch(
+        paddle.reader.shuffle(
+            paddle.dataset.movielens.train(), buf_size=8192),
+        batch_size=BATCH_SIZE)
+
+    trainer.train(
+        num_epochs=1,
+        event_handler=event_handler,
+        reader=train_reader,
+        feed_order=feed_order)
+
+
+def infer(use_cuda, inference_program, params_dirname):
+    place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
+    inferencer = fluid.Inferencer(
+        infer_func=inference_program, param_path=params_dirname, place=place)
+
+    # Use the first data from paddle.dataset.movielens.test() as input.
+    # Use create_lod_tensor(data, recursive_sequence_lengths, place) API
+    # to generate LoD Tensor where `data` is a list of sequences of index
+    # numbers, `recursive_sequence_lengths` is the length-based level of detail
+    # (lod) info associated with `data`.
+    # For example, data = [[10, 2, 3], [2, 3]] means that it contains
+    # two sequences of indexes, of length 3 and 2, respectively.
+    # Correspondingly, recursive_sequence_lengths = [[3, 2]] contains one
+    # level of detail info, indicating that `data` consists of two sequences
+    # of length 3 and 2, respectively.
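+    # For instance, using the example from the comment above:
+    #   fluid.create_lod_tensor([[10, 2, 3], [2, 3]], [[3, 2]], place)
+    # builds one LoDTensor holding both index sequences, with [[3, 2]]
+    # recording where the first sequence ends and the second begins.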
+ user_id = fluid.create_lod_tensor([[1]], [[1]], place) + gender_id = fluid.create_lod_tensor([[1]], [[1]], place) + age_id = fluid.create_lod_tensor([[0]], [[1]], place) + job_id = fluid.create_lod_tensor([[10]], [[1]], place) + movie_id = fluid.create_lod_tensor([[783]], [[1]], place) + category_id = fluid.create_lod_tensor([[10, 8, 9]], [[3]], place) + movie_title = fluid.create_lod_tensor([[1069, 4140, 2923, 710, 988]], [[5]], + place) + + results = inferencer.infer( + { + 'user_id': user_id, + 'gender_id': gender_id, + 'age_id': age_id, + 'job_id': job_id, + 'movie_id': movie_id, + 'category_id': category_id, + 'movie_title': movie_title + }, + return_numpy=False) + + print("infer results: ", np.array(results[0])) + + +def main(use_cuda): + if use_cuda and not fluid.core.is_compiled_with_cuda(): + return + params_dirname = "recommender_system.inference.model" + train( + use_cuda=use_cuda, + train_program=train_program, + params_dirname=params_dirname) + infer( + use_cuda=use_cuda, + inference_program=inference_program, + params_dirname=params_dirname) + + +if __name__ == '__main__': + main(USE_GPU) diff --git a/python/paddle/fluid/tests/book/high-level-api/understand_sentiment/CMakeLists.txt b/python/paddle/fluid/tests/book/high-level-api/understand_sentiment/CMakeLists.txt new file mode 100644 index 0000000000..d71147a85e --- /dev/null +++ b/python/paddle/fluid/tests/book/high-level-api/understand_sentiment/CMakeLists.txt @@ -0,0 +1,12 @@ +file(GLOB TEST_OPS RELATIVE "${CMAKE_CURRENT_SOURCE_DIR}" "test_*.py") +string(REPLACE ".py" "" TEST_OPS "${TEST_OPS}") + +# This test is buggy +# py_test(test_understand_sentiment_dynamic_rnn SRCS +# test_understand_sentiment_dynamic_rnn.py SERIAL) +LIST(REMOVE_ITEM TEST_OPS test_understand_sentiment_dynamic_rnn) + +# default test +foreach(src ${TEST_OPS}) + py_test(${src} SRCS ${src}.py) +endforeach() diff --git a/python/paddle/fluid/tests/book/high-level-api/understand_sentiment/test_understand_sentiment_conv.py b/python/paddle/fluid/tests/book/high-level-api/understand_sentiment/test_understand_sentiment_conv.py new file mode 100644 index 0000000000..24e65d1bd5 --- /dev/null +++ b/python/paddle/fluid/tests/book/high-level-api/understand_sentiment/test_understand_sentiment_conv.py @@ -0,0 +1,154 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
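+
+# Reading aid: convolution_net() below runs two fluid.nets.sequence_conv_pool
+# branches over the same embedding, with filter sizes 3 and 4 and "sqrt"
+# pooling, then feeds both pooled vectors into a single softmax fc layer.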
+
+import math
+import sys
+
+import paddle
+import paddle.fluid as fluid
+from functools import partial
+import numpy as np
+
+CLASS_DIM = 2
+EMB_DIM = 128
+HID_DIM = 512
+BATCH_SIZE = 128
+
+
+def convolution_net(data, input_dim, class_dim, emb_dim, hid_dim):
+    emb = fluid.layers.embedding(
+        input=data, size=[input_dim, emb_dim], is_sparse=True)
+    conv_3 = fluid.nets.sequence_conv_pool(
+        input=emb,
+        num_filters=hid_dim,
+        filter_size=3,
+        act="tanh",
+        pool_type="sqrt")
+    conv_4 = fluid.nets.sequence_conv_pool(
+        input=emb,
+        num_filters=hid_dim,
+        filter_size=4,
+        act="tanh",
+        pool_type="sqrt")
+    prediction = fluid.layers.fc(input=[conv_3, conv_4],
+                                 size=class_dim,
+                                 act="softmax")
+    return prediction
+
+
+def inference_program(word_dict):
+    data = fluid.layers.data(
+        name="words", shape=[1], dtype="int64", lod_level=1)
+
+    dict_dim = len(word_dict)
+    net = convolution_net(data, dict_dim, CLASS_DIM, EMB_DIM, HID_DIM)
+    return net
+
+
+def train_program(word_dict):
+    prediction = inference_program(word_dict)
+    label = fluid.layers.data(name="label", shape=[1], dtype="int64")
+    cost = fluid.layers.cross_entropy(input=prediction, label=label)
+    avg_cost = fluid.layers.mean(cost)
+    accuracy = fluid.layers.accuracy(input=prediction, label=label)
+    return [avg_cost, accuracy]
+
+
+def optimizer_func():
+    return fluid.optimizer.Adagrad(learning_rate=0.002)
+
+
+def train(use_cuda, train_program, params_dirname):
+    place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
+
+    word_dict = paddle.dataset.imdb.word_dict()
+    trainer = fluid.Trainer(
+        train_func=partial(train_program, word_dict),
+        place=place,
+        optimizer_func=optimizer_func)
+
+    def event_handler(event):
+        if isinstance(event, fluid.EndEpochEvent):
+            test_reader = paddle.batch(
+                paddle.dataset.imdb.test(word_dict), batch_size=BATCH_SIZE)
+            avg_cost, acc = trainer.test(
+                reader=test_reader, feed_order=['words', 'label'])
+
+            print("avg_cost: %s" % avg_cost)
+            print("acc     : %s" % acc)
+
+            if acc > 0.2:  # Smaller value to increase CI speed
+                trainer.save_params(params_dirname)
+                trainer.stop()
+
+            else:
+                print('Epoch {0}, Test Loss {1:0.2}, Acc {2:0.2}'.format(
+                    event.epoch + 1, avg_cost, acc))
+                if math.isnan(avg_cost):
+                    sys.exit("got NaN loss, training failed.")
+        elif isinstance(event, fluid.EndStepEvent):
+            print("Step {0}, Epoch {1} Metrics {2}".format(
+                event.step, event.epoch, list(map(np.array, event.metrics))))
+            if event.step == 1:  # Run 2 iterations to speed CI
+                trainer.save_params(params_dirname)
+                trainer.stop()
+
+    train_reader = paddle.batch(
+        paddle.reader.shuffle(
+            paddle.dataset.imdb.train(word_dict), buf_size=25000),
+        batch_size=BATCH_SIZE)
+
+    trainer.train(
+        num_epochs=1,
+        event_handler=event_handler,
+        reader=train_reader,
+        feed_order=['words', 'label'])
+
+
+def infer(use_cuda, inference_program, params_dirname=None):
+    place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
+    word_dict = paddle.dataset.imdb.word_dict()
+
+    inferencer = fluid.Inferencer(
+        infer_func=partial(inference_program, word_dict),
+        param_path=params_dirname,
+        place=place)
+
+    # Setup input by creating LoDTensor to represent sequence of words.
+    # Here each word is the basic element of the LoDTensor and the shape of
+    # each word (base_shape) should be [1] since it is simply an index to
+    # look up for the corresponding word vector.
+    # Suppose the recursive_sequence_lengths info is set to [[3, 4, 2]],
+    # which has only one level of detail.
Then the created LoDTensor will have only + # one higher level structure (sequence of words, or sentence) than the basic + # element (word). Hence the LoDTensor will hold data for three sentences of + # length 3, 4 and 2, respectively. + # Note that recursive_sequence_lengths should be a list of lists. + recursive_seq_lens = [[3, 4, 2]] + base_shape = [1] + # The range of random integers is [low, high] + tensor_words = fluid.create_random_int_lodtensor( + recursive_seq_lens, base_shape, place, low=0, high=len(word_dict) - 1) + results = inferencer.infer({'words': tensor_words}) + print("infer results: ", results) + + +def main(use_cuda): + if use_cuda and not fluid.core.is_compiled_with_cuda(): + return + params_dirname = "understand_sentiment_conv.inference.model" + train(use_cuda, train_program, params_dirname) + infer(use_cuda, inference_program, params_dirname) + + +if __name__ == '__main__': + for use_cuda in (False, True): + main(use_cuda=use_cuda) diff --git a/python/paddle/fluid/tests/book/high-level-api/understand_sentiment/test_understand_sentiment_dynamic_rnn.py b/python/paddle/fluid/tests/book/high-level-api/understand_sentiment/test_understand_sentiment_dynamic_rnn.py new file mode 100644 index 0000000000..b3b1505a0f --- /dev/null +++ b/python/paddle/fluid/tests/book/high-level-api/understand_sentiment/test_understand_sentiment_dynamic_rnn.py @@ -0,0 +1,169 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
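+
+# Reading aid: dynamic_rnn_lstm() below builds one LSTM step per word by hand.
+# Assuming the standard LSTM formulation, each step computes
+#   f = sigmoid(Wf.x + Uf.h)      i = sigmoid(Wi.x + Ui.h)
+#   o = sigmoid(Wo.x + Uo.h)      c_hat = tanh(Wc.x + Uc.h)
+#   cell = f * prev_cell + i * c_hat
+#   hidden = o * tanh(cell)
+# where each W.x + U.h pair is produced by gate_common().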
+
+import math
+import sys
+
+import paddle
+import paddle.fluid as fluid
+from functools import partial
+import numpy as np
+
+CLASS_DIM = 2
+EMB_DIM = 128
+BATCH_SIZE = 128
+LSTM_SIZE = 128
+
+
+def dynamic_rnn_lstm(data, input_dim, class_dim, emb_dim, lstm_size):
+    emb = fluid.layers.embedding(
+        input=data, size=[input_dim, emb_dim], is_sparse=True)
+    sentence = fluid.layers.fc(input=emb, size=lstm_size, act='tanh')
+
+    rnn = fluid.layers.DynamicRNN()
+    with rnn.block():
+        word = rnn.step_input(sentence)
+        prev_hidden = rnn.memory(value=0.0, shape=[lstm_size])
+        prev_cell = rnn.memory(value=0.0, shape=[lstm_size])
+
+        def gate_common(ipt, hidden, size):
+            gate0 = fluid.layers.fc(input=ipt, size=size, bias_attr=True)
+            gate1 = fluid.layers.fc(input=hidden, size=size, bias_attr=False)
+            return gate0 + gate1
+
+        forget_gate = fluid.layers.sigmoid(x=gate_common(word, prev_hidden,
+                                                         lstm_size))
+        input_gate = fluid.layers.sigmoid(x=gate_common(word, prev_hidden,
+                                                        lstm_size))
+        output_gate = fluid.layers.sigmoid(x=gate_common(word, prev_hidden,
+                                                         lstm_size))
+        # candidate cell state (tanh, per the standard LSTM formulation)
+        cell_gate = fluid.layers.tanh(x=gate_common(word, prev_hidden,
+                                                    lstm_size))
+
+        cell = forget_gate * prev_cell + input_gate * cell_gate
+        hidden = output_gate * fluid.layers.tanh(x=cell)
+        rnn.update_memory(prev_cell, cell)
+        rnn.update_memory(prev_hidden, hidden)
+        rnn.output(hidden)
+
+    last = fluid.layers.sequence_last_step(rnn())
+    prediction = fluid.layers.fc(input=last, size=class_dim, act="softmax")
+    return prediction
+
+
+def inference_program(word_dict):
+    data = fluid.layers.data(
+        name="words", shape=[1], dtype="int64", lod_level=1)
+
+    dict_dim = len(word_dict)
+    pred = dynamic_rnn_lstm(data, dict_dim, CLASS_DIM, EMB_DIM, LSTM_SIZE)
+    return pred
+
+
+def train_program(word_dict):
+    prediction = inference_program(word_dict)
+    label = fluid.layers.data(name="label", shape=[1], dtype="int64")
+    cost = fluid.layers.cross_entropy(input=prediction, label=label)
+    avg_cost = fluid.layers.mean(cost)
+    accuracy = fluid.layers.accuracy(input=prediction, label=label)
+    return [avg_cost, accuracy]
+
+
+def optimizer_func():
+    return fluid.optimizer.Adagrad(learning_rate=0.002)
+
+
+def train(use_cuda, train_program, params_dirname):
+    place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
+
+    word_dict = paddle.dataset.imdb.word_dict()
+    trainer = fluid.Trainer(
+        train_func=partial(train_program, word_dict),
+        place=place,
+        optimizer_func=optimizer_func)
+
+    def event_handler(event):
+        if isinstance(event, fluid.EndEpochEvent):
+            test_reader = paddle.batch(
+                paddle.dataset.imdb.test(word_dict), batch_size=BATCH_SIZE)
+            avg_cost, acc = trainer.test(
+                reader=test_reader, feed_order=['words', 'label'])
+
+            print("avg_cost: %s" % avg_cost)
+            print("acc     : %s" % acc)
+
+            if acc > 0.2:  # Smaller value to increase CI speed
+                trainer.save_params(params_dirname)
+                trainer.stop()
+
+            else:
+                print('Epoch {0}, Test Loss {1:0.2}, Acc {2:0.2}'.format(
+                    event.epoch + 1, avg_cost, acc))
+                if math.isnan(avg_cost):
+                    sys.exit("got NaN loss, training failed.")
+        elif isinstance(event, fluid.EndStepEvent):
+            print("Step {0}, Epoch {1} Metrics {2}".format(
+                event.step, event.epoch, list(map(np.array, event.metrics))))
+            if event.step == 1:  # Run 2 iterations to speed CI
+                trainer.save_params(params_dirname)
+                trainer.stop()
+
+    train_reader = paddle.batch(
+        paddle.reader.shuffle(
+            paddle.dataset.imdb.train(word_dict), buf_size=25000),
+        batch_size=BATCH_SIZE)
+
+    trainer.train(
+        num_epochs=1,
+        event_handler=event_handler,
+        reader=train_reader,
+        feed_order=['words', 'label'])
+
+
+def infer(use_cuda, inference_program, params_dirname=None):
+    place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
+    word_dict = paddle.dataset.imdb.word_dict()
+
+    inferencer = fluid.Inferencer(
+        infer_func=partial(inference_program, word_dict),
+        param_path=params_dirname,
+        place=place)
+
+    # Setup input by creating LoDTensor to represent sequence of words.
+    # Here each word is the basic element of the LoDTensor and the shape of
+    # each word (base_shape) should be [1] since it is simply an index to
+    # look up for the corresponding word vector.
+    # Suppose the recursive_sequence_lengths info is set to [[3, 4, 2]],
+    # which has only one level of detail. Then the created LoDTensor will have only
+    # one higher level structure (sequence of words, or sentence) than the basic
+    # element (word). Hence the LoDTensor will hold data for three sentences of
+    # length 3, 4 and 2, respectively.
+    # Note that recursive_sequence_lengths should be a list of lists.
+    recursive_seq_lens = [[3, 4, 2]]
+    base_shape = [1]
+    # The range of random integers is [low, high]
+    tensor_words = fluid.create_random_int_lodtensor(
+        recursive_seq_lens, base_shape, place, low=0, high=len(word_dict) - 1)
+    results = inferencer.infer({'words': tensor_words})
+    print("infer results: ", results)
+
+
+def main(use_cuda):
+    if use_cuda and not fluid.core.is_compiled_with_cuda():
+        return
+    params_dirname = "understand_sentiment_dynamic_rnn.inference.model"
+    train(use_cuda, train_program, params_dirname)
+    infer(use_cuda, inference_program, params_dirname)
+
+
+if __name__ == '__main__':
+    for use_cuda in (False, True):
+        main(use_cuda=use_cuda)
diff --git a/python/paddle/fluid/tests/book/high-level-api/understand_sentiment/test_understand_sentiment_stacked_lstm.py b/python/paddle/fluid/tests/book/high-level-api/understand_sentiment/test_understand_sentiment_stacked_lstm.py
new file mode 100644
index 0000000000..25f99ff0fd
--- /dev/null
+++ b/python/paddle/fluid/tests/book/high-level-api/understand_sentiment/test_understand_sentiment_stacked_lstm.py
@@ -0,0 +1,164 @@
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
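+
+# Reading aid: stacked_lstm_net() below stacks stacked_num LSTM layers,
+# reversing direction on every even layer (is_reverse=(i % 2) == 0), then
+# max-pools the topmost fc and lstm outputs and classifies them with a
+# softmax fc layer. stacked_num must be odd so that the top layer runs in
+# the forward direction.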
+
+import math
+import sys
+
+import paddle
+import paddle.fluid as fluid
+from functools import partial
+import numpy as np
+
+CLASS_DIM = 2
+EMB_DIM = 128
+HID_DIM = 512
+STACKED_NUM = 3
+BATCH_SIZE = 128
+
+
+def stacked_lstm_net(data, input_dim, class_dim, emb_dim, hid_dim, stacked_num):
+    assert stacked_num % 2 == 1
+
+    emb = fluid.layers.embedding(
+        input=data, size=[input_dim, emb_dim], is_sparse=True)
+
+    fc1 = fluid.layers.fc(input=emb, size=hid_dim)
+    lstm1, cell1 = fluid.layers.dynamic_lstm(input=fc1, size=hid_dim)
+
+    inputs = [fc1, lstm1]
+
+    for i in range(2, stacked_num + 1):
+        fc = fluid.layers.fc(input=inputs, size=hid_dim)
+        lstm, cell = fluid.layers.dynamic_lstm(
+            input=fc, size=hid_dim, is_reverse=(i % 2) == 0)
+        inputs = [fc, lstm]
+
+    fc_last = fluid.layers.sequence_pool(input=inputs[0], pool_type='max')
+    lstm_last = fluid.layers.sequence_pool(input=inputs[1], pool_type='max')
+
+    prediction = fluid.layers.fc(input=[fc_last, lstm_last],
+                                 size=class_dim,
+                                 act='softmax')
+    return prediction
+
+
+def inference_program(word_dict):
+    data = fluid.layers.data(
+        name="words", shape=[1], dtype="int64", lod_level=1)
+
+    dict_dim = len(word_dict)
+    net = stacked_lstm_net(data, dict_dim, CLASS_DIM, EMB_DIM, HID_DIM,
+                           STACKED_NUM)
+    return net
+
+
+def train_program(word_dict):
+    prediction = inference_program(word_dict)
+    label = fluid.layers.data(name="label", shape=[1], dtype="int64")
+    cost = fluid.layers.cross_entropy(input=prediction, label=label)
+    avg_cost = fluid.layers.mean(cost)
+    accuracy = fluid.layers.accuracy(input=prediction, label=label)
+    return [avg_cost, accuracy]
+
+
+def optimizer_func():
+    return fluid.optimizer.Adagrad(learning_rate=0.002)
+
+
+def train(use_cuda, train_program, params_dirname):
+    place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
+
+    word_dict = paddle.dataset.imdb.word_dict()
+    trainer = fluid.Trainer(
+        train_func=partial(train_program, word_dict),
+        place=place,
+        optimizer_func=optimizer_func)
+
+    def event_handler(event):
+        if isinstance(event, fluid.EndEpochEvent):
+            test_reader = paddle.batch(
+                paddle.dataset.imdb.test(word_dict),
+                batch_size=BATCH_SIZE,
+                drop_last=False)
+            avg_cost, acc = trainer.test(
+                reader=test_reader, feed_order=['words', 'label'])
+
+            print("avg_cost: %s" % avg_cost)
+            print("acc     : %s" % acc)
+
+            if acc > 0.2:  # Smaller value to increase CI speed
+                trainer.save_params(params_dirname)
+                trainer.stop()
+
+            else:
+                print('Epoch {0}, Test Loss {1:0.2}, Acc {2:0.2}'.format(
+                    event.epoch + 1, avg_cost, acc))
+                if math.isnan(avg_cost):
+                    sys.exit("got NaN loss, training failed.")
+        elif isinstance(event, fluid.EndStepEvent):
+            print("Step {0}, Epoch {1} Metrics {2}".format(
+                event.step, event.epoch, list(map(np.array, event.metrics))))
+            if event.step == 1:  # Run 2 iterations to speed CI
+                trainer.save_params(params_dirname)
+                trainer.stop()
+
+    train_reader = paddle.batch(
+        paddle.reader.shuffle(
+            paddle.dataset.imdb.train(word_dict), buf_size=25000),
+        batch_size=BATCH_SIZE,
+        drop_last=False)
+
+    trainer.train(
+        num_epochs=1,
+        event_handler=event_handler,
+        reader=train_reader,
+        feed_order=['words', 'label'])
+
+
+def infer(use_cuda, inference_program, params_dirname=None):
+    place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
+    word_dict = paddle.dataset.imdb.word_dict()
+
+    inferencer = fluid.Inferencer(
+        infer_func=partial(inference_program, word_dict),
+        param_path=params_dirname,
+        place=place)
+
+    # Setup input by creating LoDTensor to represent sequence of words.
+ # Here each word is the basic element of the LoDTensor and the shape of + # each word (base_shape) should be [1] since it is simply an index to + # look up for the corresponding word vector. + # Suppose the recursive_sequence_lengths info is set to [[3, 4, 2]], + # which has only one level of detail. Then the created LoDTensor will have only + # one higher level structure (sequence of words, or sentence) than the basic + # element (word). Hence the LoDTensor will hold data for three sentences of + # length 3, 4 and 2, respectively. + # Note that recursive_sequence_lengths should be a list of lists. + recursive_seq_lens = [[3, 4, 2]] + base_shape = [1] + # The range of random integers is [low, high] + tensor_words = fluid.create_random_int_lodtensor( + recursive_seq_lens, base_shape, place, low=0, high=len(word_dict) - 1) + results = inferencer.infer({'words': tensor_words}) + print("infer results: ", results) + + +def main(use_cuda): + if use_cuda and not fluid.core.is_compiled_with_cuda(): + return + params_dirname = "understand_sentiment_stacked_lstm.inference.model" + train(use_cuda, train_program, params_dirname) + infer(use_cuda, inference_program, params_dirname) + + +if __name__ == '__main__': + for use_cuda in (False, True): + main(use_cuda=use_cuda) diff --git a/python/paddle/fluid/tests/book/high-level-api/word2vec/CMakeLists.txt b/python/paddle/fluid/tests/book/high-level-api/word2vec/CMakeLists.txt new file mode 100644 index 0000000000..673c965b66 --- /dev/null +++ b/python/paddle/fluid/tests/book/high-level-api/word2vec/CMakeLists.txt @@ -0,0 +1,7 @@ +file(GLOB TEST_OPS RELATIVE "${CMAKE_CURRENT_SOURCE_DIR}" "test_*.py") +string(REPLACE ".py" "" TEST_OPS "${TEST_OPS}") + +# default test +foreach(src ${TEST_OPS}) + py_test(${src} SRCS ${src}.py) +endforeach() diff --git a/python/paddle/fluid/tests/book/word2vec/no_test_word2vec_new_api.py b/python/paddle/fluid/tests/book/high-level-api/word2vec/test_word2vec_new_api.py similarity index 53% rename from python/paddle/fluid/tests/book/word2vec/no_test_word2vec_new_api.py rename to python/paddle/fluid/tests/book/high-level-api/word2vec/test_word2vec_new_api.py index 35e163dc9d..02e65cf56c 100644 --- a/python/paddle/fluid/tests/book/word2vec/no_test_word2vec_new_api.py +++ b/python/paddle/fluid/tests/book/high-level-api/word2vec/test_word2vec_new_api.py @@ -25,16 +25,6 @@ HIDDEN_SIZE = 256 N = 5 BATCH_SIZE = 32 - -def create_random_lodtensor(lod, place, low, high): - # The range of data elements is [low, high] - data = np.random.random_integers(low, high, [lod[-1], 1]).astype("int64") - res = fluid.LoDTensor() - res.set(data, place) - res.set_lod([lod]) - return res - - word_dict = paddle.dataset.imikolov.build_dict() dict_size = len(word_dict) @@ -80,67 +70,103 @@ def inference_program(is_sparse): def train_program(is_sparse): - next_word = fluid.layers.data(name='nextw', shape=[1], dtype='int64') + # The declaration of 'next_word' must be after the invoking of inference_program, + # or the data input order of train program would be [next_word, firstw, secondw, + # thirdw, forthw], which is not correct. 
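+    # Concretely, with this declaration order the feed order becomes
+    #   ['firstw', 'secondw', 'thirdw', 'forthw', 'nextw'],
+    # matching the feed_order lists passed to trainer.train() and
+    # trainer.test() below.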
predict_word = inference_program(is_sparse) + next_word = fluid.layers.data(name='nextw', shape=[1], dtype='int64') cost = fluid.layers.cross_entropy(input=predict_word, label=next_word) avg_cost = fluid.layers.mean(cost) return avg_cost -def train(use_cuda, is_sparse, save_path): +def optimizer_func(): + return fluid.optimizer.SGD(learning_rate=0.001) + + +def train(use_cuda, train_program, params_dirname): train_reader = paddle.batch( paddle.dataset.imikolov.train(word_dict, N), BATCH_SIZE) + test_reader = paddle.batch( + paddle.dataset.imikolov.test(word_dict, N), BATCH_SIZE) place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() def event_handler(event): - print type(event) - if isinstance(event, fluid.EndEpochEvent): - avg_cost = trainer.test(reader=paddle.dataset.imikolov.test( - word_dict, N)) - - if avg_cost < 5.0: - trainer.save_params(save_path) - return + if isinstance(event, fluid.EndStepEvent): + outs = trainer.test( + reader=test_reader, + feed_order=['firstw', 'secondw', 'thirdw', 'forthw', 'nextw']) + avg_cost = outs[0] + print("loss= ", avg_cost) + + if avg_cost < 10.0: + trainer.save_params(params_dirname) + trainer.stop() + if math.isnan(avg_cost): sys.exit("got NaN loss, training failed.") trainer = fluid.Trainer( - partial(train_program, is_sparse), - fluid.optimizer.SGD(learning_rate=0.001), - place=place) + train_func=train_program, optimizer_func=optimizer_func, place=place) + trainer.train( - reader=train_reader, num_epochs=100, event_handler=event_handler) + reader=train_reader, + num_epochs=1, + event_handler=event_handler, + feed_order=['firstw', 'secondw', 'thirdw', 'forthw', 'nextw']) -def infer(use_cuda, is_sparse, save_path): +def infer(use_cuda, inference_program, params_dirname=None): place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() inferencer = fluid.Inferencer( - partial(inference_program, is_sparse), - param_path=save_path, - place=place) - - lod = [0, 1] - first_word = create_random_lodtensor(lod, place, low=0, high=dict_size - 1) - second_word = create_random_lodtensor(lod, place, low=0, high=dict_size - 1) - third_word = create_random_lodtensor(lod, place, low=0, high=dict_size - 1) - fourth_word = create_random_lodtensor(lod, place, low=0, high=dict_size - 1) - result = inferencer.infer({ - 'firstw': first_word, - 'secondw': second_word, - 'thirdw': third_word, - 'forthw': fourth_word - }) - print(result) + infer_func=inference_program, param_path=params_dirname, place=place) + + # Setup inputs by creating 4 LoDTensors representing 4 words. Here each word + # is simply an index to look up for the corresponding word vector and hence + # the shape of word (base_shape) should be [1]. The recursive_sequence_lengths, + # which is length-based level of detail (lod) of each LoDTensor, should be [[1]] + # meaning there is only one level of detail and there is only one sequence of + # one word on this level. + # Note that recursive_sequence_lengths should be a list of lists. 
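+    # In other words, each input created below is a LoDTensor of shape [1, 1]:
+    # a single one-word sequence holding one random id from [0, dict_size - 1].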
+ recursive_seq_lens = [[1]] + base_shape = [1] + # The range of random integers is [low, high] + first_word = fluid.create_random_int_lodtensor( + recursive_seq_lens, base_shape, place, low=0, high=dict_size - 1) + second_word = fluid.create_random_int_lodtensor( + recursive_seq_lens, base_shape, place, low=0, high=dict_size - 1) + third_word = fluid.create_random_int_lodtensor( + recursive_seq_lens, base_shape, place, low=0, high=dict_size - 1) + fourth_word = fluid.create_random_int_lodtensor( + recursive_seq_lens, base_shape, place, low=0, high=dict_size - 1) + + result = inferencer.infer( + { + 'firstw': first_word, + 'secondw': second_word, + 'thirdw': third_word, + 'forthw': fourth_word + }, + return_numpy=False) + print(np.array(result[0])) def main(use_cuda, is_sparse): if use_cuda and not fluid.core.is_compiled_with_cuda(): return - save_path = "word2vec.inference.model" - train(use_cuda, is_sparse, save_path) - infer(use_cuda, is_sparse, save_path) + params_dirname = "word2vec.inference.model" + + train( + use_cuda=use_cuda, + train_program=partial(train_program, is_sparse), + params_dirname=params_dirname) + + infer( + use_cuda=use_cuda, + inference_program=partial(inference_program, is_sparse), + params_dirname=params_dirname) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/book/label_semantic_roles/no_test_label_semantic_roles.py b/python/paddle/fluid/tests/book/label_semantic_roles/no_test_label_semantic_roles.py deleted file mode 100755 index fe36e55bb5..0000000000 --- a/python/paddle/fluid/tests/book/label_semantic_roles/no_test_label_semantic_roles.py +++ /dev/null @@ -1,228 +0,0 @@ -# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from __future__ import print_function - -import paddle -import paddle.fluid as fluid -import numpy - -WORD_DICT, VERB_DICT, LABEL_DICT = paddle.dataset.conll05.get_dict() -WORD_DICT_LEN = len(WORD_DICT) -LABEL_DICT_LEN = len(LABEL_DICT) -PRED_DICT_LEN = len(VERB_DICT) -MARK_DICT_LEN = 2 - - -def lstm_net(word, predicate, ctx_n2, ctx_n1, ctx_0, ctx_p1, ctx_p2, mark): - WORD_DIM = 32 - MARK_DIM = 5 - HIDDEN_DIM = 512 - DEPTH = 8 - EMBEDDING_NAME = 'emb' - - # Data definitions - word = fluid.layers.data( - name='word_data', shape=[1], dtype='int64', lod_level=1) - predicate = fluid.layers.data( - name='verb_data', shape=[1], dtype='int64', lod_level=1) - ctx_n2 = fluid.layers.data( - name='ctx_n2_data', shape=[1], dtype='int64', lod_level=1) - ctx_n1 = fluid.layers.data( - name='ctx_n1_data', shape=[1], dtype='int64', lod_level=1) - ctx_0 = fluid.layers.data( - name='ctx_0_data', shape=[1], dtype='int64', lod_level=1) - ctx_p1 = fluid.layers.data( - name='ctx_p1_data', shape=[1], dtype='int64', lod_level=1) - ctx_p2 = fluid.layers.data( - name='ctx_p2_data', shape=[1], dtype='int64', lod_level=1) - mark = fluid.layers.data( - name='mark_data', shape=[1], dtype='int64', lod_level=1) - - # 8 features - predicate_embedding = fluid.layers.embedding( - input=predicate, - size=[PRED_DICT_LEN, WORD_DIM], - dtype='float32', - is_sparse=IS_SPARSE, - param_attr='vemb') - - mark_embedding = fluid.layers.embedding( - input=mark, - size=[MARK_DICT_LEN, MARK_DIM], - dtype='float32', - is_sparse=IS_SPARSE) - - word_input = [word, ctx_n2, ctx_n1, ctx_0, ctx_p1, ctx_p2] - emb_layers = [ - fluid.layers.embedding( - size=[WORD_DICT_LEN, WORD_DIM], - input=x, - param_attr=fluid.ParamAttr( - name=EMBEDDING_NAME, trainable=False)) for x in word_input - ] - emb_layers.append(predicate_embedding) - emb_layers.append(mark_embedding) - - hidden_0_layers = [ - fluid.layers.fc(input=emb, size=HIDDEN_DIM, act='tanh') - for emb in emb_layers - ] - - hidden_0 = fluid.layers.sums(input=hidden_0_layers) - - lstm_0 = fluid.layers.dynamic_lstm( - input=hidden_0, - size=HIDDEN_DIM, - candidate_activation='relu', - gate_activation='sigmoid', - cell_activation='sigmoid') - - # stack L-LSTM and R-LSTM with direct edges - input_tmp = [hidden_0, lstm_0] - - for i in range(1, DEPTH): - mix_hidden = fluid.layers.sums(input=[ - fluid.layers.fc(input=input_tmp[0], size=HIDDEN_DIM, act='tanh'), - fluid.layers.fc(input=input_tmp[1], size=HIDDEN_DIM, act='tanh') - ]) - - lstm = fluid.layers.dynamic_lstm( - input=mix_hidden, - size=HIDDEN_DIM, - candidate_activation='relu', - gate_activation='sigmoid', - cell_activation='sigmoid', - is_reverse=((i % 2) == 1)) - - input_tmp = [mix_hidden, lstm] - - feature_out = fluid.layers.sums(input=[ - fluid.layers.fc(input=input_tmp[0], size=LABEL_DICT_LEN, act='tanh'), - fluid.layers.fc(input=input_tmp[1], size=LABEL_DICT_LEN, act='tanh') - ]) - - return feature_out - - -def inference_network(): - predict = lstm_net(word, predicate, ctx_n2, ctx_n1, ctx_0, ctx_p1, ctx_p2, - mark) - - crf_decode = fluid.layers.crf_decoding( - input=feature_out, param_attr=fluid.ParamAttr(name='crfw')) - - return crf_decode - - -def train_network(): - MIX_HIDDEN_LR = 1e-3 - - predict = lstm_net(word, predicate, ctx_n2, ctx_n1, ctx_0, ctx_p1, ctx_p2, - mark) - target = fluid.layers.data( - name='target', shape=[1], dtype='int64', lod_level=1) - crf_cost = fluid.layers.linear_chain_crf( - input=predict, - label=target, - param_attr=fluid.ParamAttr( - name='crfw', learning_rate=MIX_HIDDEN_LR)) - avg_cost = 
fluid.layers.mean(crf_cost) - - return avg_cost - - -def train(use_cuda, save_path): - BATCH_SIZE = 128 - EPOCH_NUM = 1 - - train_reader = paddle.batch( - paddle.reader.shuffle( - paddle.dataset.conll05.train(), buf_size=8192), - batch_size=BATCH_SIZE) - test_reader = paddle.batch( - paddle.dataset.conll05.test(), batch_size=BATCH_SIZE) - - def event_handler(event): - if isinstance(event, fluid.EndIteration): - if (event.batch_id % 10) == 0: - avg_cost = trainer.test(reader=test_reader) - - print('BatchID {0:04}, Loss {1:2.2}'.format(event.batch_id + 1, - avg_cost)) - - if avg_cost > 0.01: # Low threshold for speeding up CI - trainer.save_params(save_path) - return - - place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() - sgd_optimizer = fluid.optimizer.SGD( - learning_rate=fluid.layers.exponential_decay( - learning_rate=0.01, - decay_steps=100000, - decay_rate=0.5, - staircase=True)) - trainer = fluid.Trainer(train_network, optimizer=sgd_optimizer, place=place) - trainer.train(train_reader, EPOCH_NUM, event_handler=event_handler) - - -def infer(use_cuda, save_path): - place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() - inferencer = fluid.Inferencer( - inference_program, param_path=save_path, place=place) - - def create_random_lodtensor(lod, place, low, high): - data = np.random.random_integers(low, high, - [lod[-1], 1]).astype("int64") - res = fluid.LoDTensor() - res.set(data, place) - res.set_lod([lod]) - return res - - # Create an input example - lod = [0, 4, 10] - word = create_random_lodtensor(lod, place, low=0, high=WORD_DICT_LEN - 1) - pred = create_random_lodtensor(lod, place, low=0, high=PRED_DICT_LEN - 1) - ctx_n2 = create_random_lodtensor(lod, place, low=0, high=WORD_DICT_LEN - 1) - ctx_n1 = create_random_lodtensor(lod, place, low=0, high=WORD_DICT_LEN - 1) - ctx_0 = create_random_lodtensor(lod, place, low=0, high=WORD_DICT_LEN - 1) - ctx_p1 = create_random_lodtensor(lod, place, low=0, high=WORD_DICT_LEN - 1) - ctx_p2 = create_random_lodtensor(lod, place, low=0, high=WORD_DICT_LEN - 1) - mark = create_random_lodtensor(lod, place, low=0, high=MARK_DICT_LEN - 1) - - results = inferencer.infer({ - 'word_data': word, - 'verb_data': pred, - 'ctx_n2_data': ctx_n2, - 'ctx_n1_data': ctx_n1, - 'ctx_0_data': ctx_0, - 'ctx_p1_data': ctx_p1, - 'ctx_p2_data': ctx_p2, - 'mark_data': mark - }) - - print("infer results: ", results) - - -def main(use_cuda): - if use_cuda and not fluid.core.is_compiled_with_cuda(): - return - save_path = "label_semantic_roles.inference.model" - train(use_cuda, save_path) - infer(use_cuda, save_path) - - -if __name__ == '__main__': - for use_cuda in (False, True): - main(use_cuda=use_cuda) diff --git a/python/paddle/fluid/tests/book/notest_understand_sentiment.py b/python/paddle/fluid/tests/book/notest_understand_sentiment.py index 241778e303..ce6342c2da 100644 --- a/python/paddle/fluid/tests/book/notest_understand_sentiment.py +++ b/python/paddle/fluid/tests/book/notest_understand_sentiment.py @@ -11,8 +11,8 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
-from __future__ import print_function +from paddle.fluid.layers.device import get_places import unittest import paddle.fluid as fluid import paddle @@ -125,14 +125,6 @@ def stacked_lstm_net(data, return avg_cost, accuracy, prediction -def create_random_lodtensor(lod, place, low, high): - data = np.random.random_integers(low, high, [lod[-1], 1]).astype("int64") - res = fluid.LoDTensor() - res.set(data, place) - res.set_lod([lod]) - return res - - def train(word_dict, net_method, use_cuda, @@ -152,7 +144,7 @@ def train(word_dict, cost, acc_out, prediction = net_method( data, label, input_dim=dict_dim, class_dim=class_dim) else: - places = fluid.layers.get_places() + places = get_places() pd = fluid.layers.ParallelDo(places) with pd.do(): cost, acc, _ = net_method( @@ -170,7 +162,7 @@ def train(word_dict, assert save_dirname is None adagrad = fluid.optimizer.Adagrad(learning_rate=0.002) - optimize_ops, params_grads = adagrad.minimize(cost) + adagrad.minimize(cost) train_data = paddle.batch( paddle.reader.shuffle( @@ -183,7 +175,7 @@ def train(word_dict, def train_loop(main_program): exe.run(fluid.default_startup_program()) - for pass_id in xrange(PASS_NUM): + for pass_id in range(PASS_NUM): for data in train_data(): cost_val, acc_val = exe.run(main_program, feed=feeder.feed(data), @@ -202,16 +194,16 @@ def train(word_dict, if is_local: train_loop(fluid.default_main_program()) else: - port = os.getenv("PADDLE_INIT_PORT", "6174") - pserver_ips = os.getenv("PADDLE_INIT_PSERVERS") # ip,ip... + port = os.getenv("PADDLE_PSERVER_PORT", "6174") + pserver_ips = os.getenv("PADDLE_PSERVER_IPS") # ip,ip... eplist = [] for ip in pserver_ips.split(","): eplist.append(':'.join([ip, port])) pserver_endpoints = ",".join(eplist) # ip:port,ip:port... - trainers = int(os.getenv("TRAINERS")) + trainers = int(os.getenv("PADDLE_TRAINERS")) current_endpoint = os.getenv("POD_IP") + ":" + port - trainer_id = int(os.getenv("PADDLE_INIT_TRAINER_ID")) - training_role = os.getenv("TRAINING_ROLE", "TRAINER") + trainer_id = int(os.getenv("PADDLE_TRAINER_ID")) + training_role = os.getenv("PADDLE_TRAINING_ROLE", "TRAINER") t = fluid.DistributeTranspiler() t.transpile(trainer_id, pservers=pserver_endpoints, trainers=trainers) if training_role == "PSERVER": @@ -242,9 +234,25 @@ def infer(word_dict, use_cuda, save_dirname=None): word_dict_len = len(word_dict) - lod = [0, 4, 10] - tensor_words = create_random_lodtensor( - lod, place, low=0, high=word_dict_len - 1) + # Setup input by creating LoDTensor to represent sequence of words. + # Here each word is the basic element of the LoDTensor and the shape of + # each word (base_shape) should be [1] since it is simply an index to + # look up for the corresponding word vector. + # Suppose the recursive_sequence_lengths info is set to [[3, 4, 2]], + # which has only one level of detail. Then the created LoDTensor will have only + # one higher level structure (sequence of words, or sentence) than the basic + # element (word). Hence the LoDTensor will hold data for three sentences of + # length 3, 4 and 2, respectively. + # Note that recursive_sequence_lengths should be a list of lists. + recursive_seq_lens = [[3, 4, 2]] + base_shape = [1] + # The range of random integers is [low, high] + tensor_words = fluid.create_random_int_lodtensor( + recursive_seq_lens, + base_shape, + place, + low=0, + high=word_dict_len - 1) # Construct feed as a dictionary of {feed_target_name: feed_target_data} # and results will contain a list of data corresponding to fetch_targets. 
@@ -253,7 +261,7 @@ def infer(word_dict, use_cuda, save_dirname=None): feed={feed_target_names[0]: tensor_words}, fetch_list=fetch_targets, return_numpy=False) - print(results[0].lod()) + print(results[0].recursive_sequence_lengths()) np_data = np.array(results[0]) print("Inference Shape: ", np_data.shape) print("Inference results: ", np_data) diff --git a/python/paddle/fluid/tests/book/test_fit_a_line.py b/python/paddle/fluid/tests/book/test_fit_a_line.py index ecb34699af..37b64fa94a 100644 --- a/python/paddle/fluid/tests/book/test_fit_a_line.py +++ b/python/paddle/fluid/tests/book/test_fit_a_line.py @@ -33,7 +33,7 @@ def train(use_cuda, save_dirname, is_local): avg_cost = fluid.layers.mean(cost) sgd_optimizer = fluid.optimizer.SGD(learning_rate=0.001) - optimize_ops, params_grads = sgd_optimizer.minimize(avg_cost) + sgd_optimizer.minimize(avg_cost) BATCH_SIZE = 20 @@ -69,16 +69,16 @@ def train(use_cuda, save_dirname, is_local): if is_local: train_loop(fluid.default_main_program()) else: - port = os.getenv("PADDLE_INIT_PORT", "6174") - pserver_ips = os.getenv("PADDLE_INIT_PSERVERS") # ip,ip... + port = os.getenv("PADDLE_PSERVER_PORT", "6174") + pserver_ips = os.getenv("PADDLE_PSERVER_IPS") # ip,ip... eplist = [] for ip in pserver_ips.split(","): eplist.append(':'.join([ip, port])) pserver_endpoints = ",".join(eplist) # ip:port,ip:port... - trainers = int(os.getenv("TRAINERS")) + trainers = int(os.getenv("PADDLE_TRAINERS")) current_endpoint = os.getenv("POD_IP") + ":" + port - trainer_id = int(os.getenv("PADDLE_INIT_TRAINER_ID")) - training_role = os.getenv("TRAINING_ROLE", "TRAINER") + trainer_id = int(os.getenv("PADDLE_TRAINER_ID")) + training_role = os.getenv("PADDLE_TRAINING_ROLE", "TRAINER") t = fluid.DistributeTranspiler() t.transpile(trainer_id, pservers=pserver_endpoints, trainers=trainers) if training_role == "PSERVER": @@ -110,14 +110,23 @@ def infer(use_cuda, save_dirname=None): # The input's dimension should be 2-D and the second dim is 13 # The input data should be >= 0 batch_size = 10 - tensor_x = numpy.random.uniform(0, 10, - [batch_size, 13]).astype("float32") + + test_reader = paddle.batch( + paddle.dataset.uci_housing.test(), batch_size=batch_size) + + test_data = next(test_reader()) + test_feat = numpy.array( + [data[0] for data in test_data]).astype("float32") + test_label = numpy.array( + [data[1] for data in test_data]).astype("float32") + assert feed_target_names[0] == 'x' results = exe.run(inference_program, - feed={feed_target_names[0]: tensor_x}, + feed={feed_target_names[0]: numpy.array(test_feat)}, fetch_list=fetch_targets) print("infer shape: ", results[0].shape) print("infer results: ", results[0]) + print("ground truth: ", test_label) def main(use_cuda, is_local=True): diff --git a/python/paddle/fluid/tests/book/test_image_classification.py b/python/paddle/fluid/tests/book/test_image_classification.py index dbcdb5766e..de6fe5f140 100644 --- a/python/paddle/fluid/tests/book/test_image_classification.py +++ b/python/paddle/fluid/tests/book/test_image_classification.py @@ -12,8 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from __future__ import print_function - import paddle import paddle.fluid as fluid import contextlib @@ -121,11 +119,11 @@ def train(net_type, use_cuda, save_dirname, is_local): avg_cost = fluid.layers.mean(cost) acc = fluid.layers.accuracy(input=predict, label=label) - # Test program + # Test program test_program = fluid.default_main_program().clone(for_test=True) optimizer = fluid.optimizer.Adam(learning_rate=0.001) - optimize_ops, params_grads = optimizer.minimize(avg_cost) + optimizer.minimize(avg_cost) BATCH_SIZE = 128 PASS_NUM = 1 @@ -178,16 +176,16 @@ def train(net_type, use_cuda, save_dirname, is_local): if is_local: train_loop(fluid.default_main_program()) else: - port = os.getenv("PADDLE_INIT_PORT", "6174") - pserver_ips = os.getenv("PADDLE_INIT_PSERVERS") # ip,ip... + port = os.getenv("PADDLE_PSERVER_PORT", "6174") + pserver_ips = os.getenv("PADDLE_PSERVER_IPS") # ip,ip... eplist = [] for ip in pserver_ips.split(","): eplist.append(':'.join([ip, port])) pserver_endpoints = ",".join(eplist) # ip:port,ip:port... - trainers = int(os.getenv("TRAINERS")) + trainers = int(os.getenv("PADDLE_TRAINERS")) current_endpoint = os.getenv("POD_IP") + ":" + port - trainer_id = int(os.getenv("PADDLE_INIT_TRAINER_ID")) - training_role = os.getenv("TRAINING_ROLE", "TRAINER") + trainer_id = int(os.getenv("PADDLE_TRAINER_ID")) + training_role = os.getenv("PADDLE_TRAINING_ROLE", "TRAINER") t = fluid.DistributeTranspiler() t.transpile(trainer_id, pservers=pserver_endpoints, trainers=trainers) if training_role == "PSERVER": diff --git a/python/paddle/fluid/tests/book/test_label_semantic_roles.py b/python/paddle/fluid/tests/book/test_label_semantic_roles.py index 50ef29c457..b7ac911caf 100644 --- a/python/paddle/fluid/tests/book/test_label_semantic_roles.py +++ b/python/paddle/fluid/tests/book/test_label_semantic_roles.py @@ -36,7 +36,7 @@ depth = 8 mix_hidden_lr = 1e-3 IS_SPARSE = True -PASS_NUM = 100 +PASS_NUM = 10 BATCH_SIZE = 10 embedding_name = 'emb' @@ -76,8 +76,7 @@ def db_lstm(word, predicate, ctx_n2, ctx_n1, ctx_0, ctx_p1, ctx_p2, mark, emb_layers.append(mark_embedding) hidden_0_layers = [ - fluid.layers.fc(input=emb, size=hidden_dim, act='tanh') - for emb in emb_layers + fluid.layers.fc(input=emb, size=hidden_dim) for emb in emb_layers ] hidden_0 = fluid.layers.sums(input=hidden_0_layers) @@ -94,8 +93,8 @@ def db_lstm(word, predicate, ctx_n2, ctx_n1, ctx_0, ctx_p1, ctx_p2, mark, for i in range(1, depth): mix_hidden = fluid.layers.sums(input=[ - fluid.layers.fc(input=input_tmp[0], size=hidden_dim, act='tanh'), - fluid.layers.fc(input=input_tmp[1], size=hidden_dim, act='tanh') + fluid.layers.fc(input=input_tmp[0], size=hidden_dim), + fluid.layers.fc(input=input_tmp[1], size=hidden_dim) ]) lstm = fluid.layers.dynamic_lstm( @@ -116,29 +115,6 @@ def db_lstm(word, predicate, ctx_n2, ctx_n1, ctx_0, ctx_p1, ctx_p2, mark, return feature_out -def to_lodtensor(data, place): - seq_lens = [len(seq) for seq in data] - cur_len = 0 - lod = [cur_len] - for l in seq_lens: - cur_len += l - lod.append(cur_len) - flattened_data = np.concatenate(data, axis=0).astype("int64") - flattened_data = flattened_data.reshape([len(flattened_data), 1]) - res = fluid.LoDTensor() - res.set(flattened_data, place) - res.set_lod([lod]) - return res - - -def create_random_lodtensor(lod, place, low, high): - data = np.random.random_integers(low, high, [lod[-1], 1]).astype("int64") - res = fluid.LoDTensor() - res.set(data, place) - res.set_lod([lod]) - return res - - def train(use_cuda, save_dirname=None, is_local=True): # define 
network topology word = fluid.layers.data( @@ -175,19 +151,13 @@ def train(use_cuda, save_dirname=None, is_local=True): decay_steps=100000, decay_rate=0.5, staircase=True)) - optimize_ops, params_grads = sgd_optimizer.minimize(avg_cost) + sgd_optimizer.minimize(avg_cost) # TODO(qiao) # add dependency track and move this config before optimizer crf_decode = fluid.layers.crf_decoding( input=feature_out, param_attr=fluid.ParamAttr(name='crfw')) - chunk_evaluator = fluid.evaluator.ChunkEvaluator( - input=crf_decode, - label=target, - chunk_scheme="IOB", - num_chunk_types=int(math.ceil((label_dict_len - 1) / 2.0))) - train_data = paddle.batch( paddle.reader.shuffle( paddle.dataset.conll05.test(), buf_size=8192), @@ -203,7 +173,6 @@ def train(use_cuda, save_dirname=None, is_local=True): def train_loop(main_program): exe.run(fluid.default_startup_program()) - embedding_param = fluid.global_scope().find_var( embedding_name).get_tensor() embedding_param.set( @@ -212,28 +181,20 @@ def train(use_cuda, save_dirname=None, is_local=True): start_time = time.time() batch_id = 0 - for pass_id in xrange(PASS_NUM): - chunk_evaluator.reset(exe) + for pass_id in range(PASS_NUM): for data in train_data(): - cost, precision, recall, f1_score = exe.run( - main_program, - feed=feeder.feed(data), - fetch_list=[avg_cost] + chunk_evaluator.metrics) - pass_precision, pass_recall, pass_f1_score = chunk_evaluator.eval( - exe) + cost = exe.run(main_program, + feed=feeder.feed(data), + fetch_list=[avg_cost]) + cost = cost[0] if batch_id % 10 == 0: - print("avg_cost:" + str(cost) + " precision:" + str( - precision) + " recall:" + str(recall) + " f1_score:" + - str(f1_score) + " pass_precision:" + str( - pass_precision) + " pass_recall:" + str( - pass_recall) + " pass_f1_score:" + str( - pass_f1_score)) + print("avg_cost:" + str(cost)) if batch_id != 0: print("second per batch: " + str((time.time( ) - start_time) / batch_id)) # Set the threshold low to speed up the CI test - if float(pass_precision) > 0.01: + if float(cost) < 60.0: if save_dirname is not None: # TODO(liuyiqun): Change the target to crf_decode fluid.io.save_inference_model(save_dirname, [ @@ -248,16 +209,16 @@ def train(use_cuda, save_dirname=None, is_local=True): if is_local: train_loop(fluid.default_main_program()) else: - port = os.getenv("PADDLE_INIT_PORT", "6174") - pserver_ips = os.getenv("PADDLE_INIT_PSERVERS") # ip,ip... + port = os.getenv("PADDLE_PSERVER_PORT", "6174") + pserver_ips = os.getenv("PADDLE_PSERVER_IPS") # ip,ip... eplist = [] for ip in pserver_ips.split(","): eplist.append(':'.join([ip, port])) pserver_endpoints = ",".join(eplist) # ip:port,ip:port... 
- trainers = int(os.getenv("TRAINERS")) + trainers = int(os.getenv("PADDLE_TRAINERS")) current_endpoint = os.getenv("POD_IP") + ":" + port - trainer_id = int(os.getenv("PADDLE_INIT_TRAINER_ID")) - training_role = os.getenv("TRAINING_ROLE", "TRAINER") + trainer_id = int(os.getenv("PADDLE_TRAINER_ID")) + training_role = os.getenv("PADDLE_TRAINING_ROLE", "TRAINER") t = fluid.DistributeTranspiler() t.transpile(trainer_id, pservers=pserver_endpoints, trainers=trainers) if training_role == "PSERVER": @@ -286,23 +247,67 @@ def infer(use_cuda, save_dirname=None): [inference_program, feed_target_names, fetch_targets] = fluid.io.load_inference_model(save_dirname, exe) - lod = [0, 4, 10] - word = create_random_lodtensor( - lod, place, low=0, high=word_dict_len - 1) - pred = create_random_lodtensor( - lod, place, low=0, high=pred_dict_len - 1) - ctx_n2 = create_random_lodtensor( - lod, place, low=0, high=word_dict_len - 1) - ctx_n1 = create_random_lodtensor( - lod, place, low=0, high=word_dict_len - 1) - ctx_0 = create_random_lodtensor( - lod, place, low=0, high=word_dict_len - 1) - ctx_p1 = create_random_lodtensor( - lod, place, low=0, high=word_dict_len - 1) - ctx_p2 = create_random_lodtensor( - lod, place, low=0, high=word_dict_len - 1) - mark = create_random_lodtensor( - lod, place, low=0, high=mark_dict_len - 1) + # Setup input by creating LoDTensor to represent sequence of words. + # Here each word is the basic element of the LoDTensor and the shape of + # each word (base_shape) should be [1] since it is simply an index to + # look up for the corresponding word vector. + # Suppose the recursive_sequence_lengths info is set to [[3, 4, 2]], + # which has only one level of detail. Then the created LoDTensor will have only + # one higher level structure (sequence of words, or sentence) than the basic + # element (word). Hence the LoDTensor will hold data for three sentences of + # length 3, 4 and 2, respectively. + # Note that recursive_sequence_lengths should be a list of lists. + recursive_seq_lens = [[3, 4, 2]] + base_shape = [1] + # The range of random integers is [low, high] + word = fluid.create_random_int_lodtensor( + recursive_seq_lens, + base_shape, + place, + low=0, + high=word_dict_len - 1) + pred = fluid.create_random_int_lodtensor( + recursive_seq_lens, + base_shape, + place, + low=0, + high=pred_dict_len - 1) + ctx_n2 = fluid.create_random_int_lodtensor( + recursive_seq_lens, + base_shape, + place, + low=0, + high=word_dict_len - 1) + ctx_n1 = fluid.create_random_int_lodtensor( + recursive_seq_lens, + base_shape, + place, + low=0, + high=word_dict_len - 1) + ctx_0 = fluid.create_random_int_lodtensor( + recursive_seq_lens, + base_shape, + place, + low=0, + high=word_dict_len - 1) + ctx_p1 = fluid.create_random_int_lodtensor( + recursive_seq_lens, + base_shape, + place, + low=0, + high=word_dict_len - 1) + ctx_p2 = fluid.create_random_int_lodtensor( + recursive_seq_lens, + base_shape, + place, + low=0, + high=word_dict_len - 1) + mark = fluid.create_random_int_lodtensor( + recursive_seq_lens, + base_shape, + place, + low=0, + high=mark_dict_len - 1) # Construct feed as a dictionary of {feed_target_name: feed_target_data} # and results will contain a list of data corresponding to fetch_targets. 
@@ -328,7 +333,7 @@ def infer(use_cuda, save_dirname=None): }, fetch_list=fetch_targets, return_numpy=False) - print(results[0].lod()) + print(results[0].recursive_sequence_lengths()) np_data = np.array(results[0]) print("Inference Shape: ", np_data.shape) diff --git a/python/paddle/fluid/tests/book/test_machine_translation.py b/python/paddle/fluid/tests/book/test_machine_translation.py index 46c6b9c29a..462faad3e1 100644 --- a/python/paddle/fluid/tests/book/test_machine_translation.py +++ b/python/paddle/fluid/tests/book/test_machine_translation.py @@ -108,7 +108,7 @@ def decoder_decode(context, is_sparse): pre_state = pd.array_read(array=state_array, i=counter) pre_score = pd.array_read(array=scores_array, i=counter) - # expand the lod of pre_state to be the same with pre_score + # expand the recursive_sequence_lengths of pre_state to be the same with pre_score pre_state_expanded = pd.sequence_expand(pre_state, pre_score) pre_ids_emb = pd.embedding( @@ -126,9 +126,19 @@ def decoder_decode(context, is_sparse): current_score = pd.fc(input=current_state_with_lod, size=target_dict_dim, act='softmax') - topk_scores, topk_indices = pd.topk(current_score, k=50) + topk_scores, topk_indices = pd.topk(current_score, k=beam_size) + # calculate accumulated scores after topk to reduce computation cost + accu_scores = pd.elementwise_add( + x=pd.log(topk_scores), y=pd.reshape( + pre_score, shape=[-1]), axis=0) selected_ids, selected_scores = pd.beam_search( - pre_ids, topk_indices, topk_scores, beam_size, end_id=10, level=0) + pre_ids, + pre_score, + topk_indices, + accu_scores, + beam_size, + end_id=10, + level=0) pd.increment(x=counter, value=1, in_place=True) @@ -137,38 +147,20 @@ def decoder_decode(context, is_sparse): pd.array_write(selected_ids, array=ids_array, i=counter) pd.array_write(selected_scores, array=scores_array, i=counter) - pd.less_than(x=counter, y=array_len, cond=cond) + # update the break condition: up to the max length or all candidates of + # source sentences have ended. 
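+        # pd.is_empty(selected_ids) turns True once no beam candidates
+        # survive, so the logical_and below can end the loop before counter
+        # reaches array_len.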
+ length_cond = pd.less_than(x=counter, y=array_len) + finish_cond = pd.logical_not(pd.is_empty(x=selected_ids)) + pd.logical_and(x=length_cond, y=finish_cond, out=cond) translation_ids, translation_scores = pd.beam_search_decode( - ids=ids_array, scores=scores_array) + ids=ids_array, scores=scores_array, beam_size=beam_size, end_id=10) # return init_ids, init_scores return translation_ids, translation_scores -def set_init_lod(data, lod, place): - res = fluid.LoDTensor() - res.set(data, place) - res.set_lod(lod) - return res - - -def to_lodtensor(data, place): - seq_lens = [len(seq) for seq in data] - cur_len = 0 - lod = [cur_len] - for l in seq_lens: - cur_len += l - lod.append(cur_len) - flattened_data = np.concatenate(data, axis=0).astype("int64") - flattened_data = flattened_data.reshape([len(flattened_data), 1]) - res = fluid.LoDTensor() - res.set(flattened_data, place) - res.set_lod([lod]) - return res - - def train_main(use_cuda, is_sparse, is_local=True): if use_cuda and not fluid.core.is_compiled_with_cuda(): return @@ -185,30 +177,32 @@ def train_main(use_cuda, is_sparse, is_local=True): learning_rate=1e-4, regularization=fluid.regularizer.L2DecayRegularizer( regularization_coeff=0.1)) - optimize_ops, params_grads = optimizer.minimize(avg_cost) + optimizer.minimize(avg_cost) train_data = paddle.batch( paddle.reader.shuffle( paddle.dataset.wmt14.train(dict_size), buf_size=1000), batch_size=batch_size) + feed_order = [ + 'src_word_id', 'target_language_word', 'target_language_next_word' + ] + exe = Executor(place) def train_loop(main_program): exe.run(framework.default_startup_program()) + feed_list = [ + main_program.global_block().var(var_name) for var_name in feed_order + ] + feeder = fluid.DataFeeder(feed_list, place) + batch_id = 0 - for pass_id in xrange(1): + for pass_id in range(1): for data in train_data(): - word_data = to_lodtensor(map(lambda x: x[0], data), place) - trg_word = to_lodtensor(map(lambda x: x[1], data), place) - trg_word_next = to_lodtensor(map(lambda x: x[2], data), place) outs = exe.run(main_program, - feed={ - 'src_word_id': word_data, - 'target_language_word': trg_word, - 'target_language_next_word': trg_word_next - }, + feed=feeder.feed(data), fetch_list=[avg_cost]) avg_cost_val = np.array(outs[0]) print('pass_id=' + str(pass_id) + ' batch=' + str(batch_id) + @@ -220,16 +214,16 @@ def train_main(use_cuda, is_sparse, is_local=True): if is_local: train_loop(framework.default_main_program()) else: - port = os.getenv("PADDLE_INIT_PORT", "6174") - pserver_ips = os.getenv("PADDLE_INIT_PSERVERS") # ip,ip... + port = os.getenv("PADDLE_PSERVER_PORT", "6174") + pserver_ips = os.getenv("PADDLE_PSERVER_IPS") # ip,ip... eplist = [] for ip in pserver_ips.split(","): eplist.append(':'.join([ip, port])) pserver_endpoints = ",".join(eplist) # ip:port,ip:port... - trainers = int(os.getenv("TRAINERS")) + trainers = int(os.getenv("PADDLE_TRAINERS")) current_endpoint = os.getenv("POD_IP") + ":" + port - trainer_id = int(os.getenv("PADDLE_INIT_TRAINER_ID")) - training_role = os.getenv("TRAINING_ROLE", "TRAINER") + trainer_id = int(os.getenv("PADDLE_TRAINER_ID")) + training_role = os.getenv("PADDLE_TRAINING_ROLE", "TRAINER") t = fluid.DistributeTranspiler() t.transpile(trainer_id, pservers=pserver_endpoints, trainers=trainers) if training_role == "PSERVER": @@ -258,29 +252,37 @@ def decode_main(use_cuda, is_sparse): [1. 
for _ in range(batch_size)], dtype='float32') init_ids_data = init_ids_data.reshape((batch_size, 1)) init_scores_data = init_scores_data.reshape((batch_size, 1)) - init_lod = [i for i in range(batch_size)] + [batch_size] - init_lod = [init_lod, init_lod] + init_recursive_seq_lens = [1] * batch_size + init_recursive_seq_lens = [init_recursive_seq_lens, init_recursive_seq_lens] + + init_ids = fluid.create_lod_tensor(init_ids_data, init_recursive_seq_lens, + place) + init_scores = fluid.create_lod_tensor(init_scores_data, + init_recursive_seq_lens, place) train_data = paddle.batch( paddle.reader.shuffle( paddle.dataset.wmt14.train(dict_size), buf_size=1000), batch_size=batch_size) - for _, data in enumerate(train_data()): - init_ids = set_init_lod(init_ids_data, init_lod, place) - init_scores = set_init_lod(init_scores_data, init_lod, place) - src_word_data = to_lodtensor(map(lambda x: x[0], data), place) + feed_order = ['src_word_id'] + feed_list = [ + framework.default_main_program().global_block().var(var_name) + for var_name in feed_order + ] + feeder = fluid.DataFeeder(feed_list, place) + + for data in train_data(): + feed_dict = feeder.feed([[x[0]] for x in data]) + feed_dict['init_ids'] = init_ids + feed_dict['init_scores'] = init_scores result_ids, result_scores = exe.run( framework.default_main_program(), - feed={ - 'src_word_id': src_word_data, - 'init_ids': init_ids, - 'init_scores': init_scores - }, + feed=feed_dict, fetch_list=[translation_ids, translation_scores], return_numpy=False) - print result_ids.lod() + print(result_ids.recursive_sequence_lengths()) break diff --git a/python/paddle/fluid/tests/book/test_recognize_digits.py b/python/paddle/fluid/tests/book/test_recognize_digits.py index c115aa4d7d..3e5f76d12d 100644 --- a/python/paddle/fluid/tests/book/test_recognize_digits.py +++ b/python/paddle/fluid/tests/book/test_recognize_digits.py @@ -11,16 +11,18 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -from __future__ import print_function -import argparse -import paddle.fluid as fluid -import paddle -import sys -import numpy -import unittest + +import paddle.fluid.core as core import math -import sys import os +import sys +import unittest + +import numpy + +import paddle +import paddle.fluid as fluid +from paddle.fluid.layers.device import get_places BATCH_SIZE = 64 @@ -76,7 +78,7 @@ def train(nn_type, net_conf = conv_net if parallel: - places = fluid.layers.get_places() + places = get_places() pd = fluid.layers.ParallelDo(places) with pd.do(): img_ = pd.read_input(img) @@ -94,8 +96,8 @@ def train(nn_type, test_program = fluid.default_main_program().clone(for_test=True) - optimizer = fluid.optimizer.Adam(learning_rate=0.001) - optimize_ops, params_grads = optimizer.minimize(avg_loss) + optimizer = fluid.optimizer.Adam(learning_rate=0.001, LARS_weight_decay=0.3) + optimizer.minimize(avg_loss) place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() @@ -151,16 +153,16 @@ def train(nn_type, if is_local: train_loop(fluid.default_main_program()) else: - port = os.getenv("PADDLE_INIT_PORT", "6174") - pserver_ips = os.getenv("PADDLE_INIT_PSERVERS") # ip,ip... + port = os.getenv("PADDLE_PSERVER_PORT", "6174") + pserver_ips = os.getenv("PADDLE_PSERVER_IPS") # ip,ip... eplist = [] for ip in pserver_ips.split(","): eplist.append(':'.join([ip, port])) pserver_endpoints = ",".join(eplist) # ip:port,ip:port... 
- trainers = int(os.getenv("TRAINERS")) + trainers = int(os.getenv("PADDLE_TRAINERS")) current_endpoint = os.getenv("POD_IP") + ":" + port - trainer_id = int(os.getenv("PADDLE_INIT_TRAINER_ID")) - training_role = os.getenv("TRAINING_ROLE", "TRAINER") + trainer_id = int(os.getenv("PADDLE_TRAINER_ID")) + training_role = os.getenv("PADDLE_TRAINING_ROLE", "TRAINER") t = fluid.DistributeTranspiler() t.transpile(trainer_id, pservers=pserver_endpoints, trainers=trainers) if training_role == "PSERVER": @@ -255,6 +257,8 @@ def inject_test_method(use_cuda, parallel, nn_type, combine): def inject_all_tests(): for use_cuda in (False, True): + if use_cuda and not core.is_compiled_with_cuda(): + continue for parallel in (False, True): for nn_type in ('mlp', 'conv'): inject_test_method(use_cuda, parallel, nn_type, True) diff --git a/python/paddle/fluid/tests/book/test_recommender_system.py b/python/paddle/fluid/tests/book/test_recommender_system.py index d022dedbff..b30c8771fc 100644 --- a/python/paddle/fluid/tests/book/test_recommender_system.py +++ b/python/paddle/fluid/tests/book/test_recommender_system.py @@ -160,7 +160,7 @@ def train(use_cuda, save_dirname, is_local=True): test_program = fluid.default_main_program().clone(for_test=True) sgd_optimizer = SGDOptimizer(learning_rate=0.2) - optimize_ops, params_grads = sgd_optimizer.minimize(avg_cost) + sgd_optimizer.minimize(avg_cost) place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() @@ -173,63 +173,33 @@ def train(use_cuda, save_dirname, is_local=True): test_reader = paddle.batch( paddle.dataset.movielens.test(), batch_size=BATCH_SIZE) - feeding = { - 'user_id': 0, - 'gender_id': 1, - 'age_id': 2, - 'job_id': 3, - 'movie_id': 4, - 'category_id': 5, - 'movie_title': 6, - 'score': 7 - } - - def func_feed(feeding, data): - feed_tensors = {} - for (key, idx) in feeding.iteritems(): - tensor = fluid.LoDTensor() - if key != "category_id" and key != "movie_title": - if key == "score": - numpy_data = np.array(map(lambda x: x[idx], data)).astype( - "float32") - else: - numpy_data = np.array(map(lambda x: x[idx], data)).astype( - "int64") - else: - numpy_data = map(lambda x: np.array(x[idx]).astype("int64"), - data) - lod_info = [len(item) for item in numpy_data] - offset = 0 - lod = [offset] - for item in lod_info: - offset += item - lod.append(offset) - numpy_data = np.concatenate(numpy_data, axis=0) - tensor.set_lod([lod]) - - numpy_data = numpy_data.reshape([numpy_data.shape[0], 1]) - tensor.set(numpy_data, place) - feed_tensors[key] = tensor - return feed_tensors + feed_order = [ + 'user_id', 'gender_id', 'age_id', 'job_id', 'movie_id', 'category_id', + 'movie_title', 'score' + ] def train_loop(main_program): exe.run(framework.default_startup_program()) + feed_list = [ + main_program.global_block().var(var_name) for var_name in feed_order + ] + feeder = fluid.DataFeeder(feed_list, place) + PASS_NUM = 100 for pass_id in range(PASS_NUM): for batch_id, data in enumerate(train_reader()): # train a mini-batch outs = exe.run(program=main_program, - feed=func_feed(feeding, data), + feed=feeder.feed(data), fetch_list=[avg_cost]) out = np.array(outs[0]) if (batch_id + 1) % 10 == 0: avg_cost_set = [] for test_data in test_reader(): - avg_cost_np = exe.run( - program=test_program, - feed=func_feed(feeding, test_data), - fetch_list=[avg_cost]) + avg_cost_np = exe.run(program=test_program, + feed=feeder.feed(test_data), + fetch_list=[avg_cost]) avg_cost_set.append(avg_cost_np[0]) break # test only 1 segment for speeding up CI @@ -250,16 +220,16 @@ def 
train(use_cuda, save_dirname, is_local=True): if is_local: train_loop(fluid.default_main_program()) else: - port = os.getenv("PADDLE_INIT_PORT", "6174") - pserver_ips = os.getenv("PADDLE_INIT_PSERVERS") # ip,ip... + port = os.getenv("PADDLE_PSERVER_PORT", "6174") + pserver_ips = os.getenv("PADDLE_PSERVER_IPS") # ip,ip... eplist = [] for ip in pserver_ips.split(","): eplist.append(':'.join([ip, port])) pserver_endpoints = ",".join(eplist) # ip:port,ip:port... - trainers = int(os.getenv("TRAINERS")) + trainers = int(os.getenv("PADDLE_TRAINERS")) current_endpoint = os.getenv("POD_IP") + ":" + port - trainer_id = int(os.getenv("PADDLE_INIT_TRAINER_ID")) - training_role = os.getenv("TRAINING_ROLE", "TRAINER") + trainer_id = int(os.getenv("PADDLE_TRAINER_ID")) + training_role = os.getenv("PADDLE_TRAINING_ROLE", "TRAINER") t = fluid.DistributeTranspiler() t.transpile(trainer_id, pservers=pserver_endpoints, trainers=trainers) if training_role == "PSERVER": @@ -279,23 +249,6 @@ def infer(use_cuda, save_dirname=None): place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() exe = fluid.Executor(place) - def create_lod_tensor(data, lod=None): - tensor = fluid.LoDTensor() - if lod is None: - # Tensor, the shape is [batch_size, 1] - index = 0 - lod_0 = [index] - for l in range(len(data)): - index += 1 - lod_0.append(index) - lod = [lod_0] - tensor.set_lod(lod) - - flattened_data = np.concatenate(data, axis=0).astype("int64") - flattened_data = flattened_data.reshape([len(flattened_data), 1]) - tensor.set(flattened_data, place) - return tensor - inference_scope = fluid.core.Scope() with fluid.scope_guard(inference_scope): # Use fluid.io.load_inference_model to obtain the inference program desc, @@ -307,26 +260,35 @@ def infer(use_cuda, save_dirname=None): # Use the first data from paddle.dataset.movielens.test() as input assert feed_target_names[0] == "user_id" - user_id = create_lod_tensor([[1]]) + # Use create_lod_tensor(data, recursive_sequence_lengths, place) API + # to generate LoD Tensor where `data` is a list of sequences of index + # numbers, `recursive_sequence_lengths` is the length-based level of detail + # (lod) info associated with `data`. + # For example, data = [[10, 2, 3], [2, 3]] means that it contains + # two sequences of indexes, of length 3 and 2, respectively. + # Correspondingly, recursive_sequence_lengths = [[3, 2]] contains one + # level of detail info, indicating that `data` consists of two sequences + # of length 3 and 2, respectively. 
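For reference, a minimal standalone sketch of the length-based create_lod_tensor usage that the comment above describes, reusing the comment's own example values (illustrative only, not part of this patch):

import numpy as np
import paddle.fluid as fluid

place = fluid.CPUPlace()
# Two sequences of token indexes, of lengths 3 and 2, as in the comment above.
data = [[10, 2, 3], [2, 3]]
tensor = fluid.create_lod_tensor(data, [[3, 2]], place)
print(tensor.recursive_sequence_lengths())  # expected: [[3, 2]]
print(np.array(tensor).shape)  # expected: (5, 1), the 5 indexes in one column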
+ user_id = fluid.create_lod_tensor([[1]], [[1]], place) assert feed_target_names[1] == "gender_id" - gender_id = create_lod_tensor([[1]]) + gender_id = fluid.create_lod_tensor([[1]], [[1]], place) assert feed_target_names[2] == "age_id" - age_id = create_lod_tensor([[0]]) + age_id = fluid.create_lod_tensor([[0]], [[1]], place) assert feed_target_names[3] == "job_id" - job_id = create_lod_tensor([[10]]) + job_id = fluid.create_lod_tensor([[10]], [[1]], place) assert feed_target_names[4] == "movie_id" - movie_id = create_lod_tensor([[783]]) + movie_id = fluid.create_lod_tensor([[783]], [[1]], place) assert feed_target_names[5] == "category_id" - category_id = create_lod_tensor([[10], [8], [9]], [[0, 3]]) + category_id = fluid.create_lod_tensor([[10, 8, 9]], [[3]], place) assert feed_target_names[6] == "movie_title" - movie_title = create_lod_tensor([[1069], [4140], [2923], [710], [988]], - [[0, 5]]) + movie_title = fluid.create_lod_tensor([[1069, 4140, 2923, 710, 988]], + [[5]], place) # Construct feed as a dictionary of {feed_target_name: feed_target_data} # and results will contain a list of data corresponding to fetch_targets. diff --git a/python/paddle/fluid/tests/book/notest_rnn_encoder_decoder.py b/python/paddle/fluid/tests/book/test_rnn_encoder_decoder.py similarity index 85% rename from python/paddle/fluid/tests/book/notest_rnn_encoder_decoder.py rename to python/paddle/fluid/tests/book/test_rnn_encoder_decoder.py index ce640dece8..2e79be2bd0 100644 --- a/python/paddle/fluid/tests/book/notest_rnn_encoder_decoder.py +++ b/python/paddle/fluid/tests/book/test_rnn_encoder_decoder.py @@ -152,29 +152,6 @@ def seq_to_seq_net(): return avg_cost, prediction -def to_lodtensor(data, place): - seq_lens = [len(seq) for seq in data] - cur_len = 0 - lod = [cur_len] - for l in seq_lens: - cur_len += l - lod.append(cur_len) - flattened_data = np.concatenate(data, axis=0).astype("int64") - flattened_data = flattened_data.reshape([len(flattened_data), 1]) - res = core.LoDTensor() - res.set(flattened_data, place) - res.set_lod([lod]) - return res - - -def create_random_lodtensor(lod, place, low, high): - data = np.random.random_integers(low, high, [lod[-1], 1]).astype("int64") - res = fluid.LoDTensor() - res.set(data, place) - res.set_lod([lod]) - return res - - def train(use_cuda, save_dirname=None): [avg_cost, prediction] = seq_to_seq_net() @@ -188,22 +165,20 @@ def train(use_cuda, save_dirname=None): place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() exe = Executor(place) - exe.run(framework.default_startup_program()) + feed_order = ['source_sequence', 'target_sequence', 'label_sequence'] + feed_list = [ + framework.default_main_program().global_block().var(var_name) + for var_name in feed_order + ] + feeder = fluid.DataFeeder(feed_list, place) + batch_id = 0 - for pass_id in xrange(2): + for pass_id in range(2): for data in train_data(): - word_data = to_lodtensor(map(lambda x: x[0], data), place) - trg_word = to_lodtensor(map(lambda x: x[1], data), place) - trg_word_next = to_lodtensor(map(lambda x: x[2], data), place) - outs = exe.run(framework.default_main_program(), - feed={ - 'source_sequence': word_data, - 'target_sequence': trg_word, - 'label_sequence': trg_word_next - }, + feed=feeder.feed(data), fetch_list=[avg_cost]) avg_cost_val = np.array(outs[0]) @@ -237,9 +212,23 @@ def infer(use_cuda, save_dirname=None): [inference_program, feed_target_names, fetch_targets] = fluid.io.load_inference_model(save_dirname, exe) - lod = [0, 4, 10] - word_data = create_random_lodtensor(lod, 
place, low=0, high=1) - trg_word = create_random_lodtensor(lod, place, low=0, high=1) + # Setup input by creating a LoDTensor to represent a sequence of words. + # Here each word is the basic element of the LoDTensor and the shape of + # each word (base_shape) should be [1] since it is simply an index to + # look up the corresponding word vector. + # Suppose the recursive_sequence_lengths info is set to [[4, 6]], + # which has only one level of detail. Then the created LoDTensor will have only + # one higher level structure (sequence of words, or sentence) than the basic + # element (word). Hence the LoDTensor will hold data for two sentences of + # length 4 and 6, respectively. + # Note that recursive_sequence_lengths should be a list of lists. + recursive_seq_lens = [[4, 6]] + base_shape = [1] + # The range of random integers is [low, high] + word_data = fluid.create_random_int_lodtensor( + recursive_seq_lens, base_shape, place, low=0, high=1) + trg_word = fluid.create_random_int_lodtensor( + recursive_seq_lens, base_shape, place, low=0, high=1) # Construct feed as a dictionary of {feed_target_name: feed_target_data} # and results will contain a list of data corresponding to fetch_targets. @@ -252,7 +241,7 @@ def infer(use_cuda, save_dirname=None): }, fetch_list=fetch_targets, return_numpy=False) - print(results[0].lod()) + print(results[0].recursive_sequence_lengths()) np_data = np.array(results[0]) print("Inference shape: ", np_data.shape) print("Inference results: ", np_data) diff --git a/python/paddle/fluid/tests/book/test_word2vec.py b/python/paddle/fluid/tests/book/test_word2vec.py index 6dec0f6857..e761e05795 100644 --- a/python/paddle/fluid/tests/book/test_word2vec.py +++ b/python/paddle/fluid/tests/book/test_word2vec.py @@ -14,6 +14,7 @@ import paddle import paddle.fluid as fluid +from paddle.fluid.layers.device import get_places import unittest import os import numpy as np @@ -21,15 +22,6 @@ import math import sys -def create_random_lodtensor(lod, place, low, high): - # The range of data elements is [low, high] - data = np.random.random_integers(low, high, [lod[-1], 1]).astype("int64") - res = fluid.LoDTensor() - res.set(data, place) - res.set_lod([lod]) - return res - - def train(use_cuda, is_sparse, is_parallel, save_dirname, is_local=True): PASS_NUM = 100 EMBED_SIZE = 32 @@ -89,19 +81,21 @@ def train(use_cuda, is_sparse, is_parallel, save_dirname, is_local=True): avg_cost, predict_word = __network__( [first_word, second_word, third_word, forth_word, next_word]) else: - places = fluid.layers.get_places() + places = get_places() pd = fluid.layers.ParallelDo(places) with pd.do(): avg_cost, predict_word = __network__( - map(pd.read_input, [ - first_word, second_word, third_word, forth_word, next_word - ])) + list( + map(pd.read_input, [ + first_word, second_word, third_word, forth_word, + next_word + ]))) pd.write_output(avg_cost) avg_cost = fluid.layers.mean(pd()) sgd_optimizer = fluid.optimizer.SGD(learning_rate=0.001) - optimize_ops, params_grads = sgd_optimizer.minimize(avg_cost) + sgd_optimizer.minimize(avg_cost) train_reader = paddle.batch( paddle.dataset.imikolov.train(word_dict, N), BATCH_SIZE) @@ -134,16 +128,16 @@ def train(use_cuda, is_sparse, is_parallel, save_dirname, is_local=True): if is_local: train_loop(fluid.default_main_program()) else: - port = os.getenv("PADDLE_INIT_PORT", "6174") - pserver_ips = os.getenv("PADDLE_INIT_PSERVERS") # ip,ip... + port = os.getenv("PADDLE_PSERVER_PORT", "6174") + pserver_ips = os.getenv("PADDLE_PSERVER_IPS") # ip,ip...
eplist = [] for ip in pserver_ips.split(","): eplist.append(':'.join([ip, port])) pserver_endpoints = ",".join(eplist) # ip:port,ip:port... - trainers = int(os.getenv("TRAINERS")) + trainers = int(os.getenv("PADDLE_TRAINERS")) current_endpoint = os.getenv("POD_IP") + ":" + port - trainer_id = int(os.getenv("PADDLE_INIT_TRAINER_ID")) - training_role = os.getenv("TRAINING_ROLE", "TRAINER") + trainer_id = int(os.getenv("PADDLE_TRAINER_ID")) + training_role = os.getenv("PADDLE_TRAINING_ROLE", "TRAINER") t = fluid.DistributeTranspiler() t.transpile(trainer_id, pservers=pserver_endpoints, trainers=trainers) if training_role == "PSERVER": @@ -175,16 +169,24 @@ def infer(use_cuda, save_dirname=None): word_dict = paddle.dataset.imikolov.build_dict() dict_size = len(word_dict) - # Setup inputs, by creating 4 words, the lod of which should be [0, 1] - lod = [0, 1] - first_word = create_random_lodtensor( - lod, place, low=0, high=dict_size - 1) - second_word = create_random_lodtensor( - lod, place, low=0, high=dict_size - 1) - third_word = create_random_lodtensor( - lod, place, low=0, high=dict_size - 1) - fourth_word = create_random_lodtensor( - lod, place, low=0, high=dict_size - 1) + # Setup inputs by creating 4 LoDTensors representing 4 words. Here each word + # is simply an index to look up the corresponding word vector and hence + # the shape of word (base_shape) should be [1]. The recursive_sequence_lengths, + # which is the length-based level of detail (lod) of each LoDTensor, should be [[1]] + # meaning there is only one level of detail and there is only one sequence of + # one word on this level. + # Note that recursive_sequence_lengths should be a list of lists. + recursive_seq_lens = [[1]] + base_shape = [1] + # The range of random integers is [low, high] + first_word = fluid.create_random_int_lodtensor( + recursive_seq_lens, base_shape, place, low=0, high=dict_size - 1) + second_word = fluid.create_random_int_lodtensor( + recursive_seq_lens, base_shape, place, low=0, high=dict_size - 1) + third_word = fluid.create_random_int_lodtensor( + recursive_seq_lens, base_shape, place, low=0, high=dict_size - 1) + fourth_word = fluid.create_random_int_lodtensor( + recursive_seq_lens, base_shape, place, low=0, high=dict_size - 1) assert feed_target_names[0] == 'firstw' assert feed_target_names[1] == 'secondw' @@ -202,7 +204,7 @@ def infer(use_cuda, save_dirname=None): }, fetch_list=fetch_targets, return_numpy=False) - print(results[0].lod()) + print(results[0].recursive_sequence_lengths()) np_data = np.array(results[0]) print("Inference Shape: ", np_data.shape) @@ -245,7 +247,7 @@ def inject_test_method(use_cuda, is_sparse, is_parallel): is_sparse=is_sparse, is_parallel=is_parallel) - if use_cuda and is_sparse: + if (not fluid.core.is_compiled_with_cuda() or use_cuda) and is_sparse: fn = __impl__ else: # skip the other test when on CI server diff --git a/python/paddle/fluid/tests/book/understand_sentiment/notest_understand_sentiment_stacked_lstm.py b/python/paddle/fluid/tests/book/understand_sentiment/notest_understand_sentiment_stacked_lstm.py deleted file mode 100644 index 9948e5c023..0000000000 --- a/python/paddle/fluid/tests/book/understand_sentiment/notest_understand_sentiment_stacked_lstm.py +++ /dev/null @@ -1,140 +0,0 @@ -# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from __future__ import print_function - -import paddle -import paddle.fluid as fluid -from functools import partial - -CLASS_DIM = 2 -EMB_DIM = 128 -HID_DIM = 512 -STACKED_NUM = 3 - - -def stacked_lstm_net(data, input_dim, class_dim, emb_dim, hid_dim, stacked_num): - assert stacked_num % 2 == 1 - - emb = fluid.layers.embedding( - input=data, size=[input_dim, emb_dim], is_sparse=True) - - fc1 = fluid.layers.fc(input=emb, size=hid_dim) - lstm1, cell1 = fluid.layers.dynamic_lstm(input=fc1, size=hid_dim) - - inputs = [fc1, lstm1] - - for i in range(2, stacked_num + 1): - fc = fluid.layers.fc(input=inputs, size=hid_dim) - lstm, cell = fluid.layers.dynamic_lstm( - input=fc, size=hid_dim, is_reverse=(i % 2) == 0) - inputs = [fc, lstm] - - fc_last = fluid.layers.sequence_pool(input=inputs[0], pool_type='max') - lstm_last = fluid.layers.sequence_pool(input=inputs[1], pool_type='max') - - prediction = fluid.layers.fc(input=[fc_last, lstm_last], - size=class_dim, - act='softmax') - return prediction - - -def inference_network(word_dict): - data = fluid.layers.data( - name="words", shape=[1], dtype="int64", lod_level=1) - - dict_dim = len(word_dict) - net = stacked_lstm_net(data, dict_dim, CLASS_DIM, EMB_DIM, HID_DIM, - STACKED_NUM) - return net - - -def train_network(word_dict): - prediction = inference_network(word_dict) - label = fluid.layers.data(name="label", shape=[1], dtype="int64") - cost = fluid.layers.cross_entropy(input=prediction, label=label) - avg_cost = fluid.layers.mean(cost) - accuracy = fluid.layers.accuracy(input=prediction, label=label) - return avg_cost, accuracy - - -def train(use_cuda, save_path): - BATCH_SIZE = 128 - EPOCH_NUM = 5 - - word_dict = paddle.dataset.imdb.word_dict() - - train_data = paddle.batch( - paddle.reader.shuffle( - paddle.dataset.imdb.train(word_dict), buf_size=1000), - batch_size=BATCH_SIZE) - - test_data = paddle.batch( - paddle.dataset.imdb.test(word_dict), batch_size=BATCH_SIZE) - - def event_handler(event): - if isinstance(event, fluid.EndIteration): - if (event.batch_id % 10) == 0: - avg_cost, accuracy = trainer.test(reader=test_data) - - print('BatchID {1:04}, Loss {2:2.2}, Acc {3:2.2}'.format( - event.batch_id + 1, avg_cost, accuracy)) - - if accuracy > 0.01: # Low threshold for speeding up CI - trainer.params.save(save_path) - return - - place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() - trainer = fluid.Trainer( - partial(train_network, word_dict), - optimizer=fluid.optimizer.Adagrad(learning_rate=0.002), - place=place, - event_handler=event_handler) - - trainer.train(train_data, EPOCH_NUM, event_handler=event_handler) - - -def infer(use_cuda, save_path): - params = fluid.Params(save_path) - place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() - word_dict = paddle.dataset.imdb.word_dict() - inferencer = fluid.Inferencer( - partial(inference_network, word_dict), params, place=place) - - def create_random_lodtensor(lod, place, low, high): - data = np.random.random_integers(low, high, - [lod[-1], 1]).astype("int64") - res = fluid.LoDTensor() - res.set(data, place) - res.set_lod([lod]) - return res - - lod = [0, 4, 10] - 
tensor_words = create_random_lodtensor( - lod, place, low=0, high=len(word_dict) - 1) - results = inferencer.infer({'words': tensor_words}) - print("infer results: ", results) - - -def main(use_cuda): - if use_cuda and not fluid.core.is_compiled_with_cuda(): - return - save_path = "understand_sentiment_stacked_lstm.inference.model" - train(use_cuda, save_path) - infer(use_cuda, save_path) - - -if __name__ == '__main__': - for use_cuda in (False, True): - main(use_cuda=use_cuda) diff --git a/python/paddle/fluid/tests/book_memory_optimization/test_memopt_fit_a_line.py b/python/paddle/fluid/tests/book_memory_optimization/test_memopt_fit_a_line.py index 8818cf96fa..ccc62b442f 100644 --- a/python/paddle/fluid/tests/book_memory_optimization/test_memopt_fit_a_line.py +++ b/python/paddle/fluid/tests/book_memory_optimization/test_memopt_fit_a_line.py @@ -12,12 +12,13 @@ # See the License for the specific language governing permissions and # limitations under the License. -import numpy as np -import paddle -import paddle.fluid as fluid import math import sys +import paddle +import paddle.fluid as fluid +from paddle.fluid.layers.device import get_places + # need to fix random seed and training data to compare the loss # value accurately calculated by the default and the memory optimization # version. @@ -34,7 +35,7 @@ if fluid.core.is_compiled_with_cuda(): use_nccl = False place = fluid.CUDAPlace(0) -places = fluid.layers.get_places(device_count=0, device_type=device_type) +places = get_places(device_count=0, device_type=device_type) pd = fluid.layers.ParallelDo(places, use_nccl=use_nccl) with pd.do(): x_ = pd.read_input(x) @@ -56,7 +57,7 @@ BATCH_SIZE = 200 # fix the order of training data train_reader = paddle.batch( - paddle.dataset.uci_housing.train(), batch_size=BATCH_SIZE) + paddle.dataset.uci_housing.train(), batch_size=BATCH_SIZE, drop_last=False) # train_reader = paddle.batch( # paddle.reader.shuffle( @@ -77,7 +78,7 @@ for pass_id in range(PASS_NUM): if avg_loss_value[0] < 10.0: exit(0) # if avg cost less than 10.0, we think our code is good. - print avg_loss_value[0] + print(avg_loss_value[0]) if math.isnan(float(avg_loss_value)): sys.exit("got NaN loss, training failed.") exit(1) diff --git a/python/paddle/fluid/tests/book_memory_optimization/test_memopt_image_classification_train.py b/python/paddle/fluid/tests/book_memory_optimization/test_memopt_image_classification_train.py index dfebb9a06e..b2a59d27da 100644 --- a/python/paddle/fluid/tests/book_memory_optimization/test_memopt_image_classification_train.py +++ b/python/paddle/fluid/tests/book_memory_optimization/test_memopt_image_classification_train.py @@ -12,8 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. 
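The fit_a_line test above pairs the relocated get_places helper with ParallelDo; a minimal program-construction sketch of that pattern (hypothetical layer size, CPU-only, no training loop):

import paddle.fluid as fluid
from paddle.fluid.layers.device import get_places

x = fluid.layers.data(name='x', shape=[13], dtype='float32')
places = get_places(device_count=0, device_type='CPU')  # 0: use all available devices
pd = fluid.layers.ParallelDo(places)
with pd.do():
    x_ = pd.read_input(x)  # split the batch across the places
    y_ = fluid.layers.fc(input=x_, size=1, act=None)
    pd.write_output(y_)
y_predict = pd()  # gather the per-place outputs back into one variable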
-from __future__ import print_function - import sys import paddle diff --git a/python/paddle/fluid/tests/book_memory_optimization/test_memopt_machine_translation.py b/python/paddle/fluid/tests/book_memory_optimization/test_memopt_machine_translation.py index a1ca6d981f..323ddfb691 100644 --- a/python/paddle/fluid/tests/book_memory_optimization/test_memopt_machine_translation.py +++ b/python/paddle/fluid/tests/book_memory_optimization/test_memopt_machine_translation.py @@ -80,21 +80,6 @@ def encoder_decoder(): return rnn() -def to_lodtensor(data, place): - seq_lens = [len(seq) for seq in data] - cur_len = 0 - lod = [cur_len] - for l in seq_lens: - cur_len += l - lod.append(cur_len) - flattened_data = np.concatenate(data, axis=0).astype("int64") - flattened_data = flattened_data.reshape([len(flattened_data), 1]) - res = core.LoDTensor() - res.set(flattened_data, place) - res.set_lod([lod]) - return res - - def main(): rnn_out = encoder_decoder() label = layers.data( @@ -122,18 +107,21 @@ def main(): exe.run(framework.default_startup_program()) + feed_order = [ + 'src_word_id', 'target_language_word', 'target_language_next_word' + ] + + feed_list = [ + fluid.default_main_program().global_block().var(var_name) + for var_name in feed_order + ] + feeder = fluid.DataFeeder(feed_list, place) + batch_id = 0 - for pass_id in xrange(10): + for pass_id in range(10): for data in train_data(): - word_data = to_lodtensor(map(lambda x: x[0], data), place) - trg_word = to_lodtensor(map(lambda x: x[1], data), place) - trg_word_next = to_lodtensor(map(lambda x: x[2], data), place) outs = exe.run(fluid.default_main_program(), - feed={ - 'src_word_id': word_data, - 'target_language_word': trg_word, - 'target_language_next_word': trg_word_next - }, + feed=feeder.feed(data), fetch_list=[avg_cost]) avg_cost_val = np.array(outs[0]) print('pass_id=' + str(pass_id) + ' batch=' + str(batch_id) + diff --git a/python/paddle/fluid/tests/demo/fc_gan.py b/python/paddle/fluid/tests/demo/fc_gan.py index 8ea1b2b15c..3d92f50f0a 100644 --- a/python/paddle/fluid/tests/demo/fc_gan.py +++ b/python/paddle/fluid/tests/demo/fc_gan.py @@ -137,7 +137,7 @@ def main(): generated_img = exe.run(g_program, feed={'noise': n}, fetch_list={g_img})[0] - real_data = numpy.array(map(lambda x: x[0], data)).astype('float32') + real_data = numpy.array([x[0] for x in data]).astype('float32') real_data = real_data.reshape(num_true, 784) total_data = numpy.concatenate([real_data, generated_img]) total_label = numpy.concatenate([ @@ -150,7 +150,7 @@ def main(): feed={'img': total_data, 'label': total_label}, fetch_list={d_loss})[0] - for _ in xrange(NUM_TRAIN_TIMES_OF_DG): + for _ in range(NUM_TRAIN_TIMES_OF_DG): n = numpy.random.uniform( low=-1.0, high=1.0, size=[2 * num_true * NOISE_SIZE]).astype('float32').reshape( diff --git a/python/paddle/fluid/tests/demo/text_classification/.gitignore b/python/paddle/fluid/tests/demo/file_reader/.gitignore similarity index 100% rename from python/paddle/fluid/tests/demo/text_classification/.gitignore rename to python/paddle/fluid/tests/demo/file_reader/.gitignore diff --git a/python/paddle/fluid/tests/demo/text_classification/convert_data_to_recordio.py b/python/paddle/fluid/tests/demo/file_reader/convert_data_to_recordio.py similarity index 86% rename from python/paddle/fluid/tests/demo/text_classification/convert_data_to_recordio.py rename to python/paddle/fluid/tests/demo/file_reader/convert_data_to_recordio.py index 9425d472a4..a00325d79b 100644 --- 
a/python/paddle/fluid/tests/demo/text_classification/convert_data_to_recordio.py +++ b/python/paddle/fluid/tests/demo/file_reader/convert_data_to_recordio.py @@ -31,9 +31,12 @@ def load_vocab(filename): # load word dict with paddle inner function -word_dict = load_vocab(sys.argv[1]) -word_dict["<unk>"] = len(word_dict) -print "Dict dim = ", len(word_dict) +if len(sys.argv) == 1: + word_dict = paddle.dataset.imdb.word_dict() +else: + word_dict = load_vocab(sys.argv[1]) + word_dict["<unk>"] = len(word_dict) +print("Dict dim = ", len(word_dict)) # input text data data = fluid.layers.data(name="words", shape=[1], dtype="int64", lod_level=1) @@ -47,7 +50,7 @@ feeder = fluid.DataFeeder(feed_list=[data, label], place=fluid.CPUPlace()) BATCH_SIZE = 128 train_reader = paddle.batch( paddle.reader.shuffle( - paddle.dataset.imdb.train(word_dict), buf_size=10000), + paddle.dataset.imdb.train(word_dict), buf_size=25000), batch_size=BATCH_SIZE) test_reader = paddle.batch( diff --git a/python/paddle/fluid/tests/demo/file_reader/train.py b/python/paddle/fluid/tests/demo/file_reader/train.py new file mode 100644 index 0000000000..bc3a6dc81d --- /dev/null +++ b/python/paddle/fluid/tests/demo/file_reader/train.py @@ -0,0 +1,138 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License.
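The new demo below consumes train.recordio / test.recordio; a hedged sketch of how the conversion script above produces them, assuming the fluid.recordio_writer.convert_reader_to_recordio_file helper in this tree:

import paddle
import paddle.fluid as fluid

word_dict = paddle.dataset.imdb.word_dict()
data = fluid.layers.data(name="words", shape=[1], dtype="int64", lod_level=1)
label = fluid.layers.data(name="label", shape=[1], dtype="int64")
feeder = fluid.DataFeeder(feed_list=[data, label], place=fluid.CPUPlace())

# Serialize shuffled (words, label) batches into a recordio file.
fluid.recordio_writer.convert_reader_to_recordio_file(
    "train.recordio",
    reader_creator=paddle.batch(
        paddle.reader.shuffle(
            paddle.dataset.imdb.train(word_dict), buf_size=25000),
        batch_size=128),
    feeder=feeder)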
+ +import paddle.fluid as fluid +import numpy +import sys + +TRAIN_FILES = ['train.recordio'] +TEST_FILES = ['test.recordio'] + +DICT_DIM = 5147 + +# embedding dim +emb_dim = 128 + +# hidden dim +hid_dim = 128 + +# class num +class_dim = 2 + +# epoch num +epoch_num = 10 + + +def build_program(is_train): + file_obj_handle = fluid.layers.io.open_files( + filenames=TRAIN_FILES if is_train else TEST_FILES, + shapes=[[-1, 1], [-1, 1]], + lod_levels=[1, 0], + dtypes=['int64', 'int64']) + + file_obj = fluid.layers.io.double_buffer(file_obj_handle) + + with fluid.unique_name.guard(): + + data, label = fluid.layers.read_file(file_obj) + + emb = fluid.layers.embedding(input=data, size=[DICT_DIM, emb_dim]) + + conv_3 = fluid.nets.sequence_conv_pool( + input=emb, + num_filters=hid_dim, + filter_size=3, + act="tanh", + pool_type="sqrt") + + conv_4 = fluid.nets.sequence_conv_pool( + input=emb, + num_filters=hid_dim, + filter_size=4, + act="tanh", + pool_type="sqrt") + + prediction = fluid.layers.fc(input=[conv_3, conv_4], + size=class_dim, + act="softmax") + + # cross entropy loss + cost = fluid.layers.cross_entropy(input=prediction, label=label) + + # mean loss + avg_cost = fluid.layers.mean(x=cost) + acc = fluid.layers.accuracy(input=prediction, label=label) + + if is_train: + # Adagrad optimizer + sgd_optimizer = fluid.optimizer.Adagrad(learning_rate=0.001) + sgd_optimizer.minimize(avg_cost) + + return {'loss': avg_cost, 'log': [avg_cost, acc], 'file': file_obj_handle} + + +def main(): + train = fluid.Program() + startup = fluid.Program() + test = fluid.Program() + + with fluid.program_guard(train, startup): + train_args = build_program(is_train=True) + + with fluid.program_guard(test, startup): + test_args = build_program(is_train=False) + + use_cuda = fluid.core.is_compiled_with_cuda() + # startup + place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() + exe = fluid.Executor(place=place) + exe.run(startup) + + train_exe = fluid.ParallelExecutor( + use_cuda=use_cuda, + loss_name=train_args['loss'].name, + main_program=train) + test_exe = fluid.ParallelExecutor( + use_cuda=use_cuda, main_program=test, share_vars_from=train_exe) + + fetch_var_list = [var.name for var in train_args['log']] + for epoch_id in range(epoch_num): + # train + try: + batch_id = 0 + while True: + loss, acc = map(numpy.array, + train_exe.run(fetch_list=fetch_var_list)) + print('Train epoch', epoch_id, 'batch', batch_id, 'loss:', loss, 'acc:', acc) + batch_id += 1 + except fluid.core.EOFException: + print('End of epoch', epoch_id) + train_args['file'].reset() + + # test + loss = [] + acc = [] + try: + while True: + loss_np, acc_np = map(numpy.array, + test_exe.run(fetch_list=fetch_var_list)) + loss.append(loss_np[0]) + acc.append(acc_np[0]) + except fluid.core.EOFException: + test_args['file'].reset() + print('Test loss:', numpy.mean(loss), 'acc:', numpy.mean(acc)) + + +if __name__ == '__main__': + main() diff --git a/python/paddle/fluid/tests/demo/pyreader.py b/python/paddle/fluid/tests/demo/pyreader.py new file mode 100644 index 0000000000..8206540193 --- /dev/null +++ b/python/paddle/fluid/tests/demo/pyreader.py @@ -0,0 +1,98 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import numpy + +import paddle +import paddle.dataset.mnist as mnist +import paddle.fluid as fluid +import paddle.v2 + + +def network(is_train): + reader = fluid.layers.py_reader( + capacity=10, + shapes=((-1, 784), (-1, 1)), + dtypes=('float32', 'int64'), + name="train_reader" if is_train else "test_reader", + use_double_buffer=True) + img, label = fluid.layers.read_file(reader) + + hidden = img + + for i in range(2): + hidden = fluid.layers.fc(input=hidden, size=100, act='tanh') + hidden = fluid.layers.dropout( + hidden, dropout_prob=0.5, is_test=not is_train) + + prediction = fluid.layers.fc(input=hidden, size=10, act='softmax') + loss = fluid.layers.cross_entropy(input=prediction, label=label) + return fluid.layers.mean(loss), reader + + +def main(): + train_prog = fluid.Program() + startup_prog = fluid.Program() + + with fluid.program_guard(train_prog, startup_prog): + with fluid.unique_name.guard(): + loss, train_reader = network(True) + adam = fluid.optimizer.Adam(learning_rate=0.01) + adam.minimize(loss) + + test_prog = fluid.Program() + test_startup = fluid.Program() + with fluid.program_guard(test_prog, test_startup): + with fluid.unique_name.guard(): + test_loss, test_reader = network(False) + + use_cuda = fluid.core.is_compiled_with_cuda() + place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() + fluid.Executor(place).run(startup_prog) + fluid.Executor(place).run(test_startup) + + trainer = fluid.ParallelExecutor( + use_cuda=use_cuda, loss_name=loss.name, main_program=train_prog) + + tester = fluid.ParallelExecutor( + use_cuda=use_cuda, share_vars_from=trainer, main_program=test_prog) + + train_reader.decorate_paddle_reader( + paddle.v2.reader.shuffle( + paddle.batch(mnist.train(), 512), buf_size=8192)) + + test_reader.decorate_paddle_reader(paddle.batch(mnist.test(), 512)) + + for epoch_id in range(10): + train_reader.start() + try: + while True: + print('train_loss', numpy.array( + trainer.run(fetch_list=[loss.name]))) + except fluid.core.EOFException: + print('End of epoch', epoch_id) + train_reader.reset() + + test_reader.start() + try: + while True: + print('test loss', numpy.array( + tester.run(fetch_list=[test_loss.name]))) + except fluid.core.EOFException: + print('End of testing') + test_reader.reset() + + +if __name__ == '__main__': + main() diff --git a/python/paddle/fluid/tests/demo/text_classification/train.py b/python/paddle/fluid/tests/demo/text_classification/train.py deleted file mode 100644 index e408684c6e..0000000000 --- a/python/paddle/fluid/tests/demo/text_classification/train.py +++ /dev/null @@ -1,148 +0,0 @@ -# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and -# limitations under the License. - -import paddle.fluid as fluid -import numpy -import sys - -TRAIN_FILES = ['train.recordio'] -TEST_FILES = ['test.recordio'] - -DICT_DIM = 89528 - -# embedding dim -emb_dim = 128 - -# hidden dim -hid_dim = 128 - -# hidden dim2 -hid_dim2 = 96 - -# class num -class_dim = 2 - - -def network_cfg(is_train, pass_num=100): - with fluid.unique_name.guard(): - train_file_obj = fluid.layers.open_files( - filenames=TRAIN_FILES, - pass_num=pass_num, - shapes=[[-1, 1], [-1, 1]], - lod_levels=[1, 0], - dtypes=['int64', 'int64'], - thread_num=1) - - test_file_obj = fluid.layers.open_files( - filenames=TEST_FILES, - pass_num=1, - shapes=[[-1, 1], [-1, 1]], - lod_levels=[1, 0], - dtypes=['int64', 'int64'], - thread_num=1) - - if is_train: - file_obj = fluid.layers.shuffle(train_file_obj, buffer_size=1000) - else: - file_obj = test_file_obj - - file_obj = fluid.layers.double_buffer( - file_obj, - name="train_double_buffer" if is_train else 'test_double_buffer') - - data, label = fluid.layers.read_file(file_obj) - - emb = fluid.layers.embedding(input=data, size=[DICT_DIM, emb_dim]) - - # sequence conv with window size = 3 - win_size = 3 - conv_3 = fluid.nets.sequence_conv_pool( - input=emb, - num_filters=hid_dim, - filter_size=win_size, - act="tanh", - pool_type="max") - - # fc layer after conv - fc_1 = fluid.layers.fc(input=[conv_3], size=hid_dim2) - - # probability of each class - prediction = fluid.layers.fc(input=[fc_1], - size=class_dim, - act="softmax") - # cross entropy loss - cost = fluid.layers.cross_entropy(input=prediction, label=label) - - # mean loss - avg_cost = fluid.layers.mean(x=cost) - acc = fluid.layers.accuracy(input=prediction, label=label) - - if is_train: - # SGD optimizer - sgd_optimizer = fluid.optimizer.Adagrad(learning_rate=0.01) - sgd_optimizer.minimize(avg_cost) - - return { - 'loss': avg_cost, - 'log': [avg_cost, acc], - 'file': train_file_obj if is_train else test_file_obj - } - - -def main(): - train = fluid.Program() - startup = fluid.Program() - - with fluid.program_guard(train, startup): - train_args = network_cfg(is_train=True) - - test = fluid.Program() - - with fluid.program_guard(test, fluid.Program()): - test_args = network_cfg(is_train=False) - - # startup - place = fluid.CUDAPlace(0) - exe = fluid.Executor(place=place) - exe.run(startup) - - train_exe = fluid.ParallelExecutor( - use_cuda=True, loss_name=train_args['loss'].name, main_program=train) - - fetch_var_list = [var.name for var in train_args['log']] - for i in xrange(sys.maxint): - result = map(numpy.array, - train_exe.run(fetch_list=fetch_var_list - if i % 1000 == 0 else [])) - if len(result) != 0: - print 'Train: ', result - - if i % 1000 == 0: - test_exe = fluid.ParallelExecutor( - use_cuda=True, main_program=test, share_vars_from=train_exe) - loss = [] - acc = [] - try: - while True: - loss_np, acc_np = map( - numpy.array, test_exe.run(fetch_list=fetch_var_list)) - loss.append(loss_np[0]) - acc.append(acc_np[0]) - except: - test_args['file'].reset() - print 'TEST: ', numpy.mean(loss), numpy.mean(acc) - - -if __name__ == '__main__': - main() diff --git a/python/paddle/fluid/tests/test_concurrency.py b/python/paddle/fluid/tests/no_test_concurrency.py similarity index 99% rename from python/paddle/fluid/tests/test_concurrency.py rename to python/paddle/fluid/tests/no_test_concurrency.py index e8f6cfb4a9..3bc0c9808e 100644 --- a/python/paddle/fluid/tests/test_concurrency.py +++ 
b/python/paddle/fluid/tests/no_test_concurrency.py @@ -194,7 +194,7 @@ class TestRoutineOp(unittest.TestCase): quit_ch = fluid.make_channel(dtype=core.VarDesc.VarType.LOD_TENSOR) with fluid.Go(): - for i in xrange(10): + for i in range(10): fluid.channel_recv(ch1, result) Print(result) diff --git a/python/paddle/fluid/tests/test_beam_search_decoder.py b/python/paddle/fluid/tests/test_beam_search_decoder.py new file mode 100644 index 0000000000..8bf750940d --- /dev/null +++ b/python/paddle/fluid/tests/test_beam_search_decoder.py @@ -0,0 +1,265 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +A simple machine translation demo using beam search decoder. +""" + +import contextlib +import numpy as np +import paddle +import paddle.fluid as fluid +import paddle.fluid.core as core +import paddle.fluid.framework as framework +import paddle.fluid.layers as layers +from paddle.fluid.executor import Executor +from paddle.fluid.contrib.decoder.beam_search_decoder import * +import unittest +import os + +dict_size = 30000 +source_dict_dim = target_dict_dim = dict_size +src_dict, trg_dict = paddle.dataset.wmt14.get_dict(dict_size) +hidden_dim = 32 +word_dim = 32 +decoder_size = hidden_dim +IS_SPARSE = True +batch_size = 2 +max_length = 8 +topk_size = 50 +trg_dic_size = 10000 +beam_size = 2 + + +def encoder(): + # encoder + src_word = layers.data( + name="src_word", shape=[1], dtype='int64', lod_level=1) + src_embedding = layers.embedding( + input=src_word, + size=[dict_size, word_dim], + dtype='float32', + is_sparse=IS_SPARSE) + + fc1 = layers.fc(input=src_embedding, size=hidden_dim * 4, act='tanh') + lstm_hidden0, lstm_0 = layers.dynamic_lstm(input=fc1, size=hidden_dim * 4) + encoder_out = layers.sequence_last_step(input=lstm_hidden0) + return encoder_out + + +def decoder_state_cell(context): + h = InitState(init=context, need_reorder=True) + state_cell = StateCell(inputs={'x': None}, states={'h': h}, out_state='h') + + @state_cell.state_updater + def updater(state_cell): + current_word = state_cell.get_input('x') + prev_h = state_cell.get_state('h') + # make sure the lod of h is inherited from prev_h + h = layers.fc(input=[prev_h, current_word], + size=decoder_size, + act='tanh') + state_cell.set_state('h', h) + + return state_cell + + +def decoder_train(state_cell): + # decoder + trg_language_word = layers.data( + name="target_word", shape=[1], dtype='int64', lod_level=1) + trg_embedding = layers.embedding( + input=trg_language_word, + size=[dict_size, word_dim], + dtype='float32', + is_sparse=IS_SPARSE) + + decoder = TrainingDecoder(state_cell) + + with decoder.block(): + current_word = decoder.step_input(trg_embedding) + decoder.state_cell.compute_state(inputs={'x': current_word}) + current_score = layers.fc(input=decoder.state_cell.get_state('h'), + size=target_dict_dim, + act='softmax') + decoder.state_cell.update_states() + decoder.output(current_score) + + return decoder() + + +def decoder_decode(state_cell): + init_ids = layers.data(
name="init_ids", shape=[1], dtype="int64", lod_level=2) + init_scores = layers.data( + name="init_scores", shape=[1], dtype="float32", lod_level=2) + + decoder = BeamSearchDecoder( + state_cell=state_cell, + init_ids=init_ids, + init_scores=init_scores, + target_dict_dim=target_dict_dim, + word_dim=word_dim, + input_var_dict={}, + topk_size=topk_size, + sparse_emb=IS_SPARSE, + max_len=max_length, + beam_size=beam_size, + end_id=1, + name=None) + decoder.decode() + translation_ids, translation_scores = decoder() + + return translation_ids, translation_scores + + +def train_main(use_cuda): + if use_cuda and not fluid.core.is_compiled_with_cuda(): + return + place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() + + context = encoder() + state_cell = decoder_state_cell(context) + rnn_out = decoder_train(state_cell) + label = layers.data( + name="target_next_word", shape=[1], dtype='int64', lod_level=1) + cost = layers.cross_entropy(input=rnn_out, label=label) + avg_cost = layers.mean(x=cost) + + optimizer = fluid.optimizer.Adagrad(learning_rate=1e-3) + optimizer.minimize(avg_cost) + + train_reader = paddle.batch( + paddle.reader.shuffle( + paddle.dataset.wmt14.train(dict_size), buf_size=1000), + batch_size=batch_size) + feed_order = ['src_word', 'target_word', 'target_next_word'] + + exe = Executor(place) + + def train_loop(main_program): + exe.run(framework.default_startup_program()) + + feed_list = [ + main_program.global_block().var(var_name) for var_name in feed_order + ] + feeder = fluid.DataFeeder(feed_list, place) + + for pass_id in range(1): + for batch_id, data in enumerate(train_reader()): + outs = exe.run(main_program, + feed=feeder.feed(data), + fetch_list=[avg_cost]) + avg_cost_val = np.array(outs[0]) + print('pass_id=' + str(pass_id) + ' batch=' + str(batch_id) + + " avg_cost=" + str(avg_cost_val)) + if batch_id > 3: + break + + train_loop(framework.default_main_program()) + + +def decode_main(use_cuda): + if use_cuda and not fluid.core.is_compiled_with_cuda(): + return + place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() + + context = encoder() + state_cell = decoder_state_cell(context) + translation_ids, translation_scores = decoder_decode(state_cell) + + exe = Executor(place) + exe.run(framework.default_startup_program()) + + init_ids_data = np.array([0 for _ in range(batch_size)], dtype='int64') + init_scores_data = np.array( + [1. 
for _ in range(batch_size)], dtype='float32') + init_ids_data = init_ids_data.reshape((batch_size, 1)) + init_scores_data = init_scores_data.reshape((batch_size, 1)) + init_lod = [1] * batch_size + init_lod = [init_lod, init_lod] + + init_ids = fluid.create_lod_tensor(init_ids_data, init_lod, place) + init_scores = fluid.create_lod_tensor(init_scores_data, init_lod, place) + + train_reader = paddle.batch( + paddle.reader.shuffle( + paddle.dataset.wmt14.train(dict_size), buf_size=1000), + batch_size=batch_size) + + feed_order = ['src_word'] + feed_list = [ + framework.default_main_program().global_block().var(var_name) + for var_name in feed_order + ] + feeder = fluid.DataFeeder(feed_list, place) + + data = next(train_reader()) + feed_dict = feeder.feed([[x[0]] for x in data]) + feed_dict['init_ids'] = init_ids + feed_dict['init_scores'] = init_scores + + result_ids, result_scores = exe.run( + framework.default_main_program(), + feed=feed_dict, + fetch_list=[translation_ids, translation_scores], + return_numpy=False) + print(result_ids.lod()) + + +class TestBeamSearchDecoder(unittest.TestCase): + pass + + +@contextlib.contextmanager +def scope_prog_guard(): + prog = fluid.Program() + startup_prog = fluid.Program() + scope = fluid.core.Scope() + with fluid.scope_guard(scope): + with fluid.program_guard(prog, startup_prog): + yield + + +def inject_test_train(use_cuda): + f_name = 'test_{0}_train'.format('cuda' if use_cuda else 'cpu') + + def f(*args): + with scope_prog_guard(): + train_main(use_cuda) + + setattr(TestBeamSearchDecoder, f_name, f) + + +def inject_test_decode(use_cuda, decorator=None): + f_name = 'test_{0}_decode'.format('cuda' if use_cuda else 'cpu', 'sparse') + + def f(*args): + with scope_prog_guard(): + decode_main(use_cuda) + + if decorator is not None: + f = decorator(f) + + setattr(TestBeamSearchDecoder, f_name, f) + + +for _use_cuda_ in (False, True): + inject_test_train(_use_cuda_) + +for _use_cuda_ in (False, True): + _decorator_ = None + inject_test_decode(use_cuda=_use_cuda_, decorator=_decorator_) + +if __name__ == '__main__': + unittest.main() diff --git a/python/paddle/fluid/tests/test_cpp_reader.py b/python/paddle/fluid/tests/test_cpp_reader.py index e54c73b295..6cc291dfcf 100644 --- a/python/paddle/fluid/tests/test_cpp_reader.py +++ b/python/paddle/fluid/tests/test_cpp_reader.py @@ -44,8 +44,8 @@ create_random_data_generator_op = startup_block.append_op( attrs={ "shape_concat": [1, 2, 1, 1], "ranks": [2, 2], - "min": 0.0, - "max": 1.0, + "low": 0.0, + "high": 1.0, 'lod_levels': [0, 0] }) diff --git a/python/paddle/fluid/tests/test_data_feeder.py b/python/paddle/fluid/tests/test_data_feeder.py index 861dd3174a..30b7a634a2 100644 --- a/python/paddle/fluid/tests/test_data_feeder.py +++ b/python/paddle/fluid/tests/test_data_feeder.py @@ -13,15 +13,61 @@ # limitations under the License. 
import paddle.fluid as fluid +import unittest -def test_converter(): - img = fluid.layers.data(name='image', shape=[1, 28, 28]) - label = fluid.layers.data(name='label', shape=[1], dtype='int64') - feeder = fluid.DataFeeder([img, label], fluid.CPUPlace()) - result = feeder.feed([[[0] * 784, [9]], [[1] * 784, [1]]]) - print(result) +class TestDataFeeder(unittest.TestCase): + def test_lod_level_0_converter(self): + img = fluid.layers.data(name='image', shape=[1, 28, 28]) + label = fluid.layers.data(name='label', shape=[1], dtype='int64') + feeder = fluid.DataFeeder([img, label], fluid.CPUPlace()) + result = feeder.feed([([0] * 784, [9]), ([1] * 784, [1])]) + + self.assertEqual(result['image'].shape(), [2, 1, 28, 28]) + self.assertEqual(result['label'].shape(), [2, 1]) + self.assertEqual(result['image'].recursive_sequence_lengths(), []) + self.assertEqual(result['label'].recursive_sequence_lengths(), []) + + def test_lod_level_1_converter(self): + # lod_level = 1 + # each sentence has a different number of words + sentences = fluid.layers.data( + name='sentences', shape=[1], dtype='int64', lod_level=1) + label = fluid.layers.data(name='label', shape=[1], dtype='int64') + feeder = fluid.DataFeeder([sentences, label], fluid.CPUPlace()) + + # lod = [[0, 3, 5, 9]] + # data = [[1, 2, 3], [4, 5], [6, 7, 8, 9]] + # label = [1] * len(data) + result = feeder.feed( + [([1, 2, 3], [1]), ([4, 5], [1]), ([6, 7, 8, 9], [1])]) + + self.assertEqual(result['sentences'].shape(), [9, 1]) + self.assertEqual(result['label'].shape(), [3, 1]) + self.assertEqual(result['sentences'].recursive_sequence_lengths(), + [[3, 2, 4]]) + self.assertEqual(result['label'].recursive_sequence_lengths(), []) + + def test_lod_level_2_converter(self): + # lod_level = 2 + # paragraphs -> sentences -> words + paragraphs = fluid.layers.data( + name='paragraphs', shape=[1], dtype='int64', lod_level=2) + label = fluid.layers.data(name='label', shape=[1], dtype='int64') + feeder = fluid.DataFeeder([paragraphs, label], fluid.CPUPlace()) + + # lod = [[0, 2, 3], [0, 3, 5, 9]] + # data = [[[1, 2, 3], [4, 5]], [[6, 7, 8, 9]]] + # label = [1] * len(data) + result = feeder.feed( + [([[1, 2, 3], [4, 5]], [1]), ([[6, 7, 8, 9]], [1])]) + + self.assertEqual(result['paragraphs'].shape(), [9, 1]) + self.assertEqual(result['label'].shape(), [2, 1]) + self.assertEqual(result['paragraphs'].recursive_sequence_lengths(), + [[2, 1], [3, 2, 4]]) + self.assertEqual(result['label'].recursive_sequence_lengths(), []) if __name__ == '__main__': - test_converter() + unittest.main() diff --git a/python/paddle/fluid/tests/test_detection.py b/python/paddle/fluid/tests/test_detection.py index 921260ef3f..fd45abd0a7 100644 --- a/python/paddle/fluid/tests/test_detection.py +++ b/python/paddle/fluid/tests/test_detection.py @@ -12,7 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. 
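The DataFeeder tests above pair offset-style lod comments (e.g. [[0, 3, 5, 9]]) with length-based recursive_sequence_lengths assertions (e.g. [[3, 2, 4]]); a small hypothetical helper makes the correspondence between the two notations explicit:

def offsets_to_lengths(offset_lod):
    # Each lod level stores cumulative offsets; adjacent differences recover
    # the per-sequence lengths that recursive_sequence_lengths() reports.
    return [[b - a for a, b in zip(level, level[1:])] for level in offset_lod]

assert offsets_to_lengths([[0, 3, 5, 9]]) == [[3, 2, 4]]
assert offsets_to_lengths([[0, 2, 3], [0, 3, 5, 9]]) == [[2, 1], [3, 2, 4]]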
-from __future__ import print_function import paddle.fluid as fluid import paddle.fluid.layers as layers from paddle.fluid.framework import Program, program_guard @@ -109,6 +108,42 @@ class TestDetection(unittest.TestCase): print(str(program)) +class TestPriorBox(unittest.TestCase): + def test_prior_box(self): + data_shape = [3, 224, 224] + images = fluid.layers.data( + name='pixel', shape=data_shape, dtype='float32') + conv1 = fluid.layers.conv2d(images, 3, 3, 2) + box, var = layers.prior_box( + input=conv1, + image=images, + min_sizes=[100.0], + aspect_ratios=[1.], + flip=True, + clip=True) + assert len(box.shape) == 4 + assert box.shape == var.shape + assert box.shape[3] == 4 + + +class TestAnchorGenerator(unittest.TestCase): + def test_anchor_generator(self): + data_shape = [3, 224, 224] + images = fluid.layers.data( + name='pixel', shape=data_shape, dtype='float32') + conv1 = fluid.layers.conv2d(images, 3, 3, 2) + anchor, var = fluid.layers.anchor_generator( + input=conv1, + anchor_sizes=[64, 128, 256, 512], + aspect_ratios=[0.5, 1.0, 2.0], + variance=[0.1, 0.1, 0.2, 0.2], + stride=[16.0, 16.0], + offset=0.5) + assert len(anchor.shape) == 4 + assert anchor.shape == var.shape + assert anchor.shape[3] == 4 + + class TestMultiBoxHead(unittest.TestCase): def test_multi_box_head(self): data_shape = [3, 224, 224] diff --git a/python/paddle/fluid/tests/test_error_clip.py b/python/paddle/fluid/tests/test_error_clip.py index 89f4c64975..e8edd7fbbb 100644 --- a/python/paddle/fluid/tests/test_error_clip.py +++ b/python/paddle/fluid/tests/test_error_clip.py @@ -12,7 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -from __future__ import print_function import numpy as np import paddle import paddle.fluid as fluid @@ -36,7 +35,7 @@ with fluid.program_guard(main_program=prog): avg_cost = fluid.layers.mean(cost) prog_clip = prog.clone() -prog_clip.block(0).var(hidden1.name).set_error_clip( +prog_clip.block(0).var(hidden1.name)._set_error_clip( fluid.clip.ErrorClipByValue( max=CLIP_MAX, min=CLIP_MIN)) diff --git a/python/paddle/fluid/tests/test_mnist_if_else_op.py b/python/paddle/fluid/tests/test_if_else_op.py similarity index 58% rename from python/paddle/fluid/tests/test_mnist_if_else_op.py rename to python/paddle/fluid/tests/test_if_else_op.py index d34f52db5f..082f64c146 100644 --- a/python/paddle/fluid/tests/test_mnist_if_else_op.py +++ b/python/paddle/fluid/tests/test_if_else_op.py @@ -14,10 +14,15 @@ import paddle import paddle.fluid.layers as layers -from paddle.fluid.framework import Program, program_guard, default_main_program, default_startup_program +from paddle.fluid.framework import Program, program_guard from paddle.fluid.executor import Executor from paddle.fluid.optimizer import MomentumOptimizer import paddle.fluid.core as core +import paddle.fluid as fluid +from paddle.fluid.layers.control_flow import split_lod_tensor +from paddle.fluid.layers.control_flow import merge_lod_tensor +from paddle.fluid.layers.control_flow import ConditionalBlock + import unittest import numpy as np @@ -31,14 +36,12 @@ class TestMNISTIfElseOp(unittest.TestCase): label = layers.data(name='y', shape=[1], dtype='int64') - limit = layers.fill_constant_batch_size_like( - input=label, dtype='int64', shape=[1], value=5.0) + limit = layers.fill_constant(shape=[1], dtype='int64', value=5) cond = layers.less_than(x=label, y=limit) - true_image, false_image = layers.split_lod_tensor( - input=image, mask=cond) + true_image, false_image = 
split_lod_tensor(input=image, mask=cond)
         true_out = layers.create_tensor(dtype='float32')
-        true_cond = layers.ConditionalBlock([true_image])
+        true_cond = ConditionalBlock([cond])

         with true_cond.block():
             hidden = layers.fc(input=true_image, size=100, act='tanh')
@@ -46,14 +49,14 @@ class TestMNISTIfElseOp(unittest.TestCase):
             layers.assign(input=prob, output=true_out)

         false_out = layers.create_tensor(dtype='float32')
-        false_cond = layers.ConditionalBlock([false_image])
+        false_cond = ConditionalBlock([cond])

         with false_cond.block():
             hidden = layers.fc(input=false_image, size=200, act='tanh')
             prob = layers.fc(input=hidden, size=10, act='softmax')
             layers.assign(input=prob, output=false_out)

-        prob = layers.merge_lod_tensor(
+        prob = merge_lod_tensor(
             in_true=true_out, in_false=false_out, mask=cond, x=image)
         loss = layers.cross_entropy(input=prob, label=label)
         avg_loss = layers.mean(loss)
@@ -64,7 +67,7 @@ class TestMNISTIfElseOp(unittest.TestCase):
         train_reader = paddle.batch(
             paddle.reader.shuffle(
                 paddle.dataset.mnist.train(), buf_size=8192),
-            batch_size=200)
+            batch_size=10)

         place = core.CPUPlace()
         exe = Executor(place)
@@ -73,15 +76,15 @@ class TestMNISTIfElseOp(unittest.TestCase):
         PASS_NUM = 100
         for pass_id in range(PASS_NUM):
             for data in train_reader():
-                x_data = np.array(map(lambda x: x[0], data)).astype("float32")
-                y_data = np.array(map(lambda x: x[1], data)).astype("int64")
+                x_data = np.array([x[0] for x in data]).astype("float32")
+                y_data = np.array([x[1] for x in data]).astype("int64")
                 y_data = np.expand_dims(y_data, axis=1)

                 outs = exe.run(prog,
                                feed={'x': x_data,
                                      'y': y_data},
                                fetch_list=[avg_loss])
-                print outs[0]
+                print(outs[0])
                 if outs[0] < 1.0:
                     return
         self.assertFalse(True)
@@ -94,8 +97,7 @@ class TestMNISTIfElseOp(unittest.TestCase):

         label = layers.data(name='y', shape=[1], dtype='int64')

-        limit = layers.fill_constant_batch_size_like(
-            input=label, dtype='int64', shape=[1], value=5.0)
+        limit = layers.fill_constant(shape=[1], dtype='int64', value=5)
         cond = layers.less_than(x=label, y=limit)
         ie = layers.IfElse(cond)
@@ -125,24 +127,85 @@ class TestMNISTIfElseOp(unittest.TestCase):

         place = core.CPUPlace()
         exe = Executor(place)
-        exe.run(kwargs['startup_program'])
+        exe.run(startup_prog)
         PASS_NUM = 100
         for pass_id in range(PASS_NUM):
             for data in train_reader():
-                x_data = np.array(map(lambda x: x[0], data)).astype("float32")
-                y_data = np.array(map(lambda x: x[1], data)).astype("int64")
+                x_data = np.array([x[0] for x in data]).astype("float32")
+                y_data = np.array([x[1] for x in data]).astype("int64")
                 y_data = y_data.reshape((y_data.shape[0], 1))

-                outs = exe.run(kwargs['main_program'],
+                outs = exe.run(prog,
                                feed={'x': x_data,
                                      'y': y_data},
                                fetch_list=[avg_loss])
-                print outs[0]
+                print(outs[0])
                 if outs[0] < 1.0:
                     return
         self.assertFalse(True)


+class TestIfElse(unittest.TestCase):
+    def set_test_case(self):
+        # condition is: self.data < self.cond_value
+        self.cond_value = 0.5
+        self.data = np.random.rand(25, 1).astype(np.float32)
+
+    def compare_ifelse_op_and_numpy(self, place):
+        self.set_test_case()
+
+        prog = Program()
+        startup_prog = Program()
+        with program_guard(prog, startup_prog):
+            src = layers.data(name='data', shape=[1], dtype='float32')
+            cond = layers.fill_constant(
+                [1], dtype='float32', value=self.cond_value)
+            ifcond = layers.less_than(x=src, y=cond)
+            ie = layers.IfElse(ifcond)
+            with ie.true_block():
+                true_target = ie.input(src)
+                ie.output(true_target)
+
+            with ie.false_block():
+                false_target = ie.input(src)
+                ie.output(false_target)
+            if_out =
ie()
+            out = layers.reduce_sum(if_out)
+
+        exe = fluid.Executor(place)
+        exe.run(fluid.default_startup_program())
+        fetch_list = [out]
+        o1, = exe.run(fluid.default_main_program(),
+                      feed={'data': self.data},
+                      fetch_list=[out])
+        o2 = np.sum(self.data)
+        self.assertTrue(
+            np.allclose(
+                o1, o2, atol=1e-8),
+            "IfElse result: " + str(o1) + "\nNumpy result: " + str(o2))
+
+    def test_cpu(self):
+        self.compare_ifelse_op_and_numpy(fluid.CPUPlace())
+
+    def test_cuda(self):
+        if not core.is_compiled_with_cuda():
+            return
+        self.compare_ifelse_op_and_numpy(fluid.CUDAPlace(0))
+
+
+class TestIfElseTrueBranch(TestIfElse):
+    def set_test_case(self):
+        # condition is: self.data < self.cond_value
+        self.cond_value = 10.
+        self.data = np.random.rand(25, 1).astype(np.float32)
+
+
+class TestIfElseFalseBranch(TestIfElse):
+    def set_test_case(self):
+        # condition is: self.data < self.cond_value
+        self.cond_value = -10.
+        self.data = np.random.rand(25, 1).astype(np.float32)
+
+
 if __name__ == '__main__':
-    # temp disable if else unittest since it could be buggy.
-    exit(0)
+    unittest.main()
diff --git a/python/paddle/fluid/tests/test_lod_tensor.py b/python/paddle/fluid/tests/test_lod_tensor.py
new file mode 100644
index 0000000000..f7a9dd4129
--- /dev/null
+++ b/python/paddle/fluid/tests/test_lod_tensor.py
@@ -0,0 +1,99 @@
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
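The feeder comments earlier in this patch still describe LoD in offset form (lod = [[0, 3, 5, 9]]) while the assertions use length form ([[3, 2, 4]]); the test_lod_tensor.py file below works entirely with the length form via recursive_sequence_lengths. The two encodings carry the same structure, as this small helper (an illustration, not part of the patch) shows:

import numpy as np

def offsets_to_lengths(lod):
    # [[0, 3, 5, 9]] -> [[3, 2, 4]]: consecutive differences of each level.
    return [list(np.diff(level)) for level in lod]

def lengths_to_offsets(seq_lens):
    # [[3, 2, 4]] -> [[0, 3, 5, 9]]: running sum with a leading zero.
    return [[0] + list(np.cumsum(level)) for level in seq_lens]

assert offsets_to_lengths([[0, 3, 5, 9]]) == [[3, 2, 4]]
assert lengths_to_offsets([[3, 2, 4]]) == [[0, 3, 5, 9]]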
+
+import paddle.fluid as fluid
+from paddle.fluid.lod_tensor import create_lod_tensor, create_random_int_lodtensor
+import numpy as np
+import unittest
+
+
+class TestLoDTensor(unittest.TestCase):
+    def test_pybind_recursive_seq_lens(self):
+        tensor = fluid.LoDTensor()
+        recursive_seq_lens = []
+        tensor.set_recursive_sequence_lengths(recursive_seq_lens)
+        recursive_seq_lens = [[], [1], [3]]
+        self.assertRaises(Exception, tensor.set_recursive_sequence_lengths,
+                          recursive_seq_lens)
+        recursive_seq_lens = [[0], [2], [3]]
+        self.assertRaises(Exception, tensor.set_recursive_sequence_lengths,
+                          recursive_seq_lens)
+
+        recursive_seq_lens = [[1, 2, 3]]
+        tensor.set_recursive_sequence_lengths(recursive_seq_lens)
+        self.assertEqual(tensor.recursive_sequence_lengths(),
+                         recursive_seq_lens)
+        tensor.set(np.random.random([6, 1]), fluid.CPUPlace())
+        self.assertTrue(tensor.has_valid_recursive_sequence_lengths())
+        tensor.set(np.random.random([9, 1]), fluid.CPUPlace())
+        self.assertFalse(tensor.has_valid_recursive_sequence_lengths())
+
+        # Each level's sum should be equal to the number of items in the next level
+        # Moreover, the last level's sum should be equal to the tensor height
+        recursive_seq_lens = [[2, 3], [1, 3, 1, 2, 2]]
+        tensor.set_recursive_sequence_lengths(recursive_seq_lens)
+        self.assertEqual(tensor.recursive_sequence_lengths(),
+                         recursive_seq_lens)
+        tensor.set(np.random.random([8, 1]), fluid.CPUPlace())
+        self.assertFalse(tensor.has_valid_recursive_sequence_lengths())
+        recursive_seq_lens = [[2, 3], [1, 3, 1, 2, 1]]
+        tensor.set_recursive_sequence_lengths(recursive_seq_lens)
+        self.assertTrue(tensor.has_valid_recursive_sequence_lengths())
+        tensor.set(np.random.random([9, 1]), fluid.CPUPlace())
+        self.assertFalse(tensor.has_valid_recursive_sequence_lengths())
+
+    def test_create_lod_tensor(self):
+        # Create LoDTensor from a list
+        data = [[1, 2, 3], [3, 4]]
+        wrong_recursive_seq_lens = [[2, 2]]
+        correct_recursive_seq_lens = [[3, 2]]
+        self.assertRaises(AssertionError, create_lod_tensor, data,
+                          wrong_recursive_seq_lens, fluid.CPUPlace())
+        tensor = create_lod_tensor(data, correct_recursive_seq_lens,
+                                   fluid.CPUPlace())
+        self.assertEqual(tensor.recursive_sequence_lengths(),
+                         correct_recursive_seq_lens)
+
+        # Create LoDTensor from numpy array
+        data = np.random.random([10, 1])
+        recursive_seq_lens = [[2, 1], [3, 3, 4]]
+        tensor = create_lod_tensor(data, recursive_seq_lens, fluid.CPUPlace())
+        self.assertEqual(tensor.recursive_sequence_lengths(),
+                         recursive_seq_lens)
+
+        # Create LoDTensor from another LoDTensor; they are different instances
+        new_recursive_seq_lens = [[2, 2, 1], [1, 2, 2, 3, 2]]
+        new_tensor = create_lod_tensor(tensor, new_recursive_seq_lens,
+                                       fluid.CPUPlace())
+        self.assertEqual(tensor.recursive_sequence_lengths(),
+                         recursive_seq_lens)
+        self.assertEqual(new_tensor.recursive_sequence_lengths(),
+                         new_recursive_seq_lens)
+
+    def test_create_random_int_lodtensor(self):
+        # The shape of a word, commonly used in speech and NLP problems, is [1]
+        shape = [1]
+        recursive_seq_lens = [[2, 3, 5]]
+        dict_size = 10000
+        low = 0
+        high = dict_size - 1
+        tensor = create_random_int_lodtensor(recursive_seq_lens, shape,
+                                             fluid.CPUPlace(), low, high)
+        self.assertEqual(tensor.recursive_sequence_lengths(),
+                         recursive_seq_lens)
+        self.assertEqual(tensor.shape(), [10, 1])
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/python/paddle/fluid/tests/unittests/.gitignore b/python/paddle/fluid/tests/unittests/.gitignore
index 3538a9c200..b1e8fda03a 100644
---
a/python/paddle/fluid/tests/unittests/.gitignore +++ b/python/paddle/fluid/tests/unittests/.gitignore @@ -4,3 +4,5 @@ mnist_1.recordio mnist_2.recordio flowers.recordio wmt16.recordio +data_balance_test.recordio +data_balance_with_lod_test.recordio diff --git a/python/paddle/fluid/tests/unittests/CMakeLists.txt b/python/paddle/fluid/tests/unittests/CMakeLists.txt index d9190408e1..a6a911721d 100644 --- a/python/paddle/fluid/tests/unittests/CMakeLists.txt +++ b/python/paddle/fluid/tests/unittests/CMakeLists.txt @@ -12,12 +12,17 @@ endif(NOT WITH_MKLDNN) if(NOT WITH_DISTRIBUTE) list(REMOVE_ITEM TEST_OPS test_recv_op) + list(REMOVE_ITEM TEST_OPS test_dist_transpiler) + list(REMOVE_ITEM TEST_OPS test_simple_dist_transpiler) + list(REMOVE_ITEM TEST_OPS test_listen_and_serv_op) + LIST(REMOVE_ITEM TEST_OPS test_dist_mnist) + LIST(REMOVE_ITEM TEST_OPS test_dist_word2vec) endif(NOT WITH_DISTRIBUTE) list(REMOVE_ITEM TEST_OPS test_seq_concat_op) # FIXME(helin): https://github.com/PaddlePaddle/Paddle/issues/8290 -list(REMOVE_ITEM TEST_OPS test_modified_huber_loss_op) # FIXME(qijun) https://github.com/PaddlePaddle/Paddle/issues/5184 +list(REMOVE_ITEM TEST_OPS test_modified_huber_loss_op) # FIXME(qijun) https://github.com/PaddlePaddle/Paddle/issues/5184 list(REMOVE_ITEM TEST_OPS test_lstm_unit_op) # # FIXME(qijun) https://github.com/PaddlePaddle/Paddle/issues/5185 -list(REMOVE_ITEM TEST_OPS test_nce) # IXME(qijun) https://github.com/PaddlePaddle/Paddle/issues/7778 +list(REMOVE_ITEM TEST_OPS test_nce) # FIXME(qijun) https://github.com/PaddlePaddle/Paddle/issues/7778 list(REMOVE_ITEM TEST_OPS test_recurrent_op) # FIXME(qijun) https://github.com/PaddlePaddle/Paddle/issues/6152 list(REMOVE_ITEM TEST_OPS test_cond_op) # FIXME(qijun): https://github.com/PaddlePaddle/Paddle/issues/5101#issuecomment-339814957 @@ -26,82 +31,40 @@ list(REMOVE_ITEM TEST_OPS decorators) # decorators is a helper python file, not function(py_test_modules TARGET_NAME) if(WITH_TESTING) - set(options "") + set(options SERIAL) set(oneValueArgs "") - set(multiValueArgs MODULES DEPS ARGS ENVS) + set(multiValueArgs MODULES DEPS ENVS) cmake_parse_arguments(py_test_modules "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN}) add_test(NAME ${TARGET_NAME} COMMAND env PYTHONPATH=${PADDLE_BINARY_DIR}/python ${py_test_modules_ENVS} - ${PYTHON_EXECUTABLE} -u -m unittest --verbose ${py_test_modules_MODULES} ${py_test_modules_ARGS} + ${PYTHON_EXECUTABLE} ${PADDLE_SOURCE_DIR}/tools/test_runner.py ${py_test_modules_MODULES} WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}) + if (py_test_modules_SERIAL) + set_property(TEST ${TARGET_NAME} PROPERTY RUN_SERIAL 1) + endif() endif() endfunction() - -list(REMOVE_ITEM TEST_OPS test_sequence_expand) - -# test time consuming OPs in a separate process for expliot parallism -list(REMOVE_ITEM TEST_OPS test_parallel_executor) list(REMOVE_ITEM TEST_OPS test_warpctc_op) -list(REMOVE_ITEM TEST_OPS test_dyn_rnn) -list(REMOVE_ITEM TEST_OPS test_mul_op) - -# tests that need to be run in separate process. 
-list(REMOVE_ITEM TEST_OPS test_multihead_attention) -list(REMOVE_ITEM TEST_OPS test_calc_gradient) -list(REMOVE_ITEM TEST_OPS test_while_op) -list(REMOVE_ITEM TEST_OPS test_lod_array_length_op) -list(REMOVE_ITEM TEST_OPS test_reorder_lod_tensor) -list(REMOVE_ITEM TEST_OPS test_profiler) -list(REMOVE_ITEM TEST_OPS test_nvprof) -list(REMOVE_ITEM TEST_OPS test_normalization_wrapper) -list(REMOVE_ITEM TEST_OPS test_executor_and_mul) -list(REMOVE_ITEM TEST_OPS test_assign_value_op) -list(REMOVE_ITEM TEST_OPS test_array_read_write_op) -list(REMOVE_ITEM TEST_OPS test_lod_rank_table) -list(REMOVE_ITEM TEST_OPS test_weight_normalization) -list(REMOVE_ITEM TEST_OPS test_conditional_block) -list(REMOVE_ITEM TEST_OPS test_parameter) -list(REMOVE_ITEM TEST_OPS test_registry) -list(REMOVE_ITEM TEST_OPS test_fetch_var) -list(REMOVE_ITEM TEST_OPS test_parallel_op) -list(REMOVE_ITEM TEST_OPS test_dynrnn_static_input) list(REMOVE_ITEM TEST_OPS test_dist_train) - -# tests that can be bundled together in one python process for speed. -if(WITH_FAST_BUNDLE_TEST) - py_test_modules("test_all_ops" MODULES ${TEST_OPS}) -else() - foreach(TEST_OP ${TEST_OPS}) - py_test_modules(${TEST_OP} MODULES ${TEST_OP}) - endforeach(TEST_OP) -endif(WITH_FAST_BUNDLE_TEST) - -# -py_test_modules(test_sequence_expand MODULES test_sequence_expand) -# tests with high overhead -py_test_modules(test_parallel_executor MODULES test_parallel_executor) -py_test_modules(test_warpctc_op MODULES test_warpctc_op ENVS FLAGS_warpctc_dir=${WARPCTC_LIB_DIR}) -py_test_modules(test_train_dyn_rnn MODULES test_dyn_rnn) -py_test_modules(test_mul_op MODULES test_mul_op) - -# tests that need to be run in separate process. -py_test_modules(test_multihead_attention MODULES test_multihead_attention) -py_test_modules(test_calc_gradient MODULES test_calc_gradient) -py_test_modules(test_while_op MODULES test_while_op) -py_test_modules(test_lod_array_length_op MODULES test_lod_array_length_op) -py_test_modules(test_reorder_lod_tensor MODULES test_reorder_lod_tensor) -py_test_modules(test_profiler MODULES test_profiler) -py_test_modules(test_nvprof MODULES test_nvprof) -py_test_modules(test_normalization_wrapper MODULES test_normalization_wrapper) -py_test_modules(test_executor_and_mul MODULES test_executor_and_mul) -py_test_modules(test_assign_value_op MODULES test_assign_value_op) -py_test_modules(test_array_read_write_op MODULES test_array_read_write_op) -py_test_modules(test_lod_rank_table MODULES test_lod_rank_table) -py_test_modules(test_weight_normalization MODULES test_weight_normalization) -py_test_modules(test_conditional_block MODULES test_conditional_block) -py_test_modules(test_parameter MODULES test_parameter) -py_test_modules(test_registry MODULES test_registry) -py_test_modules(test_fetch_var MODULES test_fetch_var) -py_test_modules(test_dynrnn_static_input MODULES test_dynrnn_static_input) -py_test_modules(test_parallel_op MODULES test_parallel_op) -py_test_modules(test_dist_train MODULES test_dist_train) +list(REMOVE_ITEM TEST_OPS test_parallel_executor_crf) +list(REMOVE_ITEM TEST_OPS test_parallel_executor_fetch_feed) +list(REMOVE_ITEM TEST_OPS test_dist_se_resnext) +list(REMOVE_ITEM TEST_OPS test_dist_transformer) +list(REMOVE_ITEM TEST_OPS test_parallel_executor_transformer) +list(REMOVE_ITEM TEST_OPS test_image_classification_resnet) +foreach(TEST_OP ${TEST_OPS}) + py_test_modules(${TEST_OP} MODULES ${TEST_OP}) +endforeach(TEST_OP) +py_test_modules(test_warpctc_op MODULES test_warpctc_op ENVS FLAGS_warpctc_dir=${WARPCTC_LIB_DIR} SERIAL) 
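The rewritten CMake above routes each test module through tools/test_runner.py instead of `python -m unittest`, so modules can be loaded by name and run in one process. The runner itself is not part of this hunk; a hypothetical sketch of such a script, using only stdlib unittest (the real test_runner.py may differ):

import sys
import unittest

def run_modules(module_names):
    # Load each named module's tests and run them in a single process.
    loader = unittest.defaultTestLoader
    suite = unittest.TestSuite(
        loader.loadTestsFromName(name) for name in module_names)
    result = unittest.TextTestRunner(verbosity=2).run(suite)
    return 0 if result.wasSuccessful() else 1

if __name__ == '__main__':
    sys.exit(run_modules(sys.argv[1:]))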
+if(WITH_DISTRIBUTE)
+    py_test_modules(test_dist_train MODULES test_dist_train SERIAL)
+    set_tests_properties(test_listen_and_serv_op PROPERTIES TIMEOUT 20)
+    set_tests_properties(test_dist_mnist PROPERTIES TIMEOUT 180)
+    set_tests_properties(test_dist_word2vec PROPERTIES TIMEOUT 180)
+endif()
+py_test_modules(test_parallel_executor_crf MODULES test_parallel_executor_crf SERIAL)
+py_test_modules(test_parallel_executor_fetch_feed MODULES test_parallel_executor_fetch_feed SERIAL)
+py_test_modules(test_dist_transformer MODULES test_dist_transformer SERIAL)
+py_test_modules(test_dist_se_resnext MODULES test_dist_se_resnext SERIAL)
+py_test_modules(test_parallel_executor_transformer MODULES test_parallel_executor_transformer SERIAL)
+py_test_modules(test_image_classification_resnet MODULES test_image_classification_resnet SERIAL)
diff --git a/python/paddle/fluid/tests/unittests/benchmark.py b/python/paddle/fluid/tests/unittests/benchmark.py
new file mode 100644
index 0000000000..b98a92dcbe
--- /dev/null
+++ b/python/paddle/fluid/tests/unittests/benchmark.py
@@ -0,0 +1,114 @@
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import numpy as np
+import unittest
+import time
+import itertools
+import six
+
+import paddle.fluid as fluid
+import paddle.fluid.core as core
+from paddle.fluid.op import Operator
+from op_test import OpTest
+
+
+class BenchmarkSuite(OpTest):
+    def timeit_function(self, callback, iters, *args, **kwargs):
+        assert iters >= 1, "iters should be >= 1"
+        start = time.time()
+        for i in range(iters):
+            callback(*args, **kwargs)
+        elapse = time.time() - start
+        return elapse / iters
+
+    def _assert_cpu_gpu_same(self, cpu_outs, gpu_outs, fetch_list, atol):
+        for item_cpu_out, item_gpu_out, variable in zip(cpu_outs, gpu_outs,
+                                                        fetch_list):
+            # the cpu version is the baseline; the gpu version is expected to match it.
+ expect = item_cpu_out + expect_t = np.array(item_cpu_out) + actual = item_gpu_out + actual_t = np.array(item_gpu_out) + var_name = variable if isinstance( + variable, six.string_types) else variable.name + self.assertTrue( + np.allclose( + actual_t, expect_t, atol=atol), + "Output (" + var_name + ") has diff" + str(actual_t) + "\n" + + str(expect_t)) + self.assertListEqual(actual.lod(), + expect.lod(), + "Output (" + var_name + ") has different lod") + + def _get_input_names(self): + inputs = [] + for name, value in list(self.inputs.items()): + if isinstance(value, list): + inputs.extend([sub_name for sub_name, _ in value]) + inputs.append(name) + return inputs + + def _get_output_names(self): + outputs = [] + for var_name, var in list(self.outputs.items()): + if isinstance(var, list): + for sub_var_name, sub_var in var: + outputs.append(sub_var_name) + else: + outputs.append(var_name) + if len(outputs) == 0: + for out_name, out_dup in Operator.get_op_outputs(self.op_type): + outputs.append(str(out_name)) + return outputs + + def check_output_stability(self, atol=1e-8): + places = self._get_places() + if len(places) < 2: + return + cpu_outs, fetch_list = self._calc_output(places[0]) + gpu_outs, _ = self._calc_output(places[1]) + self._assert_cpu_gpu_same(cpu_outs, gpu_outs, fetch_list, atol) + + def timeit_output_with_place(self, place, iters): + return self.timeit_function(self.calc_output, iters, place) + + def timeit_output(self, iters=100): + places = self._get_places() + elapses = [] + for place in places: + elapses.append(self.timeit_output_with_place(place, iters)) + for place, elapse in zip(places, elapses): + print("One pass of ({2}_op) at {0} cost {1}".format( + str(place), elapse, self.op_type)) + + def timeit_grad_with_place(self, place, iters=100): + inputs_to_check = self._get_input_names() + output_names = self._get_output_names() + return self.timeit_function( + self._get_gradient, + iters, + inputs_to_check, + place, + output_names, + no_grad_set=None) + + def timeit_grad(self, iters=100): + places = self._get_places() + elapses = [] + for place in places: + elapses.append(self.timeit_grad_with_place(place, iters)) + for place, elapse in zip(places, elapses): + print("One pass of ({2}_grad_op) at {0} cost {1}".format( + str(place), elapse, self.op_type)) diff --git a/python/paddle/fluid/tests/unittests/benchmark_sum_op.py b/python/paddle/fluid/tests/unittests/benchmark_sum_op.py new file mode 100644 index 0000000000..91a5f1bca4 --- /dev/null +++ b/python/paddle/fluid/tests/unittests/benchmark_sum_op.py @@ -0,0 +1,82 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import unittest +import numpy as np + +import paddle.fluid as fluid +from benchmark import BenchmarkSuite +from op_test import OpTest + +# This is a demo op test case for operator benchmarking and high resolution number stability alignment. 
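BenchmarkSuite.timeit_function above uses the simplest possible timing scheme: one wall-clock measurement around the whole loop, divided by the iteration count, which amortizes per-call overhead. Restated as a standalone sketch (an illustration, not the class itself):

import time

def average_seconds_per_call(fn, iters, *args, **kwargs):
    # A single time.time() pair around the loop; the mean hides per-call jitter.
    start = time.time()
    for _ in range(iters):
        fn(*args, **kwargs)
    return (time.time() - start) / iters

# e.g. average_seconds_per_call(sorted, 100, list(range(10000)))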
+
+
+class TestSumOp(BenchmarkSuite):
+    def setUp(self):
+        self.op_type = "sum"
+        self.customize_testcase()
+        self.customize_fetch_list()
+
+    def customize_fetch_list(self):
+        """
+        Customize the fetch list to configure the wanted variables.
+        >>> self.fetch_list = ["Out"]
+        """
+        self.fetch_list = ["Out"]
+
+    def customize_testcase(self):
+        # a test case
+        x0 = np.random.random((300, 400)).astype('float32')
+        x1 = np.random.random((300, 400)).astype('float32')
+        x2 = np.random.random((300, 400)).astype('float32')
+
+        # NOTE: if the output is empty, it will be auto-filled by BenchmarkSuite;
+        # only the output dtype is used, while the shape, lod and data are
+        # computed from the input.
+        self.inputs = {"X": [("x0", x0), ("x1", x1), ("x2", x2)]}
+        self.outputs = {"Out": x0 + x1 + x2}
+
+    def test_check_output(self):
+        """
+        Compare the output with the customized output. In this case,
+        you should set the correct output by hand.
+        >>> self.outputs = {"Out": x0 + x1 + x2}
+        """
+        self.check_output(atol=1e-8)
+
+    def test_output_stability(self):
+        # compare the cpu and gpu outputs at high resolution.
+        self.check_output_stability()
+
+    def test_timeit_output(self):
+        """
+        Profile the op; the time cost is averaged over iters.
+        Example output:
+        >>> One pass of (sum_op) at CPUPlace cost 0.000461330413818
+        >>> One pass of (sum_op) at CUDAPlace(0) cost 0.000556070804596
+        """
+        self.timeit_output(iters=100)
+
+    def test_timeit_grad(self):
+        """
+        Profile the op gradient; the time cost is averaged over iters.
+        Example output:
+        >>> One pass of (sum_grad_op) at CPUPlace cost 0.00279935121536
+        >>> One pass of (sum_grad_op) at CUDAPlace(0) cost 0.00500632047653
+        """
+        self.timeit_grad(iters=100)
+
+
+if __name__ == "__main__":
+    unittest.main()
diff --git a/python/paddle/fluid/tests/unittests/dist_mnist.py b/python/paddle/fluid/tests/unittests/dist_mnist.py
new file mode 100644
index 0000000000..8f5ba33f7c
--- /dev/null
+++ b/python/paddle/fluid/tests/unittests/dist_mnist.py
@@ -0,0 +1,103 @@
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
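A note on the atol=1e-8 tolerance used by the benchmark demo above: np.allclose(a, b, atol=atol) passes elementwise when |a - b| <= atol + rtol * |b|, with rtol defaulting to 1e-5, so atol is the binding constraint only for values near zero. For example:

import numpy as np

# A 1e-9 error near zero passes at atol=1e-8; a 1e-7 error does not.
assert np.allclose(np.array([1e-9]), np.array([0.0]), atol=1e-8)
assert not np.allclose(np.array([1e-7]), np.array([0.0]), atol=1e-8)
# Away from zero the relative term dominates: a 1e-6 error on 1.0 still passes.
assert np.allclose(np.array([1.000001]), np.array([1.0]), atol=1e-8)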
+ +import numpy as np +import argparse +import time +import math + +import paddle +import paddle.fluid as fluid +import paddle.fluid.profiler as profiler +from paddle.fluid import core +import unittest +from multiprocessing import Process +import os +import signal +from functools import reduce +from test_dist_base import TestDistRunnerBase, runtime_main + +DTYPE = "float32" +paddle.dataset.mnist.fetch() + +# Fix seed for test +fluid.default_startup_program().random_seed = 1 +fluid.default_main_program().random_seed = 1 + + +def cnn_model(data): + conv_pool_1 = fluid.nets.simple_img_conv_pool( + input=data, + filter_size=5, + num_filters=20, + pool_size=2, + pool_stride=2, + act="relu", + param_attr=fluid.ParamAttr(initializer=fluid.initializer.Constant())) + conv_pool_2 = fluid.nets.simple_img_conv_pool( + input=conv_pool_1, + filter_size=5, + num_filters=50, + pool_size=2, + pool_stride=2, + act="relu", + param_attr=fluid.ParamAttr(initializer=fluid.initializer.Constant())) + + SIZE = 10 + input_shape = conv_pool_2.shape + param_shape = [reduce(lambda a, b: a * b, input_shape[1:], 1)] + [SIZE] + scale = (2.0 / (param_shape[0]**2 * SIZE))**0.5 + + predict = fluid.layers.fc( + input=conv_pool_2, + size=SIZE, + act="softmax", + param_attr=fluid.param_attr.ParamAttr( + initializer=fluid.initializer.NormalInitializer( + loc=0.0, scale=scale, seed=1))) + return predict + + +class TestDistMnist2x2(TestDistRunnerBase): + def get_model(self, batch_size=2): + # Input data + images = fluid.layers.data(name='pixel', shape=[1, 28, 28], dtype=DTYPE) + label = fluid.layers.data(name='label', shape=[1], dtype='int64') + + # Train program + predict = cnn_model(images) + cost = fluid.layers.cross_entropy(input=predict, label=label) + avg_cost = fluid.layers.mean(x=cost) + + # Evaluator + batch_size_tensor = fluid.layers.create_tensor(dtype='int64') + batch_acc = fluid.layers.accuracy( + input=predict, label=label, total=batch_size_tensor) + + inference_program = fluid.default_main_program().clone() + # Optimization + opt = fluid.optimizer.AdamOptimizer( + learning_rate=0.001, beta1=0.9, beta2=0.999) + + # Reader + train_reader = paddle.batch( + paddle.dataset.mnist.train(), batch_size=batch_size) + test_reader = paddle.batch( + paddle.dataset.mnist.test(), batch_size=batch_size) + opt.minimize(avg_cost) + return inference_program, avg_cost, train_reader, test_reader, batch_acc, predict + + +if __name__ == "__main__": + runtime_main(TestDistMnist2x2) diff --git a/python/paddle/fluid/tests/unittests/dist_se_resnext.py b/python/paddle/fluid/tests/unittests/dist_se_resnext.py new file mode 100644 index 0000000000..d576a173ce --- /dev/null +++ b/python/paddle/fluid/tests/unittests/dist_se_resnext.py @@ -0,0 +1,248 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
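In cnn_model above, the fc initializer scale is derived from the flattened fan-in of conv_pool_2: param_shape multiplies out all non-batch dimensions. Tracing the arithmetic with the MNIST shapes this model is assumed to produce (a 1x28x28 input and two 5x5 conv + 2x2 pool stages giving a 50x4x4 feature map):

from functools import reduce

input_shape = (2, 50, 4, 4)  # (batch, channels, h, w); batch is excluded below
SIZE = 10
param_shape = [reduce(lambda a, b: a * b, input_shape[1:], 1)] + [SIZE]
scale = (2.0 / (param_shape[0]**2 * SIZE))**0.5
assert param_shape == [800, 10]  # fan-in of 800 into the 10-way softmax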
+ +import numpy as np +import argparse +import six +import time +import math + +import paddle +import paddle.fluid as fluid +import paddle.fluid.profiler as profiler +from paddle.fluid import core +import unittest +from multiprocessing import Process +import os +import sys +import signal +from test_dist_base import TestDistRunnerBase, runtime_main + +# Fix seed for test +fluid.default_startup_program().random_seed = 1 +fluid.default_main_program().random_seed = 1 + +train_parameters = { + "input_size": [3, 224, 224], + "input_mean": [0.485, 0.456, 0.406], + "input_std": [0.229, 0.224, 0.225], + "learning_strategy": { + "name": "piecewise_decay", + "epochs": [30, 60, 90], + "steps": [0.1, 0.01, 0.001, 0.0001] + } +} + + +class SE_ResNeXt(): + def __init__(self, layers=50): + self.params = train_parameters + self.layers = layers + + def net(self, input, class_dim=1000): + layers = self.layers + supported_layers = [50, 101, 152] + assert layers in supported_layers, \ + "supported layers are {} but input layer is {}".format(supported_layers, layers) + if layers == 50: + cardinality = 32 + reduction_ratio = 16 + depth = [3, 4, 6, 3] + num_filters = [128, 256, 512, 1024] + + conv = self.conv_bn_layer( + input=input, + num_filters=64, + filter_size=7, + stride=2, + act='relu') + conv = fluid.layers.pool2d( + input=conv, + pool_size=3, + pool_stride=2, + pool_padding=1, + pool_type='max') + elif layers == 101: + cardinality = 32 + reduction_ratio = 16 + depth = [3, 4, 23, 3] + num_filters = [128, 256, 512, 1024] + + conv = self.conv_bn_layer( + input=input, + num_filters=64, + filter_size=7, + stride=2, + act='relu') + conv = fluid.layers.pool2d( + input=conv, + pool_size=3, + pool_stride=2, + pool_padding=1, + pool_type='max') + elif layers == 152: + cardinality = 64 + reduction_ratio = 16 + depth = [3, 8, 36, 3] + num_filters = [128, 256, 512, 1024] + + conv = self.conv_bn_layer( + input=input, + num_filters=64, + filter_size=3, + stride=2, + act='relu') + conv = self.conv_bn_layer( + input=conv, num_filters=64, filter_size=3, stride=1, act='relu') + conv = self.conv_bn_layer( + input=conv, + num_filters=128, + filter_size=3, + stride=1, + act='relu') + conv = fluid.layers.pool2d( + input=conv, pool_size=3, pool_stride=2, pool_padding=1, \ + pool_type='max') + + for block in range(len(depth)): + for i in range(depth[block]): + conv = self.bottleneck_block( + input=conv, + num_filters=num_filters[block], + stride=2 if i == 0 and block != 0 else 1, + cardinality=cardinality, + reduction_ratio=reduction_ratio) + + pool = fluid.layers.pool2d( + input=conv, pool_size=7, pool_type='avg', global_pooling=True) + drop = fluid.layers.dropout(x=pool, dropout_prob=0.2) + stdv = 1.0 / math.sqrt(drop.shape[1] * 1.0) + out = fluid.layers.fc(input=drop, size=class_dim, act='softmax') + return out + + def shortcut(self, input, ch_out, stride): + ch_in = input.shape[1] + if ch_in != ch_out or stride != 1: + filter_size = 1 + return self.conv_bn_layer(input, ch_out, filter_size, stride) + else: + return input + + def bottleneck_block(self, input, num_filters, stride, cardinality, + reduction_ratio): + conv0 = self.conv_bn_layer( + input=input, num_filters=num_filters, filter_size=1, act='relu') + conv1 = self.conv_bn_layer( + input=conv0, + num_filters=num_filters, + filter_size=3, + stride=stride, + groups=cardinality, + act='relu') + conv2 = self.conv_bn_layer( + input=conv1, num_filters=num_filters * 2, filter_size=1, act=None) + scale = self.squeeze_excitation( + input=conv2, + num_channels=num_filters * 2, + 
reduction_ratio=reduction_ratio)
+
+        short = self.shortcut(input, num_filters * 2, stride)
+
+        return fluid.layers.elementwise_add(x=short, y=scale, act='relu')
+
+    def conv_bn_layer(self,
+                      input,
+                      num_filters,
+                      filter_size,
+                      stride=1,
+                      groups=1,
+                      act=None):
+        conv = fluid.layers.conv2d(
+            input=input,
+            num_filters=num_filters,
+            filter_size=filter_size,
+            stride=stride,
+            padding=(filter_size - 1) // 2,
+            groups=groups,
+            act=None,
+            # avoid pserver CPU init differs from GPU
+            param_attr=fluid.ParamAttr(
+                initializer=fluid.initializer.Constant()),
+            bias_attr=False)
+        return fluid.layers.batch_norm(input=conv, act=act)
+
+    def squeeze_excitation(self, input, num_channels, reduction_ratio):
+        pool = fluid.layers.pool2d(
+            input=input, pool_size=0, pool_type='avg', global_pooling=True)
+        stdv = 1.0 / math.sqrt(pool.shape[1] * 1.0)
+        squeeze = fluid.layers.fc(input=pool,
+                                  size=num_channels // reduction_ratio,
+                                  act='relu')
+        stdv = 1.0 / math.sqrt(squeeze.shape[1] * 1.0)
+        excitation = fluid.layers.fc(input=squeeze,
+                                     size=num_channels,
+                                     act='sigmoid')
+        scale = fluid.layers.elementwise_mul(x=input, y=excitation, axis=0)
+        return scale
+
+
+class DistSeResneXt2x2(TestDistRunnerBase):
+    def get_model(self, batch_size=2):
+        # Input data
+        image = fluid.layers.data(
+            name="data", shape=[3, 224, 224], dtype='float32')
+        label = fluid.layers.data(name="int64", shape=[1], dtype='int64')
+
+        # Train program
+        model = SE_ResNeXt(layers=50)
+        out = model.net(input=image, class_dim=102)
+        cost = fluid.layers.cross_entropy(input=out, label=label)
+
+        avg_cost = fluid.layers.mean(x=cost)
+        acc_top1 = fluid.layers.accuracy(input=out, label=label, k=1)
+        acc_top5 = fluid.layers.accuracy(input=out, label=label, k=5)
+
+        # Evaluator
+        test_program = fluid.default_main_program().clone(for_test=True)
+
+        # Optimization
+        total_images = 6149  # flowers
+        epochs = [30, 60, 90]
+        step = int(total_images / batch_size + 1)
+
+        bd = [step * e for e in epochs]
+        base_lr = 0.1
+        lr = [base_lr * (0.1**i) for i in range(len(bd) + 1)]
+
+        optimizer = fluid.optimizer.Momentum(
+            # FIXME(typhoonzero): add back LR decay once ParallelExecutor is fixed.
+            #learning_rate=fluid.layers.piecewise_decay(
+            #    boundaries=bd, values=lr),
+            learning_rate=base_lr,
+            momentum=0.9,
+            regularization=fluid.regularizer.L2Decay(1e-4))
+        optimizer.minimize(avg_cost)
+
+        # Reader
+        train_reader = paddle.batch(
+            paddle.dataset.flowers.train(), batch_size=batch_size)
+        test_reader = paddle.batch(
+            paddle.dataset.flowers.test(use_xmap=False), batch_size=batch_size)
+
+        return test_program, avg_cost, train_reader, test_reader, acc_top1, out
+
+
+if __name__ == "__main__":
+    runtime_main(DistSeResneXt2x2)
diff --git a/python/paddle/fluid/tests/unittests/dist_transformer.py b/python/paddle/fluid/tests/unittests/dist_transformer.py
new file mode 100644
index 0000000000..ee8020a735
--- /dev/null
+++ b/python/paddle/fluid/tests/unittests/dist_transformer.py
@@ -0,0 +1,280 @@
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import numpy as np
+import argparse
+import time
+import math
+
+import paddle
+import paddle.fluid as fluid
+from paddle.fluid import core
+import os
+import sys
+import transformer_model
+import paddle.dataset.wmt16 as wmt16
+
+# Fix seed for test
+fluid.default_startup_program().random_seed = 1
+fluid.default_main_program().random_seed = 1
+
+WMT16_RECORDIO_FILE = "/tmp/wmt16.recordio"
+
+
+class ModelHyperParams(object):
+    # Dictionary size for source and target language. This model directly uses
+    # paddle.dataset.wmt16, in which the <bos>, <eos> and <unk> tokens have
+    # already been added, but the <pad> token has not. Transformer requires
+    # sequences in a mini-batch to be padded to the same length, so a <pad>
+    # token is added into the original dictionary in paddle.dataset.wmt16.
+
+    # size of source word dictionary.
+    src_vocab_size = 10000
+    # index for the <pad> token in source language.
+    src_pad_idx = src_vocab_size
+
+    # size of target word dictionary.
+    trg_vocab_size = 10000
+    # index for the <pad> token in target language.
+    trg_pad_idx = trg_vocab_size
+
+    # position value corresponding to the <pad> token.
+    pos_pad_idx = 0
+
+    # max length of sequences. It is increased by 1 to include the position
+    # padding token for position encoding.
+    max_length = 50
+
+    # the dimension for word embeddings, which is also the last dimension of
+    # the input and output of multi-head attention, position-wise feed-forward
+    # networks, encoder and decoder.
+
+    d_model = 512
+    # size of the hidden layer in position-wise feed-forward networks.
+    d_inner_hid = 1024
+    # the dimension that keys are projected to for dot-product attention.
+    d_key = 64
+    # the dimension that values are projected to for dot-product attention.
+    d_value = 64
+    # number of heads used in multi-head attention.
+    n_head = 8
+    # number of sub-layers to be stacked in the encoder and decoder.
+    n_layer = 6
+    # dropout rate used by all dropout layers.
+    dropout = 0.1
+
+
+def prepare_batch_input(insts, src_pad_idx, trg_pad_idx, n_head):
+    """
+    Pad the instances to the max sequence length in batch, and generate the
+    corresponding position data and attention bias. Then, convert the numpy
+    data to tensors and return them as a list.
+    """
+
+    def __pad_batch_data(insts,
+                         pad_idx,
+                         is_target=False,
+                         return_pos=True,
+                         return_attn_bias=True,
+                         return_max_len=True):
+        """
+        Pad the instances to the max sequence length in batch, and generate the
+        corresponding position data and attention bias.
+        """
+        return_list = []
+        max_len = max(len(inst) for inst in insts)
+        inst_data = np.array(
+            [inst + [pad_idx] * (max_len - len(inst)) for inst in insts])
+        return_list += [inst_data.astype("int64").reshape([-1, 1])]
+        if return_pos:
+            inst_pos = np.array([[
+                pos_i + 1 if w_i != pad_idx else 0
+                for pos_i, w_i in enumerate(inst)
+            ] for inst in inst_data])
+
+            return_list += [inst_pos.astype("int64").reshape([-1, 1])]
+        if return_attn_bias:
+            if is_target:
+                # This is used to avoid attention on paddings and subsequent
+                # words.
+                slf_attn_bias_data = np.ones((inst_data.shape[0], max_len,
+                                              max_len))
+                slf_attn_bias_data = np.triu(slf_attn_bias_data, 1).reshape(
+                    [-1, 1, max_len, max_len])
+                slf_attn_bias_data = np.tile(slf_attn_bias_data,
+                                             [1, n_head, 1, 1]) * [-1e9]
+            else:
+                # This is used to avoid attention on paddings.
+                slf_attn_bias_data = np.array([[0] * len(inst) + [-1e9] *
+                                               (max_len - len(inst))
+                                               for inst in insts])
+                slf_attn_bias_data = np.tile(
+                    slf_attn_bias_data.reshape([-1, 1, 1, max_len]),
+                    [1, n_head, max_len, 1])
+            return_list += [slf_attn_bias_data.astype("float32")]
+        if return_max_len:
+            return_list += [max_len]
+        return return_list if len(return_list) > 1 else return_list[0]
+
+    src_word, src_pos, src_slf_attn_bias, src_max_len = __pad_batch_data(
+        [inst[0] for inst in insts], src_pad_idx, is_target=False)
+    trg_word, trg_pos, trg_slf_attn_bias, trg_max_len = __pad_batch_data(
+        [inst[1] for inst in insts], trg_pad_idx, is_target=True)
+    trg_src_attn_bias = np.tile(src_slf_attn_bias[:, :, ::src_max_len, :],
+                                [1, 1, trg_max_len, 1]).astype("float32")
+    lbl_word = __pad_batch_data([inst[2] for inst in insts], trg_pad_idx, False,
+                                False, False, False)
+    lbl_weight = (lbl_word != trg_pad_idx).astype("float32").reshape([-1, 1])
+
+    return [
+        src_word, src_pos, trg_word, trg_pos, src_slf_attn_bias,
+        trg_slf_attn_bias, trg_src_attn_bias, lbl_word, lbl_weight
+    ]
+
+
+def transformer(use_feed):
+    assert not use_feed, "transformer doesn't support feed yet"
+    return transformer_model.transformer(
+        ModelHyperParams.src_vocab_size + 1,
+        ModelHyperParams.trg_vocab_size + 1, ModelHyperParams.max_length + 1,
+        ModelHyperParams.n_layer, ModelHyperParams.n_head,
+        ModelHyperParams.d_key, ModelHyperParams.d_value,
+        ModelHyperParams.d_model, ModelHyperParams.d_inner_hid,
+        ModelHyperParams.dropout, ModelHyperParams.src_pad_idx,
+        ModelHyperParams.trg_pad_idx, ModelHyperParams.pos_pad_idx)
+
+
+def get_model():
+    avg_cost = transformer(use_feed=False)
+    optimizer = fluid.optimizer.Adam()
+    optimizer.minimize(avg_cost)
+    return avg_cost
+
+
+def get_transpiler(trainer_id, main_program, pserver_endpoints, trainers):
+    t = fluid.DistributeTranspiler()
+    t.transpile(
+        trainer_id=trainer_id,
+        program=main_program,
+        pservers=pserver_endpoints,
+        trainers=trainers)
+    return t
+
+
+class DistTransformer2x2(object):
+    def run_pserver(self, pserver_endpoints, trainers, current_endpoint,
+                    trainer_id):
+        get_model()
+        t = get_transpiler(trainer_id,
+                           fluid.default_main_program(), pserver_endpoints,
+                           trainers)
+        pserver_prog = t.get_pserver_program(current_endpoint)
+        startup_prog = t.get_startup_program(current_endpoint, pserver_prog)
+
+        place = fluid.CPUPlace()
+        exe = fluid.Executor(place)
+        exe.run(startup_prog)
+        exe.run(pserver_prog)
+
+    def _wait_ps_ready(self, pid):
+        retry_times = 20
+        while True:
+            assert retry_times >= 0, "wait ps ready failed"
+            time.sleep(3)
+            print("waiting ps ready: ", pid)
+            try:
+                # the listen_and_serv_op writes a file containing its listen
+                # port under /tmp once it is ready to process all RPC calls.
+                os.stat("/tmp/paddle.%d.port" % pid)
+                return
+            except os.error:
+                retry_times -= 1
+
+    def run_trainer(self, place, endpoints, trainer_id, trainers, is_dist=True):
+        avg_cost = get_model()
+        if is_dist:
+            t = get_transpiler(trainer_id,
+                               fluid.default_main_program(), endpoints,
+                               trainers)
+            trainer_prog = t.get_trainer_program()
+        else:
+            trainer_prog = fluid.default_main_program()
+
+        startup_exe = fluid.Executor(place)
+        startup_exe.run(fluid.default_startup_program())
+
+        strategy = fluid.ExecutionStrategy()
+        strategy.num_threads = 1
+        strategy.allow_op_delay = False
+        exe = fluid.ParallelExecutor(
+            True, loss_name=avg_cost.name, exec_strategy=strategy)
+
+        first_loss, = exe.run(fetch_list=[avg_cost.name])
+        print(first_loss)
+        for i in range(5):
+            _ = exe.run(fetch_list=[avg_cost.name])
+        last_loss, = exe.run(fetch_list=[avg_cost.name])
+        print(last_loss)
+
+
+def main(role="pserver",
+         endpoints="127.0.0.1:9123",
+         trainer_id=0,
+         current_endpoint="127.0.0.1:9123",
+         trainers=1,
+         is_dist=True):
+
+    reader = paddle.batch(
+        wmt16.train(ModelHyperParams.src_vocab_size,
+                    ModelHyperParams.trg_vocab_size),
+        batch_size=transformer_model.batch_size)
+
+    with fluid.recordio_writer.create_recordio_writer(
+            WMT16_RECORDIO_FILE) as writer:
+        for batch in reader():
+            for tensor in prepare_batch_input(
+                    batch, ModelHyperParams.src_pad_idx,
+                    ModelHyperParams.trg_pad_idx, ModelHyperParams.n_head):
+                t = fluid.LoDTensor()
+                t.set(tensor, fluid.CPUPlace())
+                writer.append_tensor(t)
+            writer.complete_append_tensor()
+
+    model = DistTransformer2x2()
+    if role == "pserver":
+        model.run_pserver(endpoints, trainers, current_endpoint, trainer_id)
+    else:
+        p = fluid.CUDAPlace(0) if core.is_compiled_with_cuda(
+        ) else fluid.CPUPlace()
+        model.run_trainer(p, endpoints, trainer_id, trainers, is_dist)
+
+
+if __name__ == "__main__":
+    if len(sys.argv) != 7:
+        print(
+            "Usage: python dist_transformer.py [pserver/trainer] [endpoints] [trainer_id] [current_endpoint] [trainers] [is_dist]"
+        )
+        sys.exit(1)
+    role = sys.argv[1]
+    endpoints = sys.argv[2]
+    trainer_id = int(sys.argv[3])
+    current_endpoint = sys.argv[4]
+    trainers = int(sys.argv[5])
+    is_dist = True if sys.argv[6] == "TRUE" else False
+    main(
+        role=role,
+        endpoints=endpoints,
+        trainer_id=trainer_id,
+        current_endpoint=current_endpoint,
+        trainers=trainers,
+        is_dist=is_dist)
diff --git a/python/paddle/fluid/tests/unittests/dist_word2vec.py b/python/paddle/fluid/tests/unittests/dist_word2vec.py
new file mode 100644
index 0000000000..54a70f4adb
--- /dev/null
+++ b/python/paddle/fluid/tests/unittests/dist_word2vec.py
@@ -0,0 +1,119 @@
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
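The decoder-side bias built by __pad_batch_data in dist_transformer.py above is the standard subsequent-word mask: an upper-triangular matrix (diagonal excluded) scaled by -1e9, so that softmax assigns near-zero attention to future positions. A small numpy check of that construction:

import numpy as np

max_len = 4
mask = np.triu(np.ones((max_len, max_len)), 1) * -1e9
# Row i may attend to positions <= i only:
# row 0 -> [0, -1e9, -1e9, -1e9], row 3 -> [0, 0, 0, 0]
assert mask[0, 1] == -1e9 and mask[3, 2] == 0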
+ +import numpy as np +import argparse +import time +import math +import paddle +import paddle.fluid as fluid +import paddle.fluid.profiler as profiler +from paddle.fluid import core +import unittest +from multiprocessing import Process +import os +import signal +from test_dist_base import TestDistRunnerBase, runtime_main + +IS_SPARSE = True +EMBED_SIZE = 32 +HIDDEN_SIZE = 256 +N = 5 + +# Fix seed for test +fluid.default_startup_program().random_seed = 1 +fluid.default_main_program().random_seed = 1 + + +class TestDistWord2vec2x2(TestDistRunnerBase): + def get_model(self, batch_size=2): + BATCH_SIZE = batch_size + + def __network__(words): + embed_first = fluid.layers.embedding( + input=words[0], + size=[dict_size, EMBED_SIZE], + dtype='float32', + is_sparse=IS_SPARSE, + param_attr=fluid.ParamAttr( + name='shared_w', initializer=fluid.initializer.Constant())) + embed_second = fluid.layers.embedding( + input=words[1], + size=[dict_size, EMBED_SIZE], + dtype='float32', + is_sparse=IS_SPARSE, + param_attr=fluid.ParamAttr( + name='shared_w', initializer=fluid.initializer.Constant())) + embed_third = fluid.layers.embedding( + input=words[2], + size=[dict_size, EMBED_SIZE], + dtype='float32', + is_sparse=IS_SPARSE, + param_attr=fluid.ParamAttr( + name='shared_w', initializer=fluid.initializer.Constant())) + embed_forth = fluid.layers.embedding( + input=words[3], + size=[dict_size, EMBED_SIZE], + dtype='float32', + is_sparse=IS_SPARSE, + param_attr=fluid.ParamAttr( + name='shared_w', initializer=fluid.initializer.Constant())) + + concat_embed = fluid.layers.concat( + input=[embed_first, embed_second, embed_third, embed_forth], + axis=1) + hidden1 = fluid.layers.fc( + input=concat_embed, + size=HIDDEN_SIZE, + act='sigmoid', + param_attr=fluid.ParamAttr( + initializer=fluid.initializer.Constant())) + predict_word = fluid.layers.fc( + input=hidden1, + size=dict_size, + act='softmax', + param_attr=fluid.ParamAttr( + initializer=fluid.initializer.Constant())) + cost = fluid.layers.cross_entropy( + input=predict_word, label=words[4]) + avg_cost = fluid.layers.mean(cost) + return avg_cost, predict_word + + word_dict = paddle.dataset.imikolov.build_dict() + dict_size = len(word_dict) + + first_word = fluid.layers.data(name='firstw', shape=[1], dtype='int64') + second_word = fluid.layers.data( + name='secondw', shape=[1], dtype='int64') + third_word = fluid.layers.data(name='thirdw', shape=[1], dtype='int64') + forth_word = fluid.layers.data(name='forthw', shape=[1], dtype='int64') + next_word = fluid.layers.data(name='nextw', shape=[1], dtype='int64') + avg_cost, predict_word = __network__( + [first_word, second_word, third_word, forth_word, next_word]) + + inference_program = paddle.fluid.default_main_program().clone() + + sgd_optimizer = fluid.optimizer.SGD(learning_rate=0.001) + sgd_optimizer.minimize(avg_cost) + + train_reader = paddle.batch( + paddle.dataset.imikolov.train(word_dict, N), BATCH_SIZE) + test_reader = paddle.batch( + paddle.dataset.imikolov.test(word_dict, N), BATCH_SIZE) + + return inference_program, avg_cost, train_reader, test_reader, None, predict_word + + +if __name__ == "__main__": + runtime_main(TestDistWord2vec2x2) diff --git a/python/paddle/fluid/tests/unittests/op_test.py b/python/paddle/fluid/tests/unittests/op_test.py index 299ab8e51f..b27d773f09 100644 --- a/python/paddle/fluid/tests/unittests/op_test.py +++ b/python/paddle/fluid/tests/unittests/op_test.py @@ -15,85 +15,30 @@ import unittest import numpy as np import random +import time import itertools -import 
paddle.fluid.core as core
 import collections
+
+import paddle.fluid as fluid
+import paddle.fluid.core as core
 from paddle.fluid.backward import append_backward
 from paddle.fluid.op import Operator
 from paddle.fluid.executor import Executor
-from paddle.fluid.framework import Program, OpProtoHolder
+from paddle.fluid.framework import Program, OpProtoHolder, Variable
+from testsuite import create_op, set_input, append_input_output, append_loss_ops
+from functools import reduce
+from six.moves import zip


 def randomize_probability(batch_size, class_num, dtype='float32'):
     prob = np.random.uniform(
         0.1, 1.0, size=(batch_size, class_num)).astype(dtype)
     prob_sum = prob.sum(axis=1)
-    for i in xrange(len(prob)):
+    for i in range(len(prob)):
         prob[i] /= prob_sum[i]
     return prob


-def create_op(scope, op_type, inputs, outputs, attrs):
-    kwargs = dict()
-
-    def __create_var__(name, var_name):
-        scope.var(var_name).get_tensor()
-        kwargs[name].append(var_name)
-
-    for in_name, in_dup in Operator.get_op_inputs(op_type):
-        if in_name in inputs:
-            kwargs[in_name] = []
-            if in_dup:
-                sub_in = inputs[in_name]
-                for item in sub_in:
-                    sub_in_name, _ = item[0], item[1]
-                    __create_var__(in_name, sub_in_name)
-            else:
-                __create_var__(in_name, in_name)
-
-    for out_name, out_dup in Operator.get_op_outputs(op_type):
-        if out_name in outputs:
-            kwargs[out_name] = []
-            if out_dup:
-                sub_out = outputs[out_name]
-                for item in sub_out:
-                    sub_out_name, _ = item[0], item[1]
-                    __create_var__(out_name, sub_out_name)
-            else:
-                __create_var__(out_name, out_name)
-
-    for attr_name in Operator.get_op_attr_names(op_type):
-        if attr_name in attrs:
-            kwargs[attr_name] = attrs[attr_name]
-
-    return Operator(op_type, **kwargs)
-
-
-def set_input(scope, op, inputs, place):
-    def __set_input__(var_name, var):
-        if isinstance(var, tuple) or isinstance(var, np.ndarray):
-            tensor = scope.find_var(var_name).get_tensor()
-            if isinstance(var, tuple):
-                tensor.set_lod(var[1])
-                var = var[0]
-            tensor.set_dims(var.shape)
-            tensor.set(var, place)
-        elif isinstance(var, float):
-            scope.find_var(var_name).set_float(var)
-        elif isinstance(var, int):
-            scope.find_var(var_name).set_int(var)
-
-    for in_name, in_dup in Operator.get_op_inputs(op.type()):
-        if in_name in inputs:
-            if in_dup:
-                sub_in = inputs[in_name]
-                for item in sub_in:
-                    sub_in_name, sub_in_val = item[0], item[1]
-                    __set_input__(sub_in_name, sub_in_val)
-            else:
-                __set_input__(in_name, inputs[in_name])
-
-
 def get_numeric_gradient(place,
                          scope,
                          op,
@@ -117,12 +62,16 @@ def get_numeric_gradient(place,
         return np.array(sum).mean()

     tensor_to_check = scope.find_var(input_to_check).get_tensor()
-    tensor_size = product(tensor_to_check.get_dims())
-    tensor_to_check_dtype = tensor_to_check.dtype()
+    tensor_size = product(tensor_to_check.shape())
+    tensor_to_check_dtype = tensor_to_check._dtype()
     if tensor_to_check_dtype == core.VarDesc.VarType.FP32:
         tensor_to_check_dtype = np.float32
     elif tensor_to_check_dtype == core.VarDesc.VarType.FP64:
         tensor_to_check_dtype = np.float64
+    elif tensor_to_check_dtype == core.VarDesc.VarType.FP16:
+        tensor_to_check_dtype = np.float16
+        # set delta as np.float16; it will automatically convert to float32/float64
+        delta = np.array(delta).astype(np.float16)
     else:
         raise ValueError("Not supported data type " + str(
             tensor_to_check_dtype))
@@ -130,20 +79,31 @@ def get_numeric_gradient(place,
     gradient_flat = np.zeros(shape=(tensor_size, ), dtype=tensor_to_check_dtype)

     def __get_elem__(tensor, i):
-        if tensor_to_check_dtype == np.float32:
-            return tensor.get_float_element(i)
+        if tensor_to_check_dtype == np.float16:
+            numpy_tensor = np.array(tensor).astype(np.float16)
+            numpy_tensor = numpy_tensor.flatten()
+            return numpy_tensor[i]
+        elif tensor_to_check_dtype == np.float32:
+            return tensor._get_float_element(i)
         else:
-            return tensor.get_double_element(i)
+            return tensor._get_double_element(i)

     def __set_elem__(tensor, i, e):
-        if tensor_to_check_dtype == np.float32:
-            tensor.set_float_element(i, e)
+        if tensor_to_check_dtype == np.float16:
+            numpy_tensor = np.array(tensor).astype(np.float16)
+            shape = numpy_tensor.shape
+            numpy_tensor = numpy_tensor.flatten()
+            numpy_tensor[i] = e
+            numpy_tensor = numpy_tensor.reshape(shape).view(np.uint16)
+            tensor.set(numpy_tensor, place)
+        elif tensor_to_check_dtype == np.float32:
+            tensor._set_float_element(i, e)
         else:
-            tensor.set_double_element(i, e)
+            tensor._set_double_element(i, e)

     # we only compute gradient of one element each time.
     # we use a for loop to compute the gradient of every element.
-    for i in xrange(tensor_size):
+    for i in range(tensor_size):
         if in_place:
             set_input(scope, op, inputs, place)
@@ -164,49 +124,7 @@ def get_numeric_gradient(place,
         __set_elem__(tensor_to_check, i, origin)
         gradient_flat[i] = (y_pos - y_neg) / delta / 2

-    return gradient_flat.reshape(tensor_to_check.get_dims())
-
-
-def append_input_output(block, op_proto, np_list, is_input):
-    '''Insert VarDesc and generate Python variable instance'''
-    proto_list = op_proto.inputs if is_input else op_proto.outputs
-
-    def create_var(block, name, np_list, var_proto):
-        if name not in np_list:
-            assert var_proto.intermediate, "{} not found".format(name)
-            shape = None
-            lod_level = None
-        else:
-            np_value = np_list[name]
-            if isinstance(np_value, tuple):
-                shape = list(np_value[0].shape)
-                lod_level = len(np_value[1])
-            else:
-                shape = list(np_value.shape)
-                lod_level = 0
-        return block.create_var(
-            dtype="float32", shape=shape, lod_level=lod_level, name=name)
-
-    var_dict = {}
-    for var_proto in proto_list:
-        var_name = str(var_proto.name)
-        if is_input:
-            if (var_name not in np_list) and var_proto.dispensable:
-                continue
-            assert (var_name in np_list) or (var_proto.dispensable), \
-                "Missing {} as input".format(var_name)
-        if var_proto.duplicable:
-            assert isinstance(np_list[var_name], list), \
-                "Duplicable {} should be set as list".format(var_name)
-            var_list = []
-            for (name, np_value) in np_list[var_name]:
-                var_list.append(
-                    create_var(block, name, {name: np_value}, var_proto))
-            var_dict[var_name] = var_list
-        else:
-            var_dict[var_name] = create_var(block, var_name, np_list, var_proto)
-
-    return var_dict
+    return gradient_flat.reshape(tensor_to_check.shape())


 class OpTest(unittest.TestCase):
@@ -215,16 +133,49 @@ class OpTest(unittest.TestCase):
         '''Fix random seeds to remove randomness from tests'''
         cls._np_rand_state = np.random.get_state()
         cls._py_rand_state = random.getstate()
+        cls.call_once = False
+        cls.dtype = "float32"
+        cls.outputs = {}

         np.random.seed(123)
         random.seed(124)

     @classmethod
     def tearDownClass(cls):
-        '''Restore random seeds'''
+        """Restore random seeds"""
         np.random.set_state(cls._np_rand_state)
         random.setstate(cls._py_rand_state)

+    def try_call_once(self, data_type):
+        if not self.call_once:
+            self.call_once = True
+            self.dtype = data_type
+            # See the comment of np_dtype_to_fluid_dtype
+            # If the input type is uint16, we assume float16 is used
+            # for the lodtensor dtype.
+            if self.dtype == np.uint16:
+                self.dtype = np.float16
+
+    def infer_dtype_from_inputs_outputs(self, inputs, outputs):
+        def infer_dtype(numpy_dict):
+            assert isinstance(
+                numpy_dict,
+                dict), "self.inputs, self.outputs must be numpy_dict"
+            for var_name, var_value in numpy_dict.items():
+                if isinstance(var_value, (np.ndarray, np.generic)):
+                    self.try_call_once(var_value.dtype)
+                elif isinstance(var_value, (list, tuple)):
+                    # the case of self.inputs = {"X": [("x0", x0), ("x1", x1), ("x2", x2)]}
+                    if len(var_value) > 1 and isinstance(var_value[1], (
+                            np.ndarray, np.generic)):
+                        instance = var_value[1]
+                        self.try_call_once(instance[1].dtype)
+                else:
+                    self.try_call_once("float32")
+
+        infer_dtype(inputs)
+        infer_dtype(outputs)
+
     def feed_var(self, input_vars, place):
         feed_map = {}
         for var_name in input_vars:
@@ -232,34 +183,37 @@ class OpTest(unittest.TestCase):
                 for name, np_value in self.inputs[var_name]:
                     tensor = core.LoDTensor()
                     if isinstance(np_value, tuple):
-                        tensor.set(np_value[0], place)
-                        tensor.set_lod(np_value[1])
+                        tensor.set(
+                            OpTest.np_value_to_fluid_value(np_value[0]), place)
+                        tensor.set_recursive_sequence_lengths(np_value[1])
                     else:
-                        tensor.set(np_value, place)
+                        tensor.set(
+                            OpTest.np_value_to_fluid_value(np_value), place)
                     feed_map[name] = tensor
             else:
                 tensor = core.LoDTensor()
                 if isinstance(self.inputs[var_name], tuple):
-                    tensor.set(self.inputs[var_name][0], place)
-                    tensor.set_lod(self.inputs[var_name][1])
+                    tensor.set(
+                        OpTest.np_value_to_fluid_value(self.inputs[var_name][
+                            0]), place)
+                    tensor.set_recursive_sequence_lengths(self.inputs[var_name][
+                        1])
                 else:
-                    tensor.set(self.inputs[var_name], place)
+                    tensor.set(
+                        OpTest.np_value_to_fluid_value(self.inputs[var_name]),
+                        place)
                 feed_map[var_name] = tensor

         return feed_map

-    def calc_output(self, place):
-        outs, _ = self._calc_output(place)
-        return outs
-
-    def _calc_output(self, place):
+    def _append_ops(self, block):
         op_proto = OpProtoHolder.instance().get_op_proto(self.op_type)
-
-        program = Program()
-        block = program.global_block()
-
-        inputs = append_input_output(block, op_proto, self.inputs, True)
-        outputs = append_input_output(block, op_proto, self.outputs, False)
+        # infer datatype from inputs and outputs for this test case
+        self.infer_dtype_from_inputs_outputs(self.inputs, self.outputs)
+        inputs = append_input_output(block, op_proto, self.inputs, True,
+                                     self.dtype)
+        outputs = append_input_output(block, op_proto, self.outputs, False,
+                                      self.dtype)
         op = block.append_op(
             type=self.op_type,
             inputs=inputs,
@@ -269,22 +223,68 @@ class OpTest(unittest.TestCase):
         op.desc.infer_var_type(block.desc)
         op.desc.infer_shape(block.desc)

-        fetch_list = []
-        for var_name, var in outputs.iteritems():
-            if var_name in self.outputs:
+    def _get_io_vars(self, block, numpy_inputs):
+        inputs = {}
+        for name, value in numpy_inputs.items():
+            if isinstance(value, list):
+                var_list = [
+                    block.var(sub_name) for sub_name, sub_value in value
+                ]
+                inputs[name] = var_list
+            else:
+                inputs[name] = block.var(name)
+        return inputs
+
+    def _get_inputs(self, block):
+        return self._get_io_vars(block, self.inputs)
+
+    def _get_outputs(self, block):
+        return self._get_io_vars(block, self.outputs)
+
+    def calc_output(self, place):
+        outs, _ = self._calc_output(place)
+        return outs
+
+    def _calc_output(self, place, parallel=False):
+
+        program = Program()
+        block = program.global_block()
+        self._append_ops(block)
+
+        inputs = self._get_inputs(block)
+        outputs = self._get_outputs(block)
+        feed_map = self.feed_var(inputs, place)
+
+        if parallel:
+        if parallel:
+            use_cuda = False
+            if isinstance(place, fluid.CUDAPlace):
+                use_cuda = True
+            # _calc_output only runs the forward graph, so ParallelExecutor
+            # does not need a loss_name here.
+            executor = fluid.ParallelExecutor(
+                use_cuda=use_cuda, main_program=program)
+        else:
+            executor = Executor(place)
+
+        fetch_list = getattr(self, "fetch_list", [])
+        # if the fetch_list is customized by user, we use it directly.
+        # if not, fill the fetch_list by the user configured outputs in test.
+        if len(fetch_list) == 0:
+            for var_name, var in outputs.items():
                 if isinstance(var, list):
                     for v in var:
                         fetch_list.append(v)
                 else:
                     fetch_list.append(var)
-
-        feed_map = self.feed_var(inputs, place)
-
-        exe = Executor(place)
-        outs = exe.run(program,
-                       feed=feed_map,
-                       fetch_list=fetch_list,
-                       return_numpy=False)
+        # if the fetch_list is still empty, fill it with the operator outputs.
+        if len(fetch_list) == 0:
+            for out_name, out_dup in Operator.get_op_outputs(self.op_type):
+                fetch_list.append(str(out_name))
+        # fetch_list = map(block.var, fetch_list)
+        if not isinstance(fetch_list[0], fluid.framework.Variable):
+            fetch_list = list(map(block.var, fetch_list))
+        outs = executor.run(program,
+                            feed=feed_map,
+                            fetch_list=fetch_list,
+                            return_numpy=False)
         return outs, fetch_list

     def check_output_with_place(self, place, atol):
@@ -322,7 +322,8 @@ class OpTest(unittest.TestCase):
                                     str(place))
                     if isinstance(expect, tuple):
                         self.assertListEqual(
-                            actual.lod(), expect[1], "Output (" + sub_out_name +
+                            actual.recursive_sequence_lengths(), expect[1],
+                            "Output (" + sub_out_name +
                             ") has different lod at " + str(place))
             else:
                 idx = find_actual(out_name, fetch_list)
@@ -334,23 +335,34 @@ class OpTest(unittest.TestCase):
                     np.allclose(
                         actual_t, expect_t, atol=atol),
                     "Output (" + out_name + ") has diff at " + str(place) +
-                    str(actual_t) + "\n" + str(expect_t))
+                    "\nExpect " + str(expect_t) + "\n" + "But Got " +
+                    str(actual_t))
                 if isinstance(expect, tuple):
-                    self.assertListEqual(actual.lod(), expect[1],
-                                         "Output (" + out_name +
+                    self.assertListEqual(actual.recursive_sequence_lengths(),
+                                         expect[1], "Output (" + out_name +
                                          ") has different lod at " + str(place))

-    def check_output(self, atol=1e-5):
-        places = [core.CPUPlace()]
+    def _get_places(self):
+        if self.dtype == np.float16:
+            if core.is_compiled_with_cuda() and core.op_support_gpu(
+                    self.op_type):
+                place = core.CUDAPlace(0)
+                if core.is_float16_supported(place):
+                    return [place]
+                else:
+                    return []
+        places = [fluid.CPUPlace()]
         if core.is_compiled_with_cuda() and core.op_support_gpu(self.op_type):
             places.append(core.CUDAPlace(0))
+        return places
+
+    def check_output(self, atol=1e-5):
+        places = self._get_places()
         for place in places:
             self.check_output_with_place(place, atol)

     def check_output_customized(self, checker):
-        places = [core.CPUPlace()]
-        if core.is_compiled_with_cuda() and core.op_support_gpu(self.op_type):
-            places.append(core.CUDAPlace(0))
+        places = self._get_places()
         for place in places:
             outs = self.calc_output(place)
             outs = [np.array(out) for out in outs]
@@ -359,7 +371,7 @@ class OpTest(unittest.TestCase):

     def __assert_is_close(self, numeric_grads, analytic_grads, names,
                           max_relative_error, msg_prefix):
-        for a, b, name in itertools.izip(numeric_grads, analytic_grads, names):
+        for a, b, name in zip(numeric_grads, analytic_grads, names):
             abs_a = np.abs(a)
             abs_a[abs_a < 1e-3] = 1

@@ -369,9 +381,9 @@ class OpTest(unittest.TestCase):
             def err_msg():
                 offset = np.argmax(diff_mat > max_relative_error)
                 return ("%s Variable %s max gradient diff %f over limit %f, "
-                        "the first error element is %d, %f, %f") % (
-                            msg_prefix, name, max_diff, max_relative_error,
-                            offset, a.flatten()[offset], b.flatten()[offset])
+                        "the first error element is %d, expected %f, but got %f"
+                        ) % (msg_prefix, name, max_diff, max_relative_error,
+                             offset, a.flatten()[offset], b.flatten()[offset])

             self.assertLessEqual(max_diff, max_relative_error, err_msg())

@@ -383,9 +395,7 @@ class OpTest(unittest.TestCase):
                    in_place=False,
                    max_relative_error=0.005,
                    user_defined_grads=None):
-        places = [core.CPUPlace()]
-        if core.is_compiled_with_cuda() and core.op_support_gpu(self.op_type):
-            places.append(core.CUDAPlace(0))
+        places = self._get_places()
         for place in places:
             self.check_grad_with_place(place, inputs_to_check, output_names,
                                        no_grad_set, numeric_grad_delta,
@@ -432,50 +442,21 @@ class OpTest(unittest.TestCase):
                                    max_relative_error,
                                    "Gradient Check On %s" % str(place))

-    @staticmethod
-    def _create_var_descs_(block, var_dict):
-        # FIXME: Try unify with `append_input_output`
-        for param_name in var_dict:
-            var = var_dict[param_name]
-            if not isinstance(var, list) and not isinstance(var, tuple):
-                var = [(param_name, var, None)]
-            if not isinstance(var[0], list) and not isinstance(var[0], tuple):
-                var = [(param_name, var[0], var[1])]
-
-            for i, item in enumerate(var):
-                if not isinstance(item[0], basestring):
-                    item = [[param_name] + list(item)]
-                if len(item) == 2:
-                    if isinstance(item[1], tuple):
-                        var[i] = [item[0], item[1][0], item[1][1]]
-                    else:
-                        # only set var name and value, set lod to None
-                        var[i] = list(item) + [None]
-            var_descs = [(block.create_var(
-                name=name, shape=each.shape, dtype=each.dtype), each, lod)
-                         for name, each, lod in var]
-
-            yield param_name, var_descs
-
-    @staticmethod
-    def _merge_list(iterable):
-        return reduce(lambda a, b: list(a) + list(b), iterable, [])
-
     @staticmethod
     def _numpy_to_lod_tensor(np_value, lod, place):
         tensor = core.LoDTensor()
         tensor.set(np_value, place)
         if lod is not None:
-            tensor.set_lod(lod)
+            tensor.set_recursive_sequence_lengths(lod)
         return tensor

     @staticmethod
     def np_dtype_to_fluid_dtype(input):
         """Change the dtype of float16 numpy array

-        numpy float16 is binded to paddle::platform::float16 
+        numpy float16 is bound to paddle::platform::float16
         in tensor_py.h via the help of uint16 data type since
-        the internal memory representation of float16 is 
+        the internal memory representation of float16 is
         uint16_t in paddle and np.uint16 in numpy, which are
         themselves binded together by pybind.
@@ -483,91 +464,54 @@ class OpTest(unittest.TestCase):
             input: input numpy array

         Returns:
-            input: The dtype of input will be changed to np.uint16 if 
+            input: The dtype of input will be changed to np.uint16 if
             it is originally np.float16, such that the internal memory
-            of input will be reinterpreted as of dtype np.uint16. 
+            of input will be reinterpreted as of dtype np.uint16.
""" if input.dtype == np.float16: input.dtype = np.uint16 return input - def _get_gradient(self, input_to_check, place, output_names, no_grad_set): - prog = Program() - block = prog.global_block() - inputs_with_np = { - key: value - for (key, value) in OpTest._create_var_descs_( - block, getattr(self, 'inputs', {})) - } - outputs_with_np = { - key: val - for (key, val) in OpTest._create_var_descs_( - block, getattr(self, 'outputs', {})) - } - inputs = { - k: [item[0] for item in inputs_with_np[k]] - for k in inputs_with_np - } - outputs = { - k: [item[0] for item in outputs_with_np[k]] - for k in outputs_with_np - } - - op = block.append_op( - type=self.op_type, - inputs=inputs, - outputs=outputs, - attrs=getattr(self, 'attrs', {})) - - # infer variable type and infer shape in compile-time - op.desc.infer_var_type(block.desc) - op.desc.infer_shape(block.desc) - - mean_inputs = map(block.var, output_names) + @staticmethod + def fluid_dtype_to_np_dtype(self, dtype): + """ + See above, convert the dtype to normal type. + """ + if dtype == np.uint16: + dtype = np.float16 + return dtype - if len(mean_inputs) == 1: - loss = block.create_var(dtype=mean_inputs[0].dtype, shape=[1]) - op = block.append_op( - inputs={"X": mean_inputs}, outputs={"Out": loss}, type='mean') - op.desc.infer_var_type(block.desc) - op.desc.infer_shape(block.desc) - else: - avg_sum = [] - for cur_loss in mean_inputs: - cur_avg_loss = block.create_var(dtype=cur_loss.dtype, shape=[1]) - op = block.append_op( - inputs={"X": [cur_loss]}, - outputs={"Out": [cur_avg_loss]}, - type="mean") - op.desc.infer_var_type(block.desc) - op.desc.infer_shape(block.desc) - avg_sum.append(cur_avg_loss) - - loss_sum = block.create_var(dtype=avg_sum[0].dtype, shape=[1]) - op_sum = block.append_op( - inputs={"X": avg_sum}, outputs={"Out": loss_sum}, type='sum') - op_sum.desc.infer_var_type(block.desc) - op_sum.desc.infer_shape(block.desc) - - loss = block.create_var(dtype=loss_sum.dtype, shape=[1]) - op_loss = block.append_op( - inputs={"X": loss_sum}, - outputs={"Out": loss}, - type='scale', - attrs={'scale': 1.0 / float(len(avg_sum))}) - op_loss.desc.infer_var_type(block.desc) - op_loss.desc.infer_shape(block.desc) + @staticmethod + def np_value_to_fluid_value(input): + if input.dtype == np.float16: + input = input.view(np.uint16) + return input + def _get_gradient(self, + input_to_check, + place, + output_names, + no_grad_set, + parallel=False): + prog = Program() + block = prog.global_block() + self._append_ops(block) + loss = append_loss_ops(block, output_names) param_grad_list = append_backward( loss=loss, parameter_list=input_to_check, no_grad_set=no_grad_set) - feed_dict = { - item[0].name: OpTest._numpy_to_lod_tensor(item[1], item[2], place) - for p_name in inputs_with_np for item in inputs_with_np[p_name] - } + inputs = self._get_inputs(block) + feed_dict = self.feed_var(inputs, place) fetch_list = [g for p, g in param_grad_list] - executor = Executor(place) - return map(np.array, - executor.run(prog, feed_dict, fetch_list, - return_numpy=False)) + if parallel: + use_cuda = False + if isinstance(place, fluid.CUDAPlace(0)): + use_cuda = True + executor = fluid.ParallelExecutor( + use_cuda=use_cuda, loss_name=loss.name, main_program=prog) + else: + executor = Executor(place) + return list( + map(np.array, + executor.run(prog, feed_dict, fetch_list, return_numpy=False))) diff --git a/python/paddle/fluid/tests/unittests/parallel_executor_test_base.py b/python/paddle/fluid/tests/unittests/parallel_executor_test_base.py new file mode 100644 
diff --git a/python/paddle/fluid/tests/unittests/parallel_executor_test_base.py b/python/paddle/fluid/tests/unittests/parallel_executor_test_base.py
new file mode 100644
index 0000000000..67c35e9de7
--- /dev/null
+++ b/python/paddle/fluid/tests/unittests/parallel_executor_test_base.py
@@ -0,0 +1,113 @@
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import multiprocessing
+import os
+import unittest
+import paddle.fluid as fluid
+import time
+import numpy as np
+import math
+import sys
+
+__all__ = ['TestParallelExecutorBase']
+
+
+class TestParallelExecutorBase(unittest.TestCase):
+    def check_network_convergence(self,
+                                  method,
+                                  use_cuda=True,
+                                  memory_opt=True,
+                                  iter=50,
+                                  batch_size=None,
+                                  allow_op_delay=False,
+                                  feed_dict=None,
+                                  seed=None,
+                                  use_parallel_executor=True,
+                                  use_reduce=False,
+                                  optimizer=fluid.optimizer.Adam):
+        def run_executor(exe, feed, fetch_list, program=None):
+            if isinstance(exe, fluid.ParallelExecutor):
+                res = exe.run(fetch_list=fetch_list, feed=feed)
+            elif isinstance(exe, fluid.Executor):
+                if program is None:
+                    program = fluid.default_main_program()
+                res = exe.run(program=program, feed=feed, fetch_list=fetch_list)
+            else:
+                raise ValueError('Unknown type exe')
+            return res
+
+        main = fluid.Program()
+        startup = fluid.Program()
+        startup.random_seed = 1  # Fix random seed
+        main.random_seed = 1
+        with fluid.program_guard(main, startup):
+            if seed is not None:
+                startup.random_seed = seed
+                main.random_seed = seed
+
+            loss = method(use_feed=feed_dict is not None)
+
+            optimizer().minimize(loss)
+
+            if memory_opt:
+                fluid.memory_optimize(main)
+
+            place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
+            startup_exe = fluid.Executor(place)
+            startup_exe.run(startup)
+            exec_strategy = fluid.ExecutionStrategy()
+            exec_strategy.allow_op_delay = allow_op_delay
+
+            build_strategy = fluid.BuildStrategy()
+            build_strategy.reduce_strategy = fluid.BuildStrategy.ReduceStrategy.Reduce \
+                if use_reduce else fluid.BuildStrategy.ReduceStrategy.AllReduce
+
+            if use_parallel_executor:
+                exe = fluid.ParallelExecutor(
+                    use_cuda,
+                    loss_name=loss.name,
+                    exec_strategy=exec_strategy,
+                    build_strategy=build_strategy)
+            else:
+                exe = fluid.Executor(place=place)
+
+            if batch_size is not None:
+                batch_size *= fluid.core.get_cuda_device_count(
+                ) if use_cuda else int(
+                    os.environ.get('CPU_NUM', multiprocessing.cpu_count()))
+            begin = time.time()
+            first_loss, = run_executor(
+                exe=exe, feed=feed_dict, fetch_list=[loss.name])
+
+            for i in range(iter):
+                run_executor(exe=exe, feed=feed_dict, fetch_list=[])
+
+            last_loss, = run_executor(
+                exe=exe, feed=feed_dict, fetch_list=[loss.name])
+            end = time.time()
+
+            if batch_size is not None:
+                # iter + 2 batches run in total: the first and last fetch the
+                # loss, the middle `iter` runs fetch nothing.
+                print("%.4f Instance per second" % (
+                    (batch_size * (iter + 2)) / (end - begin)))
+
+            avg_last_loss_val = np.array(last_loss).mean()
+            avg_first_loss_val = np.array(first_loss).mean()
+            if math.isnan(float(avg_last_loss_val)) or math.isnan(
+                    float(avg_first_loss_val)):
+                sys.exit("got NaN loss, training failed.")
+
+            print(first_loss, last_loss)
+            # self.assertGreater(first_loss[0], last_loss[0])
+            return first_loss, last_loss
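check_network_convergence expects `method` to be a callable that builds the network inside the supplied program guard and returns a loss variable. A hypothetical subclass could drive it as below (a sketch only: the network, feed names, sizes, and the CPU_NUM setting are illustrative assumptions, not part of this patch):

    import os
    import numpy as np
    import paddle.fluid as fluid
    from parallel_executor_test_base import TestParallelExecutorBase


    def simple_fc_net(use_feed):
        # use_feed is passed in by check_network_convergence; this toy
        # network always reads its data from the feed, so it is unused here.
        img = fluid.layers.data(name='image', shape=[784], dtype='float32')
        label = fluid.layers.data(name='label', shape=[1], dtype='int64')
        hidden = fluid.layers.fc(input=img, size=200, act='relu')
        prediction = fluid.layers.fc(input=hidden, size=10, act='softmax')
        loss = fluid.layers.cross_entropy(input=prediction, label=label)
        return fluid.layers.mean(loss)


    class TestSimpleFC(TestParallelExecutorBase):
        def test_simple_fc(self):
            # CPU ParallelExecutor derives its device count from CPU_NUM.
            os.environ['CPU_NUM'] = '2'
            img = np.random.random(size=(32, 784)).astype('float32')
            label = np.random.randint(0, 10, size=(32, 1)).astype('int64')
            self.check_network_convergence(
                simple_fc_net,
                feed_dict={'image': img,
                           'label': label},
                use_cuda=False)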
diff --git a/python/paddle/fluid/tests/unittests/test_accuracy_op.py b/python/paddle/fluid/tests/unittests/test_accuracy_op.py
index 212a87e529..db1861fd10 100644
--- a/python/paddle/fluid/tests/unittests/test_accuracy_op.py
+++ b/python/paddle/fluid/tests/unittests/test_accuracy_op.py
@@ -26,7 +26,7 @@ class TestAccuracyOp(OpTest):
         label = np.random.randint(0, 2, (n, 1))
         self.inputs = {'Out': infer, 'Indices': indices, "Label": label}
         num_correct = 0
-        for rowid in xrange(n):
+        for rowid in range(n):
             for ele in indices[rowid]:
                 if ele == label[rowid]:
                     num_correct += 1
diff --git a/python/paddle/fluid/tests/unittests/test_activation_op.py b/python/paddle/fluid/tests/unittests/test_activation_op.py
index 5ed387fb12..34f9cf0620 100644
--- a/python/paddle/fluid/tests/unittests/test_activation_op.py
+++ b/python/paddle/fluid/tests/unittests/test_activation_op.py
@@ -313,9 +313,9 @@ class TestAbs(OpTest):
         self.init_dtype()

         x = np.random.uniform(-1, 1, [4, 4]).astype(self.dtype)
-        # Because we set delta = 0.005 in caculating numeric gradient,
+        # Because we set delta = 0.005 in calculating numeric gradient,
         # if x is too small, such as 0.002, x_neg will be -0.003
-        # x_pos will be 0.007, so the numeric gradient is unaccurate.
+        # x_pos will be 0.007, so the numeric gradient is inaccurate.
         # we should avoid this
         x[np.abs(x) < 0.005] = 0.02
         out = np.abs(x)
diff --git a/python/paddle/fluid/tests/unittests/test_adam_op.py b/python/paddle/fluid/tests/unittests/test_adam_op.py
index 3c65f3d44a..fa4b39879c 100644
--- a/python/paddle/fluid/tests/unittests/test_adam_op.py
+++ b/python/paddle/fluid/tests/unittests/test_adam_op.py
@@ -273,7 +273,7 @@ class TestSparseAdamOp(unittest.TestCase):
         self.setup(scope, place)

         op_args = dict()
-        for key, np_array in self.dense_inputs.iteritems():
+        for key, np_array in self.dense_inputs.items():
             var = scope.var(key).get_tensor()
             var.set(np_array, place)
             op_args[key] = key
@@ -290,7 +290,7 @@ class TestSparseAdamOp(unittest.TestCase):
         adam_op = Operator("adam", **op_args)
         adam_op.run(scope, place)

-        for key, np_array in self.outputs.iteritems():
+        for key, np_array in self.outputs.items():
             out_var = scope.var(key).get_tensor()
             actual = np.array(out_var)
             actual = actual.reshape([actual.size])
diff --git a/python/paddle/fluid/tests/unittests/test_anchor_generator_op.py b/python/paddle/fluid/tests/unittests/test_anchor_generator_op.py
new file mode 100644
index 0000000000..9c7d5d41f0
--- /dev/null
+++ b/python/paddle/fluid/tests/unittests/test_anchor_generator_op.py
@@ -0,0 +1,110 @@
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
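+#
+# These tests run the anchor_generator op and compare its 'Anchors' and
+# 'Variances' outputs against the pure-NumPy reference implementation
+# (anchor_generator_in_python) defined below.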
+ +import unittest +import numpy as np +import sys +import math +from op_test import OpTest + + +def anchor_generator_in_python(input_feat, anchor_sizes, aspect_ratios, + variances, stride, offset): + num_anchors = len(aspect_ratios) * len(anchor_sizes) + layer_h = input_feat.shape[2] + layer_w = input_feat.shape[3] + out_dim = (layer_h, layer_w, num_anchors, 4) + out_anchors = np.zeros(out_dim).astype('float32') + + for h_idx in range(layer_h): + for w_idx in range(layer_w): + x_ctr = (w_idx * stride[0]) + offset * (stride[0] - 1) + y_ctr = (h_idx * stride[1]) + offset * (stride[1] - 1) + idx = 0 + for r in range(len(aspect_ratios)): + ar = aspect_ratios[r] + for s in range(len(anchor_sizes)): + anchor_size = anchor_sizes[s] + area = stride[0] * stride[1] + area_ratios = area / ar + base_w = np.round(np.sqrt(area_ratios)) + base_h = np.round(base_w * ar) + scale_w = anchor_size / stride[0] + scale_h = anchor_size / stride[1] + w = scale_w * base_w + h = scale_h * base_h + out_anchors[h_idx, w_idx, idx, :] = [ + (x_ctr - 0.5 * (w - 1)), (y_ctr - 0.5 * (h - 1)), + (x_ctr + 0.5 * (w - 1)), (y_ctr + 0.5 * (h - 1)) + ] + idx += 1 + + # set the variance. + out_var = np.tile(variances, (layer_h, layer_w, num_anchors, 1)) + out_anchors = out_anchors.astype('float32') + out_var = out_var.astype('float32') + return out_anchors, out_var + + +class TestAnchorGeneratorOp(OpTest): + def set_data(self): + self.init_test_params() + self.init_test_input() + self.init_test_output() + self.inputs = {'Input': self.input} + + self.attrs = { + 'anchor_sizes': self.anchor_sizes, + 'aspect_ratios': self.aspect_ratios, + 'stride': self.stride, + 'offset': self.offset, + 'variances': self.variances, + } + + self.outputs = {'Anchors': self.out_anchors, 'Variances': self.out_var} + + def test_check_output(self): + self.check_output() + + def setUp(self): + self.op_type = "anchor_generator" + self.set_data() + + def init_test_params(self): + self.batch_size = 1 + self.input_channels = 2 + self.layer_h = 2 + self.layer_w = 2 + + self.anchor_sizes = [64., 128., 256., 512.] + self.aspect_ratios = [0.5, 1., 2.] + self.stride = [16., 16.] + + self.offset = 0.5 + + self.variances = [0.1, 0.1, 0.2, 0.2] + + def init_test_input(self): + self.input = np.random.random( + (self.batch_size, self.input_channels, self.layer_h, + self.layer_w)).astype('float32') + + def init_test_output(self): + self.out_anchors, self.out_var = anchor_generator_in_python( + self.input, self.anchor_sizes, self.aspect_ratios, self.variances, + self.stride, self.offset) + + +if __name__ == '__main__': + unittest.main() diff --git a/python/paddle/fluid/tests/unittests/test_arg_min_max_op.py b/python/paddle/fluid/tests/unittests/test_arg_min_max_op.py new file mode 100644 index 0000000000..e04412f809 --- /dev/null +++ b/python/paddle/fluid/tests/unittests/test_arg_min_max_op.py @@ -0,0 +1,82 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
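+#
+# BaseTestCase runs the arg_min/arg_max op named by self.op_type and checks
+# it against numpy's argmin/argmax; the subclasses below vary the operator,
+# input shape, dtype and reduction axis.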
+ +import unittest +import numpy as np +from op_test import OpTest + + +class BaseTestCase(OpTest): + def initTestCase(self): + self.op_type = 'arg_min' + self.dims = (3, 4, 5) + self.dtype = 'float32' + self.axis = 0 + + def setUp(self): + self.initTestCase() + self.x = (1000 * np.random.random(self.dims)).astype(self.dtype) + self.inputs = {'X': self.x} + self.attrs = {'axis': self.axis} + if self.op_type == "arg_min": + self.outputs = {'Out': np.argmin(self.x, axis=self.axis)} + else: + self.outputs = {'Out': np.argmax(self.x, axis=self.axis)} + + def test_check_output(self): + self.check_output() + + +class TestCase0(BaseTestCase): + def initTestCase(self): + self.op_type = 'arg_max' + self.dims = (3, 4, 5) + self.dtype = 'float32' + self.axis = 0 + + +class TestCase1(BaseTestCase): + def initTestCase(self): + self.op_type = 'arg_min' + self.dims = (3, 4) + self.dtype = 'float64' + self.axis = 1 + + +class TestCase2(BaseTestCase): + def initTestCase(self): + self.op_type = 'arg_max' + self.dims = (3, 4) + self.dtype = 'int64' + self.axis = 0 + + +class TestCase3(BaseTestCase): + def initTestCase(self): + self.op_type = 'arg_max' + self.dims = (3, ) + self.dtype = 'int64' + self.axis = 0 + + +class TestCase4(BaseTestCase): + def initTestCase(self): + self.op_type = 'arg_min' + self.dims = (1, ) + self.dtype = 'int32' + self.axis = 0 + + +if __name__ == '__main__': + unittest.main() diff --git a/python/paddle/fluid/tests/unittests/test_argsort_op.py b/python/paddle/fluid/tests/unittests/test_argsort_op.py new file mode 100644 index 0000000000..b29a102a38 --- /dev/null +++ b/python/paddle/fluid/tests/unittests/test_argsort_op.py @@ -0,0 +1,56 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
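+#
+# TestArgsortOp checks the argsort op against numpy's argsort/sort along a
+# configurable axis; negative axes are normalized with `axis + len(x.shape)`
+# before the expected outputs are built.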
+ +import unittest +import numpy as np +from op_test import OpTest + + +class TestArgsortOp(OpTest): + def setUp(self): + self.init_axis() + x = np.random.random((2, 3, 4, 5, 10)).astype("float32") + if self.axis < 0: + self.axis = self.axis + len(x.shape) + self.indices = np.argsort(x, kind='quicksort', axis=self.axis) + self.out = np.sort(x, kind='quicksort', axis=self.axis) + self.op_type = "argsort" + self.inputs = {'X': x} + self.attrs = {'axis': self.axis} + self.outputs = {'Indices': self.indices, 'Out': self.out} + + def init_axis(self): + self.axis = -1 + + def test_check_output(self): + self.check_output() + + +class TestArgsortOpAxis0(TestArgsortOp): + def init_axis(self): + self.axis = 0 + + +class TestArgsortOpAxis1(TestArgsortOp): + def init_axis(self): + self.axis = 1 + + +class TestArgsortOpAxisNeg2(TestArgsortOp): + def init_axis(self): + self.axis = -2 + + +if __name__ == "__main__": + unittest.main() diff --git a/python/paddle/fluid/tests/unittests/test_array_read_write_op.py b/python/paddle/fluid/tests/unittests/test_array_read_write_op.py index a49e9035a4..0000fb0958 100644 --- a/python/paddle/fluid/tests/unittests/test_array_read_write_op.py +++ b/python/paddle/fluid/tests/unittests/test_array_read_write_op.py @@ -80,8 +80,9 @@ class TestArrayReadWrite(unittest.TestCase): append_backward(total_sum_scaled) - g_vars = map(default_main_program().global_block().var, - [each_x.name + "@GRAD" for each_x in x]) + g_vars = list( + map(default_main_program().global_block().var, + [each_x.name + "@GRAD" for each_x in x])) g_out = [ item.sum() for item in exe.run( diff --git a/python/paddle/fluid/tests/unittests/test_auc_op.py b/python/paddle/fluid/tests/unittests/test_auc_op.py index 948836039b..6580c70ca6 100644 --- a/python/paddle/fluid/tests/unittests/test_auc_op.py +++ b/python/paddle/fluid/tests/unittests/test_auc_op.py @@ -15,63 +15,42 @@ import unittest import numpy as np from op_test import OpTest +from paddle.fluid import metrics class TestAucOp(OpTest): def setUp(self): self.op_type = "auc" pred = np.random.random((128, 2)).astype("float32") - indices = np.random.randint(0, 2, (128, 2)) labels = np.random.randint(0, 2, (128, 1)) num_thresholds = 200 - self.inputs = {'Out': pred, 'Indices': indices, 'Label': labels} + tp = np.zeros((num_thresholds, )).astype("int64") + tn = np.zeros((num_thresholds, )).astype("int64") + fp = np.zeros((num_thresholds, )).astype("int64") + fn = np.zeros((num_thresholds, )).astype("int64") + + self.inputs = { + 'Predict': pred, + 'Label': labels, + 'TP': tp, + 'TN': tn, + 'FP': fp, + 'FN': fn + } self.attrs = {'curve': 'ROC', 'num_thresholds': num_thresholds} - # NOTE: sklearn use a different way to generate thresholds - # which will cause the result differs slightly: - # from sklearn.metrics import roc_curve, auc - # fpr, tpr, thresholds = roc_curve(labels, pred) - # auc_value = auc(fpr, tpr) - # we caculate AUC again using numpy for testing - kepsilon = 1e-7 # to account for floating point imprecisions - thresholds = [(i + 1) * 1.0 / (num_thresholds - 1) - for i in range(num_thresholds - 2)] - thresholds = [0.0 - kepsilon] + thresholds + [1.0 + kepsilon] - # caculate TP, FN, TN, FP count - tp_list = np.ndarray((num_thresholds, )) - fn_list = np.ndarray((num_thresholds, )) - tn_list = np.ndarray((num_thresholds, )) - fp_list = np.ndarray((num_thresholds, )) - for idx_thresh, thresh in enumerate(thresholds): - tp, fn, tn, fp = 0, 0, 0, 0 - for i, lbl in enumerate(labels): - if lbl: - if pred[i, 0] >= thresh: - tp += 1 - else: - fn += 1 - 
else: - if pred[i, 0] >= thresh: - fp += 1 - else: - tn += 1 - tp_list[idx_thresh] = tp - fn_list[idx_thresh] = fn - tn_list[idx_thresh] = tn - fp_list[idx_thresh] = fp - - epsilon = 1e-6 - tpr = (tp_list.astype("float32") + epsilon) / ( - tp_list + fn_list + epsilon) - fpr = fp_list.astype("float32") / (fp_list + tn_list + epsilon) - rec = (tp_list.astype("float32") + epsilon) / ( - tp_list + fp_list + epsilon) - - x = fpr[:num_thresholds - 1] - fpr[1:] - y = (tpr[:num_thresholds - 1] + tpr[1:]) / 2.0 - auc_value = np.sum(x * y) - - self.outputs = {'AUC': auc_value} + python_auc = metrics.Auc(name="auc", + curve='ROC', + num_thresholds=num_thresholds) + python_auc.update(pred, labels) + + self.outputs = { + 'AUC': python_auc.eval(), + 'TPOut': python_auc.tp_list, + 'FNOut': python_auc.fn_list, + 'TNOut': python_auc.tn_list, + 'FPOut': python_auc.fp_list + } def test_check_output(self): self.check_output() diff --git a/python/paddle/fluid/tests/unittests/test_batch_norm_mkldnn_op.py b/python/paddle/fluid/tests/unittests/test_batch_norm_mkldnn_op.py index f6097d4b84..18fa546159 100644 --- a/python/paddle/fluid/tests/unittests/test_batch_norm_mkldnn_op.py +++ b/python/paddle/fluid/tests/unittests/test_batch_norm_mkldnn_op.py @@ -52,5 +52,17 @@ class TestMKLDNNBatchNormOpInference(TestBatchNormOpInference): self.check_with_place(place, data_format, self.dtype, [2, 3, 4, 5]) +class TestMKLDNNBatchNormOpWithReluInference(TestBatchNormOpInference): + def init_kernel_type(self): + self.use_mkldnn = True + self.fuse_with_relu = True + + def test_check_output(self): + place = core.CPUPlace() + data_format = "NCHW" + + self.check_with_place(place, data_format, self.dtype, [2, 3, 4, 5]) + + if __name__ == '__main__': unittest.main() diff --git a/python/paddle/fluid/tests/unittests/test_batch_norm_op.py b/python/paddle/fluid/tests/unittests/test_batch_norm_op.py index 4216d83653..f805fdc35f 100644 --- a/python/paddle/fluid/tests/unittests/test_batch_norm_op.py +++ b/python/paddle/fluid/tests/unittests/test_batch_norm_op.py @@ -128,8 +128,7 @@ def create_or_get_tensor(scope, var_name, var, place): tensor = scope.var(var_name).get_tensor() if var is not None: assert isinstance(var, np.ndarray) - tensor.set_lod([[]]) - tensor.set_dims(var.shape) + tensor.set_recursive_sequence_lengths([]) tensor.set(var, place) return tensor @@ -159,6 +158,7 @@ class TestBatchNormOpInference(unittest.TestCase): def setUp(self): self.dtype = np.float32 self.use_mkldnn = False + self.fuse_with_relu = False self.init_kernel_type() def __assert_close(self, tensor, np_array, msg, atol=1e-4): @@ -180,6 +180,8 @@ class TestBatchNormOpInference(unittest.TestCase): scale_shape = [c] x_val = np.random.random_sample(x_shape).astype(dtype) + # generate some negative values to test case with relu fused + x_val = x_val - 0.5 scale_val = np.random.random_sample(scale_shape).astype(np.float32) bias_val = np.random.random_sample(scale_shape).astype(np.float32) @@ -188,6 +190,8 @@ class TestBatchNormOpInference(unittest.TestCase): y_out = _reference_testing(x_val, scale_val, bias_val, mean, variance, epsilon, data_layout).astype(dtype) + if self.fuse_with_relu: + y_out = np.maximum(y_out, 0) scope = core.Scope() @@ -233,6 +237,7 @@ class TestBatchNormOpInference(unittest.TestCase): is_test=True, data_layout=data_layout, use_mkldnn=self.use_mkldnn, + fuse_with_relu=self.fuse_with_relu, epsilon=epsilon) batch_norm_op.run(scope, place) @@ -265,6 +270,7 @@ class TestFP16BatchNormOpInference(TestBatchNormOpInference): def setUp(self): 
self.dtype = np.float16 self.use_mkldnn = False + self.fuse_with_relu = False self.init_kernel_type() def test_check_output(self): @@ -284,6 +290,7 @@ class TestFP16BatchNormOpInference(TestBatchNormOpInference): class TestBatchNormOpTraining(unittest.TestCase): def setUp(self): self.use_mkldnn = False + self.fuse_with_relu = False self.data_formats = ["NCHW", "NHWC"] self.init_kernel_type() @@ -367,7 +374,8 @@ class TestBatchNormOpTraining(unittest.TestCase): "epsilon": epsilon, "is_test": False, "data_layout": data_layout, - "use_mkldnn": self.use_mkldnn + "use_mkldnn": self.use_mkldnn, + "fuse_with_relu": self.fuse_with_relu }) block.create_var(name='y@GRAD', dtype='float32', shape=y.shape) @@ -407,7 +415,7 @@ class TestBatchNormOpTraining(unittest.TestCase): self.__assert_close(scale_grad, out[6], "scale_grad") self.__assert_close(bias_grad, out[7], "bias_grad") - print "op test forward passed: ", str(place), data_layout + print("op test forward passed: ", str(place), data_layout) places = [core.CPUPlace()] diff --git a/python/paddle/fluid/tests/unittests/test_beam_search_decode_op.py b/python/paddle/fluid/tests/unittests/test_beam_search_decode_op.py index 7976dd7c3f..4a3ac2a31e 100644 --- a/python/paddle/fluid/tests/unittests/test_beam_search_decode_op.py +++ b/python/paddle/fluid/tests/unittests/test_beam_search_decode_op.py @@ -20,6 +20,8 @@ from paddle.fluid.op import Operator class TestBeamSearchDecodeOp(unittest.TestCase): + """unittest of beam_search_decode_op""" + def setUp(self): self.scope = core.Scope() self.place = core.CPUPlace() @@ -32,32 +34,44 @@ class TestBeamSearchDecodeOp(unittest.TestCase): def test_get_set(self): ids = self.scope.var("ids").get_lod_tensor_array() - self.append_lod_tensor( - ids, [[0, 3, 6], [0, 1, 2, 3, 4, 5, 6]], - np.array( - [1, 2, 3, 4, 5, 6], dtype="int64")) - self.append_lod_tensor( - ids, [[0, 3, 6], [0, 1, 1, 3, 5, 5, 6]], - np.array( - [0, 1, 2, 3, 4, 5], dtype="int64")) - self.append_lod_tensor( - ids, [[0, 3, 6], [0, 0, 1, 2, 3, 4, 5]], - np.array( - [0, 1, 2, 3, 4], dtype="int64")) - scores = self.scope.var("scores").get_lod_tensor_array() - self.append_lod_tensor( - scores, [[0, 3, 6], [0, 1, 2, 3, 4, 5, 6]], - np.array( - [1, 2, 3, 4, 5, 6], dtype="float64")) - self.append_lod_tensor( - scores, [[0, 3, 6], [0, 1, 1, 3, 5, 5, 6]], - np.array( - [0, 1, 2, 3, 4, 5], dtype="float64")) - self.append_lod_tensor( - scores, [[0, 3, 6], [0, 0, 1, 2, 3, 4, 5]], - np.array( - [0, 1, 2, 3, 4], dtype="float64")) + # Construct sample data with 5 steps and 2 source sentences + # beam_size = 2, end_id = 1 + # start with start_id + [ + self.append_lod_tensor( + array, [[0, 1, 2], [0, 1, 2]], np.array( + [0, 0], dtype=dtype)) + for array, dtype in ((ids, "int64"), (scores, "float32")) + ] + [ + self.append_lod_tensor( + array, [[0, 1, 2], [0, 2, 4]], + np.array( + [2, 3, 4, 5], dtype=dtype)) + for array, dtype in ((ids, "int64"), (scores, "float32")) + ] + [ + self.append_lod_tensor( + array, [[0, 2, 4], [0, 2, 2, 4, 4]], + np.array( + [3, 1, 5, 4], dtype=dtype)) + for array, dtype in ((ids, "int64"), (scores, "float32")) + ] + [ + self.append_lod_tensor( + array, [[0, 2, 4], [0, 1, 2, 3, 4]], + np.array( + [1, 1, 3, 5], dtype=dtype)) + for array, dtype in ((ids, "int64"), (scores, "float32")) + ] + [ + self.append_lod_tensor( + array, [[0, 2, 4], [0, 0, 0, 2, 2]], + np.array( + [5, 1], dtype=dtype)) + for array, dtype in ((ids, "int64"), (scores, "float32")) + ] sentence_ids = self.scope.var("sentence_ids").get_tensor() sentence_scores = 
self.scope.var("sentence_scores").get_tensor() @@ -69,21 +83,25 @@ class TestBeamSearchDecodeOp(unittest.TestCase): Scores="scores", # outputs SentenceIds="sentence_ids", - SentenceScores="sentence_scores") + SentenceScores="sentence_scores", + beam_size=2, + end_id=1, ) beam_search_decode_op.run(self.scope, self.place) - expected_lod = [[0, 4, 8], [0, 1, 3, 6, 9, 10, 13, 16, 19]] + expected_lod = [[0, 2, 4], [0, 4, 7, 12, 17]] self.assertEqual(sentence_ids.lod(), expected_lod) self.assertEqual(sentence_scores.lod(), expected_lod) expected_data = np.array( - [2, 1, 0, 3, 1, 0, 3, 2, 1, 5, 4, 3, 2, 4, 4, 3, 6, 5, 4], "int64") + [0, 2, 3, 1, 0, 2, 1, 0, 4, 5, 3, 5, 0, 4, 5, 3, 1], "int64") self.assertTrue(np.array_equal(np.array(sentence_ids), expected_data)) self.assertTrue( np.array_equal(np.array(sentence_scores), expected_data)) +@unittest.skipIf(not core.is_compiled_with_cuda(), + "core is not compiled with CUDA") class TestBeamSearchDecodeOpGPU(TestBeamSearchDecodeOp): def setUp(self): self.scope = core.Scope() diff --git a/python/paddle/fluid/tests/unittests/test_beam_search_op.py b/python/paddle/fluid/tests/unittests/test_beam_search_op.py index bc708f3aff..e8283fc942 100644 --- a/python/paddle/fluid/tests/unittests/test_beam_search_op.py +++ b/python/paddle/fluid/tests/unittests/test_beam_search_op.py @@ -26,9 +26,12 @@ def create_tensor(scope, name, np_data): class BeamSearchOpTester(unittest.TestCase): + """unittest of beam_search_op""" + def setUp(self): self.scope = core.Scope() self._create_ids() + self._create_pre_scores() self._create_scores() self._create_pre_ids() self.scope.var('selected_ids') @@ -37,7 +40,8 @@ class BeamSearchOpTester(unittest.TestCase): def test_run(self): op = Operator( 'beam_search', - pre_ids="pre_ids", + pre_ids='pre_ids', + pre_scores='pre_scores', ids='ids', scores='scores', selected_ids='selected_ids', @@ -47,15 +51,26 @@ class BeamSearchOpTester(unittest.TestCase): end_id=0, ) op.run(self.scope, core.CPUPlace()) selected_ids = self.scope.find_var("selected_ids").get_tensor() - print 'selected_ids', np.array(selected_ids) - print 'lod', selected_ids.lod() + selected_scores = self.scope.find_var("selected_scores").get_tensor() + self.assertTrue( + np.allclose( + np.array(selected_ids), np.array([4, 2, 3, 8])[:, np.newaxis])) + self.assertTrue( + np.allclose( + np.array(selected_scores), + np.array([0.5, 0.6, 0.9, 0.7])[:, np.newaxis])) + self.assertEqual(selected_ids.lod(), [[0, 2, 4], [0, 1, 2, 3, 4]]) def _create_pre_ids(self): np_data = np.array([[1, 2, 3, 4]], dtype='int64') - tensor = create_tensor(self.scope, "pre_ids", np_data) + tensor = create_tensor(self.scope, 'pre_ids', np_data) + + def _create_pre_scores(self): + np_data = np.array([[0.1, 0.2, 0.3, 0.4]], dtype='float32') + tensor = create_tensor(self.scope, 'pre_scores', np_data) def _create_ids(self): - self.lod = [[0, 1, 4], [0, 1, 2, 3, 4]] + self.lod = [[0, 2, 4], [0, 1, 2, 3, 4]] np_data = np.array( [[4, 2, 5], [2, 1, 3], [3, 5, 2], [8, 2, 1]], dtype='int64') tensor = create_tensor(self.scope, "ids", np_data) diff --git a/python/paddle/fluid/tests/unittests/test_bilinear_interp_op.py b/python/paddle/fluid/tests/unittests/test_bilinear_interp_op.py index bffb4f3b66..b04f25ef87 100644 --- a/python/paddle/fluid/tests/unittests/test_bilinear_interp_op.py +++ b/python/paddle/fluid/tests/unittests/test_bilinear_interp_op.py @@ -15,9 +15,13 @@ import unittest import numpy as np from op_test import OpTest +import paddle.fluid.core as core -def bilinear_interp_np(input, out_h, out_w): 
+def bilinear_interp_np(input, out_h, out_w, out_size): + if out_size is not None: + out_h = out_size[0] + out_w = out_size[1] batch_size, channel, in_h, in_w = input.shape if out_h > 1: ratio_h = (in_h - 1.0) / (out_h - 1.0) @@ -42,19 +46,22 @@ def bilinear_interp_np(input, out_h, out_w): out[:, :, i, j] = h2lambda*(w2lambda*input[:, :, h, w] + w1lambda*input[:, :, h, w+wid]) + \ - h1lambda*(w2lambda*input[:, :, h+hid, w] + - w1lambda*input[:, :, h+hid, w+wid]) - return out.astype("float32") + h1lambda*(w2lambda*input[:, :, h+hid, w] + + w1lambda*input[:, :, h+hid, w+wid]) + return out.astype(input.dtype) class TestBilinearInterpOp(OpTest): def setUp(self): + self.out_size = None self.init_test_case() self.op_type = "bilinear_interp" input_np = np.random.random(self.input_shape).astype("float32") - output_np = bilinear_interp_np(input_np, self.out_h, self.out_w) - + output_np = bilinear_interp_np(input_np, self.out_h, self.out_w, + self.out_size) self.inputs = {'X': input_np} + if self.out_size is not None: + self.inputs['OutSize'] = self.out_size self.attrs = {'out_h': self.out_h, 'out_w': self.out_w} self.outputs = {'Out': output_np} @@ -68,6 +75,7 @@ class TestBilinearInterpOp(OpTest): self.input_shape = [2, 3, 4, 4] self.out_h = 2 self.out_w = 2 + self.out_size = np.array([3, 3]).astype("int32") class TestCase1(TestBilinearInterpOp): @@ -91,5 +99,68 @@ class TestCase3(TestBilinearInterpOp): self.out_w = 128 +class TestCase4(TestBilinearInterpOp): + def init_test_case(self): + self.input_shape = [4, 1, 7, 8] + self.out_h = 1 + self.out_w = 1 + self.out_size = np.array([2, 2]).astype("int32") + + +class TestCase5(TestBilinearInterpOp): + def init_test_case(self): + self.input_shape = [3, 3, 9, 6] + self.out_h = 12 + self.out_w = 12 + self.out_size = np.array([11, 11]).astype("int32") + + +class TestCase6(TestBilinearInterpOp): + def init_test_case(self): + self.input_shape = [1, 1, 128, 64] + self.out_h = 64 + self.out_w = 128 + self.out_size = np.array([65, 129]).astype("int32") + + +class TestBilinearInterpOpUint8(OpTest): + def setUp(self): + self.out_size = None + self.init_test_case() + self.op_type = "bilinear_interp" + input_np = np.random.randint( + low=0, high=256, size=self.input_shape).astype("uint8") + output_np = bilinear_interp_np(input_np, self.out_h, self.out_w, + self.out_size) + self.inputs = {'X': input_np} + if self.out_size is not None: + self.inputs['OutSize'] = self.out_size + self.attrs = {'out_h': self.out_h, 'out_w': self.out_w} + self.outputs = {'Out': output_np} + + def test_check_output(self): + self.check_output_with_place(place=core.CPUPlace(), atol=1) + + def init_test_case(self): + self.input_shape = [1, 3, 9, 6] + self.out_h = 10 + self.out_w = 9 + + +class TestCase1Uint8(TestBilinearInterpOpUint8): + def init_test_case(self): + self.input_shape = [2, 3, 128, 64] + self.out_h = 120 + self.out_w = 50 + + +class TestCase2Uint8(TestBilinearInterpOpUint8): + def init_test_case(self): + self.input_shape = [4, 1, 7, 8] + self.out_h = 5 + self.out_w = 13 + self.out_size = np.array([6, 15]).astype("int32") + + if __name__ == "__main__": unittest.main() diff --git a/python/paddle/fluid/tests/unittests/test_bipartite_match_op.py b/python/paddle/fluid/tests/unittests/test_bipartite_match_op.py index f7461ee6da..ceeca25b74 100644 --- a/python/paddle/fluid/tests/unittests/test_bipartite_match_op.py +++ b/python/paddle/fluid/tests/unittests/test_bipartite_match_op.py @@ -48,7 +48,7 @@ def bipartite_match(distance, match_indices, match_dist): def 
argmax_match(distance, match_indices, match_dist, threshold): r, c = distance.shape - for j in xrange(c): + for j in range(c): if match_indices[j] != -1: continue col_dist = distance[:, j] @@ -65,23 +65,25 @@ def batch_bipartite_match(distance, lod, match_type=None, dist_threshold=None): distance (numpy.array) : The distance of two entries with shape [M, N]. lod (list of int): The offsets of each input in this batch. """ - n = len(lod) - 1 + n = len(lod) m = distance.shape[1] match_indices = -1 * np.ones((n, m), dtype=np.int) match_dist = np.zeros((n, m), dtype=np.float32) - for i in range(len(lod) - 1): - bipartite_match(distance[lod[i]:lod[i + 1], :], match_indices[i, :], - match_dist[i, :]) + cur_offset = 0 + for i in range(n): + bipartite_match(distance[cur_offset:(cur_offset + lod[i]), :], + match_indices[i, :], match_dist[i, :]) if match_type == 'per_prediction': - argmax_match(distance[lod[i]:lod[i + 1], :], match_indices[i, :], - match_dist[i, :], dist_threshold) + argmax_match(distance[cur_offset:(cur_offset + lod[i]), :], + match_indices[i, :], match_dist[i, :], dist_threshold) + cur_offset += lod[i] return match_indices, match_dist class TestBipartiteMatchOpWithLoD(OpTest): def setUp(self): self.op_type = 'bipartite_match' - lod = [[0, 5, 11, 23]] + lod = [[5, 6, 12]] dist = np.random.random((23, 217)).astype('float32') match_indices, match_dist = batch_bipartite_match(dist, lod[0]) @@ -98,7 +100,7 @@ class TestBipartiteMatchOpWithLoD(OpTest): class TestBipartiteMatchOpWithoutLoD(OpTest): def setUp(self): self.op_type = 'bipartite_match' - lod = [[0, 8]] + lod = [[8]] dist = np.random.random((8, 17)).astype('float32') match_indices, match_dist = batch_bipartite_match(dist, lod[0]) @@ -112,10 +114,27 @@ class TestBipartiteMatchOpWithoutLoD(OpTest): self.check_output() +class TestBipartiteMatchOpWithoutLoDLargeScaleInput(OpTest): + def setUp(self): + self.op_type = 'bipartite_match' + lod = [[300]] + dist = np.random.random((300, 17)).astype('float32') + match_indices, match_dist = batch_bipartite_match(dist, lod[0]) + + self.inputs = {'DistMat': dist} + self.outputs = { + 'ColToRowMatchIndices': match_indices, + 'ColToRowMatchDist': match_dist, + } + + def test_check_output(self): + self.check_output() + + class TestBipartiteMatchOpWithPerPredictionType(OpTest): def setUp(self): self.op_type = 'bipartite_match' - lod = [[0, 5, 11, 23]] + lod = [[5, 6, 12]] dist = np.random.random((23, 237)).astype('float32') match_indices, match_dist = batch_bipartite_match(dist, lod[0], 'per_prediction', 0.5) diff --git a/python/paddle/fluid/tests/unittests/test_box_coder_op.py b/python/paddle/fluid/tests/unittests/test_box_coder_op.py index 56f5af91d8..4ce9a4783e 100644 --- a/python/paddle/fluid/tests/unittests/test_box_coder_op.py +++ b/python/paddle/fluid/tests/unittests/test_box_coder_op.py @@ -19,7 +19,8 @@ import math from op_test import OpTest -def box_coder(target_box, prior_box, prior_box_var, output_box, code_type): +def box_coder(target_box, prior_box, prior_box_var, output_box, code_type, + box_normalized): prior_box_x = ( (prior_box[:, 2] + prior_box[:, 0]) / 2).reshape(1, prior_box.shape[0]) prior_box_y = ( @@ -30,6 +31,9 @@ def box_coder(target_box, prior_box, prior_box_var, output_box, code_type): (prior_box[:, 3] - prior_box[:, 1])).reshape(1, prior_box.shape[0]) prior_box_var = prior_box_var.reshape(1, prior_box_var.shape[0], prior_box_var.shape[1]) + if not box_normalized: + prior_box_height = prior_box_height + 1 + prior_box_width = prior_box_width + 1 if (code_type == 
"EncodeCenterSize"): target_box_x = ((target_box[:, 2] + target_box[:, 0]) / 2).reshape( @@ -40,6 +44,9 @@ def box_coder(target_box, prior_box, prior_box_var, output_box, code_type): target_box.shape[0], 1) target_box_height = ((target_box[:, 3] - target_box[:, 1])).reshape( target_box.shape[0], 1) + if not box_normalized: + target_box_height = target_box_height + 1 + target_box_width = target_box_width + 1 output_box[:,:,0] = (target_box_x - prior_box_x) / prior_box_width / \ prior_box_var[:,:,0] @@ -64,21 +71,29 @@ def box_coder(target_box, prior_box, prior_box_var, output_box, code_type): output_box[:, :, 1] = target_box_y - target_box_height / 2 output_box[:, :, 2] = target_box_x + target_box_width / 2 output_box[:, :, 3] = target_box_y + target_box_height / 2 + if not box_normalized: + output_box[:, :, 2] = output_box[:, :, 2] - 1 + output_box[:, :, 3] = output_box[:, :, 3] - 1 -def batch_box_coder(prior_box, prior_box_var, target_box, lod, code_type): +def batch_box_coder(prior_box, prior_box_var, target_box, lod, code_type, + box_normalized): n = target_box.shape[0] m = prior_box.shape[0] output_box = np.zeros((n, m, 4), dtype=np.float32) - for i in range(len(lod) - 1): + cur_offset = 0 + for i in range(len(lod)): if (code_type == "EncodeCenterSize"): - box_coder(target_box[lod[i]:lod[i + 1], :], prior_box, - prior_box_var, output_box[lod[i]:lod[i + 1], :, :], - code_type) + box_coder(target_box[cur_offset:(cur_offset + lod[i]), :], + prior_box, prior_box_var, + output_box[cur_offset:(cur_offset + lod[i]), :, :], + code_type, box_normalized) elif (code_type == "DecodeCenterSize"): - box_coder(target_box[lod[i]:lod[i + 1], :, :], prior_box, - prior_box_var, output_box[lod[i]:lod[i + 1], :, :], - code_type) + box_coder(target_box[cur_offset:(cur_offset + lod[i]), :, :], + prior_box, prior_box_var, + output_box[cur_offset:(cur_offset + lod[i]), :, :], + code_type, box_normalized) + cur_offset += lod[i] return output_box @@ -88,20 +103,50 @@ class TestBoxCoderOp(OpTest): def setUp(self): self.op_type = "box_coder" - lod = [[0, 1, 2, 3, 4, 5]] + lod = [[1, 1, 1, 1, 1]] prior_box = np.random.random((10, 4)).astype('float32') prior_box_var = np.random.random((10, 4)).astype('float32') target_box = np.random.random((5, 10, 4)).astype('float32') code_type = "DecodeCenterSize" + box_normalized = False output_box = batch_box_coder(prior_box, prior_box_var, target_box, - lod[0], code_type) + lod[0], code_type, box_normalized) self.inputs = { 'PriorBox': prior_box, 'PriorBoxVar': prior_box_var, 'TargetBox': target_box, } - self.attrs = {'code_type': 'decode_center_size'} + self.attrs = { + 'code_type': 'decode_center_size', + 'box_normalized': False + } + self.outputs = {'OutputBox': output_box} + + +class TestBoxCoderOpWithoutBoxVar(OpTest): + def test_check_output(self): + self.check_output() + + def setUp(self): + self.op_type = "box_coder" + lod = [[0, 1, 2, 3, 4, 5]] + prior_box = np.random.random((10, 4)).astype('float32') + prior_box_var = np.ones((10, 4)).astype('float32') + target_box = np.random.random((5, 10, 4)).astype('float32') + code_type = "DecodeCenterSize" + box_normalized = False + output_box = batch_box_coder(prior_box, prior_box_var, target_box, + lod[0], code_type, box_normalized) + + self.inputs = { + 'PriorBox': prior_box, + 'TargetBox': target_box, + } + self.attrs = { + 'code_type': 'decode_center_size', + 'box_normalized': False + } self.outputs = {'OutputBox': output_box} @@ -111,20 +156,21 @@ class TestBoxCoderOpWithLoD(OpTest): def setUp(self): self.op_type = 
"box_coder" - lod = [[0, 4, 12, 20]] + lod = [[4, 8, 8]] prior_box = np.random.random((10, 4)).astype('float32') prior_box_var = np.random.random((10, 4)).astype('float32') target_box = np.random.random((20, 4)).astype('float32') code_type = "EncodeCenterSize" + box_normalized = True output_box = batch_box_coder(prior_box, prior_box_var, target_box, - lod[0], code_type) + lod[0], code_type, box_normalized) self.inputs = { 'PriorBox': prior_box, 'PriorBoxVar': prior_box_var, 'TargetBox': (target_box, lod), } - self.attrs = {'code_type': 'encode_center_size'} + self.attrs = {'code_type': 'encode_center_size', 'box_normalized': True} self.outputs = {'OutputBox': output_box} diff --git a/python/paddle/fluid/tests/unittests/test_calc_gradient.py b/python/paddle/fluid/tests/unittests/test_calc_gradient.py index 06e676cd83..7f2a9e6971 100644 --- a/python/paddle/fluid/tests/unittests/test_calc_gradient.py +++ b/python/paddle/fluid/tests/unittests/test_calc_gradient.py @@ -16,8 +16,6 @@ import unittest import paddle.fluid as fluid import paddle.fluid.layers as layers -import paddle.fluid.framework as framework -import paddle.fluid.optimizer as optimizer from paddle.fluid.backward import calc_gradient diff --git a/python/paddle/fluid/tests/unittests/test_chunk_eval_op.py b/python/paddle/fluid/tests/unittests/test_chunk_eval_op.py index 050df2801c..354110f1f9 100644 --- a/python/paddle/fluid/tests/unittests/test_chunk_eval_op.py +++ b/python/paddle/fluid/tests/unittests/test_chunk_eval_op.py @@ -63,7 +63,7 @@ class TestChunkEvalOp(OpTest): # generate chunk beginnings chunk_begins = sorted( np.random.choice( - range(starts[-1]), num_chunks, replace=False)) + list(range(starts[-1])), num_chunks, replace=False)) seq_chunk_begins = [] begin_idx = 0 # divide chunks into sequences @@ -93,7 +93,7 @@ class TestChunkEvalOp(OpTest): self.num_infer_chunks + self.num_label_chunks - self.num_correct_chunks) correct_chunks = np.random.choice( - range(len(chunks)), self.num_correct_chunks, replace=False) + list(range(len(chunks))), self.num_correct_chunks, replace=False) infer_chunks = np.random.choice( [x for x in range(len(chunks)) if x not in correct_chunks], self.num_infer_chunks - self.num_correct_chunks, @@ -138,16 +138,17 @@ class TestChunkEvalOp(OpTest): infer.fill(self.num_chunk_types * self.num_tag_types) label = np.copy(infer) starts = np.random.choice( - range(1, self.batch_size), self.num_sequences - 1, + list(range(1, self.batch_size)), + self.num_sequences - 1, replace=False).tolist() starts.extend([0, self.batch_size]) starts = sorted(starts) self.num_correct_chunks, self.num_infer_chunks, self.num_label_chunks = self.gen_chunks( infer, label, starts) - self.inputs = { - 'Inference': (infer, [starts]), - 'Label': (label, [starts]) - } + lod = [] + for i in range(len(starts) - 1): + lod.append(starts[i + 1] - starts[i]) + self.inputs = {'Inference': (infer, [lod]), 'Label': (label, [lod])} precision = float( self.num_correct_chunks ) / self.num_infer_chunks if self.num_infer_chunks else 0 diff --git a/python/paddle/fluid/tests/unittests/test_concat_op.py b/python/paddle/fluid/tests/unittests/test_concat_op.py index 1e00d67d54..e9f3c45dc4 100644 --- a/python/paddle/fluid/tests/unittests/test_concat_op.py +++ b/python/paddle/fluid/tests/unittests/test_concat_op.py @@ -43,7 +43,7 @@ class TestConcatOp(OpTest): self.axis = 1 -class TestConcatOp2(OpTest): +class TestConcatOp2(TestConcatOp): def init_test_data(self): self.x0 = np.random.random((2, 3, 4, 5)).astype('float32') self.x1 = np.random.random((2, 
3, 4, 5)).astype('float32') @@ -51,5 +51,16 @@ class TestConcatOp2(OpTest): self.axis = 1 +class TestConcatOp3(TestConcatOp): + def init_test_data(self): + self.x0 = np.random.random((1, 256, 170, 256)).astype('float32') + self.x1 = np.random.random((1, 128, 170, 256)).astype('float32') + self.x2 = np.random.random((1, 128, 170, 256)).astype('float32') + self.axis = 1 + + def test_check_grad(self): + pass + + if __name__ == '__main__': unittest.main() diff --git a/python/paddle/fluid/tests/unittests/test_conditional_block.py b/python/paddle/fluid/tests/unittests/test_conditional_block.py index 084b8d3738..77869a1242 100644 --- a/python/paddle/fluid/tests/unittests/test_conditional_block.py +++ b/python/paddle/fluid/tests/unittests/test_conditional_block.py @@ -18,14 +18,15 @@ import paddle.fluid.core as core from paddle.fluid.framework import default_startup_program, default_main_program from paddle.fluid.executor import Executor from paddle.fluid.backward import append_backward +from paddle.fluid.layers.control_flow import ConditionalBlock import numpy -class ConditionalBlock(unittest.TestCase): +class ConditionalBlockTest(unittest.TestCase): def test_forward(self): data = layers.data(name='X', shape=[1], dtype='float32') data.stop_gradient = False - cond = layers.ConditionalBlock(inputs=[data]) + cond = ConditionalBlock(inputs=[data]) out = layers.create_tensor(dtype='float32') with cond.block(): hidden = layers.fc(input=data, size=10) @@ -38,7 +39,7 @@ class ConditionalBlock(unittest.TestCase): x = numpy.random.random(size=(10, 1)).astype('float32') outs = exe.run(feed={'X': x}, fetch_list=[out])[0] - print outs + print(outs) loss = layers.mean(out) append_backward(loss=loss) outs = exe.run( @@ -46,7 +47,7 @@ class ConditionalBlock(unittest.TestCase): fetch_list=[ default_main_program().block(0).var(data.name + "@GRAD") ])[0] - print outs + print(outs) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/test_const_value.py b/python/paddle/fluid/tests/unittests/test_const_value.py index d1075d514e..58ac6fa0a9 100644 --- a/python/paddle/fluid/tests/unittests/test_const_value.py +++ b/python/paddle/fluid/tests/unittests/test_const_value.py @@ -16,7 +16,7 @@ import unittest import paddle.fluid.framework as framework -class ConditionalBlock(unittest.TestCase): +class ConstantTest(unittest.TestCase): def test_const_value(self): self.assertEqual(framework.GRAD_VAR_SUFFIX, "@GRAD") self.assertEqual(framework.TEMP_VAR_NAME, "@TEMP@") diff --git a/python/paddle/fluid/tests/unittests/test_conv2d_mkldnn_op.py b/python/paddle/fluid/tests/unittests/test_conv2d_mkldnn_op.py index db6be21baa..d0de7ad52c 100644 --- a/python/paddle/fluid/tests/unittests/test_conv2d_mkldnn_op.py +++ b/python/paddle/fluid/tests/unittests/test_conv2d_mkldnn_op.py @@ -20,16 +20,19 @@ from test_conv2d_op import TestConv2dOp, TestWithPad, TestWithStride class TestMKLDNN(TestConv2dOp): def init_kernel_type(self): self.use_mkldnn = True + self.data_format = "NCHW" class TestMKLDNNWithPad(TestWithPad): def init_kernel_type(self): self.use_mkldnn = True + self.data_format = "NCHW" class TestMKLDNNWithStride(TestWithStride): def init_kernel_type(self): self.use_mkldnn = True + self.data_format = "NCHW" if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/test_conv2d_op.py b/python/paddle/fluid/tests/unittests/test_conv2d_op.py index a478649541..bb1cd87d61 100644 --- a/python/paddle/fluid/tests/unittests/test_conv2d_op.py +++ b/python/paddle/fluid/tests/unittests/test_conv2d_op.py @@ 
-66,6 +66,7 @@ class TestConv2dOp(OpTest): self.op_type = "conv2d" self.use_cudnn = False self.use_mkldnn = False + self.data_format = "AnyLayout" self.dtype = np.float32 self.init_kernel_type() self.init_group() @@ -93,7 +94,8 @@ class TestConv2dOp(OpTest): 'groups': self.groups, 'dilations': self.dilations, 'use_cudnn': self.use_cudnn, - 'use_mkldnn': self.use_mkldnn + 'use_mkldnn': self.use_mkldnn, + 'data_format': self.data_format } self.outputs = {'Output': output} @@ -101,59 +103,35 @@ class TestConv2dOp(OpTest): return core.is_compiled_with_cuda() and self.use_cudnn def test_check_output(self): - if self.testcudnn(): - place = core.CUDAPlace(0) - self.check_output_with_place(place, atol=1e-5) - else: - self.check_output() + place = core.CUDAPlace(0) if self.testcudnn() else core.CPUPlace() + self.check_output_with_place(place, atol=1e-5) def test_check_grad(self): if self.dtype == np.float16: return - if self.testcudnn(): - place = core.CUDAPlace(0) - self.check_grad_with_place( - place, - set(['Input', 'Filter']), - 'Output', - max_relative_error=0.02) - else: - self.check_grad( - set(['Input', 'Filter']), 'Output', max_relative_error=0.02) + place = core.CUDAPlace(0) if self.testcudnn() else core.CPUPlace() + self.check_grad_with_place( + place, set(['Input', 'Filter']), 'Output', max_relative_error=0.02) def test_check_grad_no_filter(self): if self.dtype == np.float16: return - if self.testcudnn(): - place = core.CUDAPlace(0) - self.check_grad_with_place( - place, ['Input'], - 'Output', - max_relative_error=0.02, - no_grad_set=set(['Filter'])) - else: - self.check_grad( - ['Input'], - 'Output', - max_relative_error=0.02, - no_grad_set=set(['Filter'])) + place = core.CUDAPlace(0) if self.testcudnn() else core.CPUPlace() + self.check_grad_with_place( + place, ['Input'], + 'Output', + max_relative_error=0.02, + no_grad_set=set(['Filter'])) def test_check_grad_no_input(self): if self.dtype == np.float16: return - if self.testcudnn(): - place = core.CUDAPlace(0) - self.check_grad_with_place( - place, ['Filter'], - 'Output', - max_relative_error=0.02, - no_grad_set=set(['Input'])) - else: - self.check_grad( - ['Filter'], - 'Output', - max_relative_error=0.02, - no_grad_set=set(['Input'])) + place = core.CUDAPlace(0) if self.testcudnn() else core.CPUPlace() + self.check_grad_with_place( + place, ['Filter'], + 'Output', + max_relative_error=0.02, + no_grad_set=set(['Input'])) def init_test_case(self): self.pad = [0, 0] diff --git a/python/paddle/fluid/tests/unittests/test_conv2d_transpose_op.py b/python/paddle/fluid/tests/unittests/test_conv2d_transpose_op.py index d864b9b348..af6cd99b0d 100644 --- a/python/paddle/fluid/tests/unittests/test_conv2d_transpose_op.py +++ b/python/paddle/fluid/tests/unittests/test_conv2d_transpose_op.py @@ -21,8 +21,11 @@ from op_test import OpTest def conv2dtranspose_forward_naive(input_, filter_, attrs): in_n, in_c, in_h, in_w = input_.shape - f_c, out_c, f_h, f_w = filter_.shape + f_c, f_out_c, f_h, f_w = filter_.shape + groups = attrs['groups'] assert in_c == f_c + out_c = f_out_c * groups + sub_in_c = in_c / groups stride, pad, dilations = attrs['strides'], attrs['paddings'], attrs[ 'dilations'] @@ -36,15 +39,21 @@ def conv2dtranspose_forward_naive(input_, filter_, attrs): for n in range(in_n): for i in range(in_h): for j in range(in_w): - input_masked = input_[n, :, i, j] # (c) - input_masked = np.reshape(input_masked, (in_c, 1, 1)) - input_masked = np.tile(input_masked, (1, f_h, f_w)) - - for k in range(out_c): - tmp_out = np.sum(input_masked * 
filter_[:, k, :, :], axis=0) - i1, i2 = i * stride[0], i * stride[0] + d_bolck_h - j1, j2 = j * stride[0], j * stride[0] + d_bolck_h - out[n, k, i1:i2:dilations[0], j1:j2:dilations[1]] += tmp_out + for g in range(groups): + input_masked = input_[n, g * sub_in_c:(g + 1) * sub_in_c, i, + j] # (c) + input_masked = np.reshape(input_masked, (sub_in_c, 1, 1)) + input_masked = np.tile(input_masked, (1, f_h, f_w)) + + for k in range(f_out_c): + tmp_out = np.sum( + input_masked * + filter_[g * sub_in_c:(g + 1) * sub_in_c, k, :, :], + axis=0) + i1, i2 = i * stride[0], i * stride[0] + d_bolck_h + j1, j2 = j * stride[0], j * stride[0] + d_bolck_h + out[n, g * f_out_c + k, i1:i2:dilations[0], j1:j2: + dilations[1]] += tmp_out out = out[:, :, pad[0]:out_h - pad[0], pad[1]:out_w - pad[1]] return out @@ -64,6 +73,7 @@ class TestConv2dTransposeOp(OpTest): self.attrs = { 'strides': self.stride, 'paddings': self.pad, + 'groups': self.groups, 'dilations': self.dilations, 'use_cudnn': self.use_cudnn, 'data_format': 'AnyLayout' # TODO(dzhwinter) : should be fix latter @@ -127,6 +137,7 @@ class TestConv2dTransposeOp(OpTest): self.pad = [0, 0] self.stride = [1, 1] self.dilations = [1, 1] + self.groups = 1 self.input_size = [2, 3, 5, 5] # NCHW f_c = self.input_size[1] self.filter_size = [f_c, 6, 3, 3] @@ -140,16 +151,29 @@ class TestWithPad(TestConv2dTransposeOp): self.pad = [1, 1] self.stride = [1, 1] self.dilations = [1, 1] + self.groups = 1 self.input_size = [2, 3, 5, 5] # NCHW f_c = self.input_size[1] self.filter_size = [f_c, 6, 3, 3] +class TestWithGroups(TestConv2dTransposeOp): + def init_test_case(self): + self.pad = [1, 1] + self.stride = [1, 1] + self.dilations = [1, 1] + self.groups = 2 + self.input_size = [2, 4, 5, 5] # NCHW + f_c = self.input_size[1] + self.filter_size = [f_c, 3, 3, 3] + + class TestWithStride(TestConv2dTransposeOp): def init_test_case(self): self.pad = [1, 1] self.stride = [2, 2] self.dilations = [1, 1] + self.groups = 1 self.input_size = [2, 3, 5, 5] # NCHW f_c = self.input_size[1] self.filter_size = [f_c, 6, 3, 3] @@ -159,6 +183,7 @@ class TestWithDilation(TestConv2dTransposeOp): def init_test_case(self): self.pad = [1, 1] self.stride = [1, 1] + self.groups = 1 self.dilations = [2, 2] self.input_size = [2, 3, 5, 5] # NCHW f_c = self.input_size[1] @@ -166,16 +191,21 @@ class TestWithDilation(TestConv2dTransposeOp): # ------------ test_cudnn ------------ +@unittest.skipIf(not core.is_compiled_with_cuda(), + "core is not compiled with CUDA") class TestCUDNN(TestConv2dTransposeOp): def init_op_type(self): self.use_cudnn = True self.op_type = "conv2d_transpose" +@unittest.skipIf(not core.is_compiled_with_cuda(), + "core is not compiled with CUDA") class TestCUDNNWithPad(TestWithPad): def init_test_case(self): self.pad = [1, 1] self.stride = [1, 1] + self.groups = 1 self.dilations = [1, 1] self.input_size = [2, 3, 5, 5] # NCHW f_c = self.input_size[1] @@ -186,10 +216,13 @@ class TestCUDNNWithPad(TestWithPad): self.op_type = "conv2d_transpose" +@unittest.skipIf(not core.is_compiled_with_cuda(), + "core is not compiled with CUDA") class TestCUDNNWithStride(TestWithStride): def init_test_case(self): self.pad = [1, 1] self.stride = [2, 2] + self.groups = 1 self.dilations = [1, 1] self.input_size = [2, 3, 5, 5] # NCHW f_c = self.input_size[1] @@ -200,6 +233,36 @@ class TestCUDNNWithStride(TestWithStride): self.op_type = "conv2d_transpose" +@unittest.skipIf(not core.is_compiled_with_cuda(), + "core is not compiled with CUDA") +class TestCUDNNWithGroups(TestWithGroups): + def 
init_test_case(self): + self.pad = [1, 1] + self.stride = [1, 1] + self.dilations = [1, 1] + self.groups = 2 + self.input_size = [2, 4, 5, 5] # NCHW + f_c = self.input_size[1] + self.filter_size = [f_c, 3, 3, 3] + + def init_op_type(self): + self.use_cudnn = True + self.op_type = "conv2d_transpose" + + +class TestDepthwiseConvTranspose(TestConv2dTransposeOp): + def init_test_case(self): + self.pad = [1, 1] + self.stride = [2, 2] + self.dilations = [1, 1] + self.input_size = [2, 8, 16, 16] # NCHW + self.groups = 8 + assert np.mod(self.input_size[1], self.groups) == 0 + f_c = self.input_size[1] / self.groups + self.filter_size = [self.input_size[1], f_c, 4, 4] + self.op_type = "depthwise_conv2d_transpose" + + # Please Don't remove the following code. # Currently, CI use cudnn V5.0 which not support dilation conv. # class TestCUDNNWithDilation(TestWithDilation): diff --git a/python/paddle/fluid/tests/unittests/test_conv3d_transpose_op.py b/python/paddle/fluid/tests/unittests/test_conv3d_transpose_op.py index 55ba238710..300fa5e8bd 100644 --- a/python/paddle/fluid/tests/unittests/test_conv3d_transpose_op.py +++ b/python/paddle/fluid/tests/unittests/test_conv3d_transpose_op.py @@ -21,8 +21,11 @@ from op_test import OpTest def conv3dtranspose_forward_naive(input_, filter_, attrs): in_n, in_c, in_d, in_h, in_w = input_.shape - f_c, out_c, f_d, f_h, f_w = filter_.shape + f_c, f_out_c, f_d, f_h, f_w = filter_.shape + groups = attrs['groups'] assert in_c == f_c + out_c = f_out_c * groups + sub_in_c = in_c / groups stride, pad, dilations = attrs['strides'], attrs['paddings'], attrs[ 'dilations'] @@ -39,18 +42,23 @@ def conv3dtranspose_forward_naive(input_, filter_, attrs): for d in range(in_d): for i in range(in_h): for j in range(in_w): - input_masked = input_[n, :, d, i, j] # (c) - input_masked = np.reshape(input_masked, (in_c, 1, 1, 1)) - input_masked = np.tile(input_masked, (1, f_d, f_h, f_w)) - - for k in range(out_c): - tmp_out = np.sum(input_masked * filter_[:, k, :, :, :], - axis=0) - d1, d2 = d * stride[0], d * stride[0] + d_bolck_d - i1, i2 = i * stride[1], i * stride[1] + d_bolck_h - j1, j2 = j * stride[2], j * stride[2] + d_bolck_w - out[n, k, d1:d2:dilations[0], i1:i2:dilations[1], j1:j2: - dilations[2]] += tmp_out + for g in range(groups): + input_masked = input_[n, g * sub_in_c:(g + 1 + ) * sub_in_c, d, + i, j] # (c) + input_masked = np.reshape(input_masked, + (sub_in_c, 1, 1, 1)) + input_masked = np.tile(input_masked, (1, f_d, f_h, f_w)) + + for k in range(f_out_c): + tmp_out = np.sum(input_masked * filter_[ + g * sub_in_c:(g + 1) * sub_in_c, k, :, :, :], + axis=0) + d1, d2 = d * stride[0], d * stride[0] + d_bolck_d + i1, i2 = i * stride[1], i * stride[1] + d_bolck_h + j1, j2 = j * stride[2], j * stride[2] + d_bolck_w + out[n, g * f_out_c + k, d1:d2:dilations[0], i1:i2: + dilations[1], j1:j2:dilations[2]] += tmp_out out = out[:, :, pad[0]:out_d - pad[0], pad[1]:out_h - pad[1], pad[2]:out_w - pad[2]] @@ -72,6 +80,7 @@ class TestConv3dTransposeOp(OpTest): 'strides': self.stride, 'paddings': self.pad, 'dilations': self.dilations, + 'groups': self.groups, 'use_cudnn': self.use_cudnn, 'data_format': 'AnyLayout' # TODO(dzhwinter) : should be fix latter } @@ -134,6 +143,7 @@ class TestConv3dTransposeOp(OpTest): self.pad = [0, 0, 0] self.stride = [1, 1, 1] self.dilations = [1, 1, 1] + self.groups = 1 self.input_size = [2, 3, 5, 5, 5] # NCDHW f_c = self.input_size[1] self.filter_size = [f_c, 6, 3, 3, 3] @@ -147,16 +157,29 @@ class TestWithPad(TestConv3dTransposeOp): self.pad = [1, 1, 1] 
self.stride = [1, 1, 1] self.dilations = [1, 1, 1] + self.groups = 1 self.input_size = [2, 3, 5, 5, 5] # NCDHW f_c = self.input_size[1] self.filter_size = [f_c, 6, 3, 3, 3] +class TestWithGroups(TestConv3dTransposeOp): + def init_test_case(self): + self.pad = [1, 1, 1] + self.stride = [1, 1, 1] + self.dilations = [1, 1, 1] + self.groups = 2 + self.input_size = [2, 4, 5, 5, 5] # NCHW + f_c = self.input_size[1] + self.filter_size = [f_c, 3, 3, 3, 3] + + class TestWithStride(TestConv3dTransposeOp): def init_test_case(self): self.pad = [1, 1, 1] self.stride = [2, 2, 2] self.dilations = [1, 1, 1] + self.groups = 1 self.input_size = [2, 3, 5, 5, 5] # NCDHW f_c = self.input_size[1] self.filter_size = [f_c, 6, 3, 3, 3] @@ -167,23 +190,29 @@ class TestWithDilation(TestConv3dTransposeOp): self.pad = [1, 1, 1] self.stride = [1, 1, 1] self.dilations = [2, 2, 2] + self.groups = 1 self.input_size = [2, 3, 5, 5, 5] # NCDHW f_c = self.input_size[1] self.filter_size = [f_c, 6, 3, 3, 3] # ------------ test_cudnn ------------ +@unittest.skipIf(not core.is_compiled_with_cuda(), + "core is not compiled with CUDA") class TestCUDNN(TestConv3dTransposeOp): def init_op_type(self): self.use_cudnn = True self.op_type = "conv3d_transpose" +@unittest.skipIf(not core.is_compiled_with_cuda(), + "core is not compiled with CUDA") class TestCUDNNWithPad(TestWithPad): def init_test_case(self): self.pad = [1, 1, 1] self.stride = [1, 1, 1] self.dilations = [1, 1, 1] + self.groups = 1 self.input_size = [2, 3, 5, 5, 5] # NCDHW f_c = self.input_size[1] self.filter_size = [f_c, 6, 3, 3, 3] @@ -193,11 +222,14 @@ class TestCUDNNWithPad(TestWithPad): self.op_type = "conv3d_transpose" +@unittest.skipIf(not core.is_compiled_with_cuda(), + "core is not compiled with CUDA") class TestCUDNNWithStride(TestWithStride): def init_test_case(self): self.pad = [1, 1, 1] self.stride = [2, 2, 2] self.dilations = [1, 1, 1] + self.groups = 1 self.input_size = [2, 3, 5, 5, 5] # NCDHW f_c = self.input_size[1] self.filter_size = [f_c, 6, 3, 3, 3] @@ -207,6 +239,23 @@ class TestCUDNNWithStride(TestWithStride): self.op_type = "conv3d_transpose" +@unittest.skipIf(not core.is_compiled_with_cuda(), + "core is not compiled with CUDA") +class TestCUDNNWithGroups(TestWithGroups): + def init_test_case(self): + self.pad = [1, 1, 1] + self.stride = [1, 1, 1] + self.dilations = [1, 1, 1] + self.groups = 2 + self.input_size = [2, 4, 5, 5, 5] # NCHW + f_c = self.input_size[1] + self.filter_size = [f_c, 3, 3, 3, 3] + + def init_op_type(self): + self.use_cudnn = True + self.op_type = "conv3d_transpose" + + # Please Don't remove the following code. # Currently, CI use cudnn V5.0 which not support dilation conv. 
# class TestCUDNNWithDilation(TestWithDilation): diff --git a/python/paddle/fluid/tests/unittests/test_conv_shift_op.py b/python/paddle/fluid/tests/unittests/test_conv_shift_op.py index 5d4d244f43..9fdb7baa90 100644 --- a/python/paddle/fluid/tests/unittests/test_conv_shift_op.py +++ b/python/paddle/fluid/tests/unittests/test_conv_shift_op.py @@ -22,8 +22,8 @@ def conv_shift_forward(x, y): M = x.shape[1] N = y.shape[1] y_half_width = (N - 1) / 2 - for i in xrange(M): - for j in xrange(N): + for i in range(M): + for j in range(N): out[:, i] += x[:, (i + j + M - y_half_width) % M] * y[:, j] return out diff --git a/python/paddle/fluid/tests/unittests/test_create_op_doc_string.py b/python/paddle/fluid/tests/unittests/test_create_op_doc_string.py index 5e6f9a20a9..07c89eefc3 100644 --- a/python/paddle/fluid/tests/unittests/test_create_op_doc_string.py +++ b/python/paddle/fluid/tests/unittests/test_create_op_doc_string.py @@ -18,7 +18,7 @@ import paddle.fluid.layers as layers class TestDocString(unittest.TestCase): def test_layer_doc_string(self): - print layers.dropout.__doc__ + print(layers.dropout.__doc__) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/test_crf_decoding_op.py b/python/paddle/fluid/tests/unittests/test_crf_decoding_op.py index f397f542bb..122b076c2d 100644 --- a/python/paddle/fluid/tests/unittests/test_crf_decoding_op.py +++ b/python/paddle/fluid/tests/unittests/test_crf_decoding_op.py @@ -22,9 +22,9 @@ from op_test import OpTest class CRFDecoding(object): def __init__(self, emission_weights, transition_weights, seq_start_positions): - assert (emission_weights.shape[0] == seq_start_positions[-1]) + assert (emission_weights.shape[0] == sum(seq_start_positions)) self.tag_num = emission_weights.shape[1] - self.seq_num = len(seq_start_positions) - 1 + self.seq_num = len(seq_start_positions) self.seq_start_positions = seq_start_positions self.x = emission_weights @@ -34,9 +34,9 @@ class CRFDecoding(object): self.w = transition_weights[2:, :] self.track = np.zeros( - (seq_start_positions[-1], self.tag_num), dtype="int64") + (sum(seq_start_positions), self.tag_num), dtype="int64") self.decoded_path = np.zeros( - (seq_start_positions[-1], 1), dtype="int64") + (sum(seq_start_positions), 1), dtype="int64") def _decode_one_sequence(self, decoded_path, x): seq_len, tag_num = x.shape @@ -71,9 +71,11 @@ class CRFDecoding(object): decoded_path[i - 1] = max_idx = track[i, max_idx] def decode(self): + cur_pos = 0 for i in range(self.seq_num): - start = self.seq_start_positions[i] - end = self.seq_start_positions[i + 1] + start = cur_pos + cur_pos += self.seq_start_positions[i] + end = cur_pos self._decode_one_sequence(self.decoded_path[start:end, :], self.x[start:end, :]) return self.decoded_path @@ -90,11 +92,13 @@ class TestCRFDecodingOp1(OpTest): TAG_NUM = 17 MAX_SEQ_LEN = 10 - lod = [[0]] + lod = [[]] + total_len = 0 for i in range(SEQ_NUM): - lod[-1].append(lod[-1][-1] + random.randint(1, MAX_SEQ_LEN)) + lod[-1].append(random.randint(1, MAX_SEQ_LEN)) + total_len += lod[-1][-1] emission = np.random.uniform(-1, 1, - [lod[-1][-1], TAG_NUM]).astype("float64") + [total_len, TAG_NUM]).astype("float64") transition = np.random.uniform(-0.5, 0.5, [TAG_NUM + 2, TAG_NUM]).astype("float64") @@ -126,7 +130,8 @@ class TestCRFDecodingOp2(OpTest): self.op_type = "crf_decoding" TAG_NUM = 5 - lod = [[0, 1, 3, 6, 10]] + lod = [[1, 2, 3, 4]] + total_len = sum(lod[-1]) transition = np.repeat( np.arange( TAG_NUM, dtype="float64").reshape(1, TAG_NUM), @@ -135,13 +140,13 @@ class 
TestCRFDecodingOp2(OpTest): emission = np.repeat( np.arange( TAG_NUM, dtype="float64").reshape(1, TAG_NUM), - lod[-1][-1], + total_len, axis=0) labels = np.random.randint( - low=0, high=TAG_NUM, size=(lod[-1][-1], 1), dtype="int64") + low=0, high=TAG_NUM, size=(total_len, 1), dtype="int64") predicted_labels = np.ones( - (lod[-1][-1], 1), dtype="int64") * (TAG_NUM - 1) + (total_len, 1), dtype="int64") * (TAG_NUM - 1) expected_output = (labels == predicted_labels).astype("int64") self.inputs = { diff --git a/python/paddle/fluid/tests/unittests/test_crop_op.py b/python/paddle/fluid/tests/unittests/test_crop_op.py index 20cc3a643f..4016089c01 100644 --- a/python/paddle/fluid/tests/unittests/test_crop_op.py +++ b/python/paddle/fluid/tests/unittests/test_crop_op.py @@ -42,9 +42,9 @@ class TestCropOp(OpTest): def setUp(self): self.op_type = "crop" self.crop_by_input = False + self.offset_by_input = False self.attrs = {} self.initTestCase() - self.attrs['offsets'] = self.offsets if self.crop_by_input: self.inputs = { 'X': np.random.random(self.x_shape).astype("float32"), @@ -55,6 +55,10 @@ class TestCropOp(OpTest): self.inputs = { 'X': np.random.random(self.x_shape).astype("float32"), } + if self.offset_by_input: + self.inputs['Offsets'] = np.array(self.offsets).astype('int32') + else: + self.attrs['offsets'] = self.offsets self.outputs = { 'Out': crop(self.inputs['X'], self.offsets, self.crop_shape) } @@ -101,5 +105,22 @@ class TestCase4(TestCropOp): self.crop_by_input = True +class TestCase5(TestCropOp): + def initTestCase(self): + self.x_shape = (3, 4, 5) + self.crop_shape = [2, 2, 3] + self.offsets = [1, 0, 2] + self.offset_by_input = True + + +class TestCase6(TestCropOp): + def initTestCase(self): + self.x_shape = (10, 9, 14) + self.crop_shape = [3, 3, 5] + self.offsets = [3, 5, 4] + self.crop_by_input = True + self.offset_by_input = True + + if __name__ == '__main__': unittest.main() diff --git a/python/paddle/fluid/tests/unittests/test_ctc_align.py b/python/paddle/fluid/tests/unittests/test_ctc_align.py index f166031a1c..131b4076f4 100644 --- a/python/paddle/fluid/tests/unittests/test_ctc_align.py +++ b/python/paddle/fluid/tests/unittests/test_ctc_align.py @@ -22,14 +22,16 @@ from test_softmax_op import stable_softmax def CTCAlign(input, lod, blank, merge_repeated): lod0 = lod[0] result = [] - for i in range(len(lod0) - 1): + cur_offset = 0 + for i in range(len(lod0)): prev_token = -1 - for j in range(lod0[i], lod0[i + 1]): + for j in range(cur_offset, cur_offset + lod0[i]): token = input[j][0] if (token != blank) and not (merge_repeated and token == prev_token): result.append(token) prev_token = token + cur_offset += lod0[i] result = np.array(result).reshape([len(result), 1]).astype("int32") if len(result) == 0: result = np.array([-1]) @@ -39,7 +41,7 @@ def CTCAlign(input, lod, blank, merge_repeated): class TestCTCAlignOp(OpTest): def config(self): self.op_type = "ctc_align" - self.input_lod = [[0, 11, 18]] + self.input_lod = [[11, 7]] self.blank = 0 self.merge_repeated = False self.input = np.array( @@ -66,7 +68,7 @@ class TestCTCAlignOp(OpTest): class TestCTCAlignOpCase1(TestCTCAlignOp): def config(self): self.op_type = "ctc_align" - self.input_lod = [[0, 11, 19]] + self.input_lod = [[11, 8]] self.blank = 0 self.merge_repeated = True self.input = np.array( @@ -77,7 +79,7 @@ class TestCTCAlignOpCase1(TestCTCAlignOp): class TestCTCAlignOpCase2(TestCTCAlignOp): def config(self): self.op_type = "ctc_align" - self.input_lod = [[0, 4]] + self.input_lod = [[4]] self.blank = 0 
self.merge_repeated = True self.input = np.array([0, 0, 0, 0]).reshape([4, 1]).astype("int32") diff --git a/python/paddle/fluid/tests/unittests/test_data_balance.py b/python/paddle/fluid/tests/unittests/test_data_balance.py new file mode 100644 index 0000000000..951282e8ba --- /dev/null +++ b/python/paddle/fluid/tests/unittests/test_data_balance.py @@ -0,0 +1,195 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import unittest +import paddle.fluid as fluid +import paddle.v2 as paddle +import numpy as np + + +class TestDataBalance(unittest.TestCase): + def prepare_data(self): + def fake_data_generator(): + for n in range(self.total_ins_num): + yield np.ones((3, 4)) * n, n + + # Prepare data + with fluid.program_guard(fluid.Program(), fluid.Program()): + reader = paddle.batch( + fake_data_generator, batch_size=self.batch_size) + feeder = fluid.DataFeeder( + feed_list=[ + fluid.layers.data( + name='image', shape=[3, 4], dtype='float32'), + fluid.layers.data( + name='label', shape=[1], dtype='int64'), + ], + place=fluid.CPUPlace()) + self.num_batches = fluid.recordio_writer.convert_reader_to_recordio_file( + self.data_file_name, reader, feeder) + + def prepare_lod_data(self): + def fake_data_generator(): + for n in range(1, self.total_ins_num + 1): + d1 = (np.ones((n, 3)) * n).astype('float32') + d2 = (np.array(n).reshape((1, 1))).astype('int32') + yield d1, d2 + + # Prepare lod data + with fluid.program_guard(fluid.Program(), fluid.Program()): + with fluid.recordio_writer.create_recordio_writer( + filename=self.lod_data_file_name) as writer: + eof = False + generator = fake_data_generator() + while (not eof): + data_batch = [ + np.array([]).reshape((0, 3)), np.array([]).reshape( + (0, 1)) + ] + lod = [0] + for _ in range(self.batch_size): + try: + ins = next(generator) + except StopIteration: + eof = True + break + for i, d in enumerate(ins): + data_batch[i] = np.concatenate( + (data_batch[i], d), axis=0) + lod.append(lod[-1] + ins[0].shape[0]) + if data_batch[0].shape[0] > 0: + for i, d in enumerate(data_batch): + t = fluid.LoDTensor() + t.set(data_batch[i], fluid.CPUPlace()) + if i == 0: + t.set_lod([lod]) + writer.append_tensor(t) + writer.complete_append_tensor() + + def setUp(self): + self.use_cuda = fluid.core.is_compiled_with_cuda() + self.data_file_name = './data_balance_test.recordio' + self.lod_data_file_name = './data_balance_with_lod_test.recordio' + self.total_ins_num = 50 + self.batch_size = 10 + self.prepare_data() + self.prepare_lod_data() + + def main(self): + main_prog = fluid.Program() + startup_prog = fluid.Program() + with fluid.program_guard(main_prog, startup_prog): + data_reader = fluid.layers.io.open_files( + filenames=[self.data_file_name], + shapes=[[-1, 3, 4], [-1, 1]], + lod_levels=[0, 0], + dtypes=['float32', 'int64']) + if self.use_cuda: + data_reader = fluid.layers.double_buffer(data_reader) + image, label = fluid.layers.read_file(data_reader) + + place = fluid.CUDAPlace(0) if self.use_cuda else 
fluid.CPUPlace() + exe = fluid.Executor(place) + exe.run(startup_prog) + + build_strategy = fluid.BuildStrategy() + build_strategy.enable_data_balance = True + parallel_exe = fluid.ParallelExecutor( + use_cuda=self.use_cuda, + main_program=main_prog, + build_strategy=build_strategy) + + if (parallel_exe.device_count > self.batch_size): + print("WARNING: Unittest TestDataBalance skipped. \ + For the result is not correct when device count \ + is larger than batch size.") + exit(0) + fetch_list = [image.name, label.name] + + data_appeared = [False] * self.total_ins_num + while (True): + try: + image_val, label_val = parallel_exe.run(fetch_list, + return_numpy=True) + except fluid.core.EOFException: + break + ins_num = image_val.shape[0] + broadcasted_label = np.ones( + (ins_num, 3, 4)) * label_val.reshape((ins_num, 1, 1)) + self.assertEqual(image_val.all(), broadcasted_label.all()) + for l in label_val: + self.assertFalse(data_appeared[l[0]]) + data_appeared[l[0]] = True + for i in data_appeared: + self.assertTrue(i) + + def main_lod(self): + main_prog = fluid.Program() + startup_prog = fluid.Program() + with fluid.program_guard(main_prog, startup_prog): + data_reader = fluid.layers.io.open_files( + filenames=[self.lod_data_file_name], + shapes=[[-1, 3], [-1, 1]], + lod_levels=[1, 0], + dtypes=['float32', 'int32']) + ins, label = fluid.layers.read_file(data_reader) + + place = fluid.CUDAPlace(0) if self.use_cuda else fluid.CPUPlace() + exe = fluid.Executor(place) + exe.run(startup_prog) + build_strategy = fluid.BuildStrategy() + build_strategy.enable_data_balance = True + parallel_exe = fluid.ParallelExecutor( + use_cuda=self.use_cuda, + main_program=main_prog, + build_strategy=build_strategy) + + if parallel_exe.device_count > self.batch_size: + print("WARNING: Unittest TestDataBalance skipped. 
\ + For the result is not correct when device count \ + is larger than batch size.") + exit(0) + fetch_list = [ins.name, label.name] + + data_appeared = [False] * self.total_ins_num + while (True): + try: + ins_tensor, label_tensor = parallel_exe.run( + fetch_list, return_numpy=False) + except fluid.core.EOFException: + break + + ins_val = np.array(ins_tensor) + label_val = np.array(label_tensor) + ins_lod = ins_tensor.lod()[0] + self.assertEqual(ins_val.shape[1], 3) + self.assertEqual(label_val.shape[1], 1) + self.assertEqual(len(ins_lod) - 1, label_val.shape[0]) + for i in range(0, len(ins_lod) - 1): + ins_elem = ins_val[ins_lod[i]:ins_lod[i + 1]][:] + label_elem = label_val[i][0] + self.assertEqual(ins_elem.all(), label_elem.all()) + self.assertFalse(data_appeared[int(label_elem - 1)]) + data_appeared[int(label_elem - 1)] = True + + for i in data_appeared: + self.assertTrue(i) + + def test_all(self): + self.main() + self.main_lod() + + +if __name__ == '__main__': + unittest.main() diff --git a/python/paddle/fluid/tests/unittests/test_debugger.py b/python/paddle/fluid/tests/unittests/test_debugger.py index 67b03f635b..870952f2f9 100644 --- a/python/paddle/fluid/tests/unittests/test_debugger.py +++ b/python/paddle/fluid/tests/unittests/test_debugger.py @@ -15,7 +15,7 @@ import unittest import paddle.fluid as fluid import paddle.fluid.core as core -from paddle.fluid import debuger +from paddle.fluid import debugger from paddle.fluid.framework import Program @@ -51,9 +51,9 @@ class TestDebugger(unittest.TestCase): outputs={"Out": mul_out}, attrs={"x_num_col_dims": 1}) - print(debuger.pprint_program_codes(p)) + print(debugger.pprint_program_codes(p)) - debuger.draw_block_graphviz(p.block(0), path="./test.dot") + debugger.draw_block_graphviz(p.block(0), path="./test.dot") if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/test_default_scope_funcs.py b/python/paddle/fluid/tests/unittests/test_default_scope_funcs.py index a3bf7b544b..868bcca881 100644 --- a/python/paddle/fluid/tests/unittests/test_default_scope_funcs.py +++ b/python/paddle/fluid/tests/unittests/test_default_scope_funcs.py @@ -39,7 +39,7 @@ class TestDefaultScopeFuncs(unittest.TestCase): self.assertTrue(i.is_int()) self.assertEqual(10, i.get_int()) - for _ in xrange(10): + for _ in range(10): scoped_function(__new_scope__) diff --git a/python/paddle/fluid/tests/unittests/test_desc_clone.py b/python/paddle/fluid/tests/unittests/test_desc_clone.py new file mode 100644 index 0000000000..8603d3a5b3 --- /dev/null +++ b/python/paddle/fluid/tests/unittests/test_desc_clone.py @@ -0,0 +1,196 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
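The new test_desc_clone.py beginning here checks Program.clone() by walking object __dict__s field by field (operator_equal, block_equal, program_equal below), skipping fields that hold Program/Block/ProgramDesc references; note the helpers use dict.iteritems(), so they run on Python 2 only. A minimal, self-contained sketch of the same comparison idea, with hypothetical names and none of the Paddle types:

# Hypothetical sketch of the __dict__-walking equality check used below;
# `skip` stands in for the Program/Block/ProgramDesc fields the real
# helpers step over, and items() replaces the Python-2 iteritems().
def fields_equal(a, b, skip=()):
    for k, v in a.__dict__.items():
        if k in skip:
            continue
        if v != b.__dict__[k]:
            raise ValueError("In fields_equal not equal:{0}\n".format(k))
    return True

class Node(object):
    def __init__(self, name, shape):
        self.name, self.shape = name, shape

assert fields_equal(Node("fc_w", (500, 1000)), Node("fc_w", (500, 1000)))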
+ +import numpy as np +import argparse +import time +import math + +import paddle +import paddle.fluid as fluid +import paddle.fluid.profiler as profiler +from paddle.fluid import core +import unittest +from multiprocessing import Process +import os +import signal +import collections + +SEED = 1 +DTYPE = "float32" +paddle.dataset.mnist.fetch() + + +# random seed must set before configuring the network. +# fluid.default_startup_program().random_seed = SEED +def cnn_model(data): + conv_pool_1 = fluid.nets.simple_img_conv_pool( + input=data, + filter_size=5, + num_filters=20, + pool_size=2, + pool_stride=2, + act="relu") + conv_pool_2 = fluid.nets.simple_img_conv_pool( + input=conv_pool_1, + filter_size=5, + num_filters=50, + pool_size=2, + pool_stride=2, + act="relu") + + # TODO(dzhwinter) : refine the initializer and random seed settting + SIZE = 10 + input_shape = conv_pool_2.shape + param_shape = [reduce(lambda a, b: a * b, input_shape[1:], 1)] + [SIZE] + scale = (2.0 / (param_shape[0]**2 * SIZE))**0.5 + + predict = fluid.layers.fc( + input=conv_pool_2, + size=SIZE, + act="softmax", + param_attr=fluid.param_attr.ParamAttr( + initializer=fluid.initializer.NormalInitializer( + loc=0.0, scale=scale))) + return predict + + +def get_model(batch_size): + # Input data + images = fluid.layers.data(name='pixel', shape=[1, 28, 28], dtype=DTYPE) + label = fluid.layers.data(name='label', shape=[1], dtype='int64') + + # Train program + predict = cnn_model(images) + cost = fluid.layers.cross_entropy(input=predict, label=label) + avg_cost = fluid.layers.mean(x=cost) + + # Evaluator + batch_size_tensor = fluid.layers.create_tensor(dtype='int64') + batch_acc = fluid.layers.accuracy( + input=predict, label=label, total=batch_size_tensor) + + inference_program = fluid.default_main_program().clone() + # Optimization + opt = fluid.optimizer.AdamOptimizer( + learning_rate=0.001, beta1=0.9, beta2=0.999) + + # Reader + train_reader = paddle.batch( + paddle.dataset.mnist.train(), batch_size=batch_size) + test_reader = paddle.batch( + paddle.dataset.mnist.test(), batch_size=batch_size) + opt.minimize(avg_cost) + return inference_program, avg_cost, train_reader, test_reader, batch_acc, predict + + +def get_transpiler(trainer_id, main_program, pserver_endpoints, trainers): + t = fluid.DistributeTranspiler() + t.transpile( + trainer_id=trainer_id, + program=main_program, + pservers=pserver_endpoints, + trainers=trainers) + return t + + +def operator_equal(a, b): + for k, v in a.__dict__.iteritems(): + if isinstance(v, fluid.framework.Program) or \ + isinstance(v, fluid.framework.Block): + continue + + elif isinstance(v, core.OpDesc): + if v.serialize_to_string() != b.__dict__[k].serialize_to_string(): + raise ValueError("In operator_equal not equal:{0}\n".format(k)) + + elif isinstance(v, collections.OrderedDict): + v0 = sorted(v.iteritems(), key=lambda x: x[0]) + v1 = sorted(b.__dict__[k].iteritems(), key=lambda x: x[0]) + + if v0 != v1: + raise ValueError("In operator_equal not equal:{0}\n".format(k)) + + elif (v != b.__dict__[k]): + raise ValueError("In operator_equal not equal:{0}\n".format(k)) + + return True + + +def block_equal(a, b): + for k, v in a.__dict__.iteritems(): + if isinstance(v, core.ProgramDesc) or isinstance( + v, fluid.framework.Program) or isinstance(v, core.BlockDesc): + continue + + elif k == "ops": + for i in range(0, len(a.ops)): + if not operator_equal(a.ops[i], b.ops[i]): + raise ValueError("In block_equal not equal:{0}\n".format(k)) + assert (len(a.ops) == len(b.ops)) + + elif 
isinstance(v, collections.OrderedDict): + v0 = sorted(v.iteritems(), key=lambda x: x[0]) + v1 = sorted(b.__dict__[k].iteritems(), key=lambda x: x[0]) + + if v0 != v1: + raise ValueError("In block_equal not equal:{0}\n".format(k)) + + elif (v != b.__dict__[k]): + raise ValueError("In block_equal not equal:{0}\n".format(k)) + + return True + + +def program_equal(a, b): + for k, v in a.__dict__.iteritems(): + if isinstance(v, core.ProgramDesc): + continue + + elif k == 'blocks': + for i in range(0, len(a.blocks)): + if not block_equal(a.blocks[i], b.blocks[i]): + raise ValueError("In operator_equal not equal:{0}\n".format( + k)) + return False + assert (len(a.blocks) == len(b.blocks)) + + elif (v != b.__dict__[k]): + raise ValueError("In program_equal not equal:{0}\n".format(k)) + + return True + + +class TestDistMnist(unittest.TestCase): + def test_desc_clone(self): + get_model(batch_size=20) + + pserver_endpoints = "127.0.0.1:9123" + trainers = 1 + current_endpoint = "127.0.0.1:9123" + t = get_transpiler(0, + fluid.default_main_program(), pserver_endpoints, + trainers) + + pserver_prog = t.get_pserver_program(current_endpoint) + startup_prog = t.get_startup_program(current_endpoint, pserver_prog) + main = pserver_prog.clone() + startup = startup_prog.clone() + + self.assertTrue(program_equal(main, pserver_prog)) + self.assertTrue(program_equal(startup, startup_prog)) + + +if __name__ == "__main__": + unittest.main() diff --git a/python/paddle/fluid/tests/unittests/test_detection_map_op.py b/python/paddle/fluid/tests/unittests/test_detection_map_op.py index a905a854ad..8b66d1b270 100644 --- a/python/paddle/fluid/tests/unittests/test_detection_map_op.py +++ b/python/paddle/fluid/tests/unittests/test_detection_map_op.py @@ -74,13 +74,13 @@ class TestDetectionMAPOp(OpTest): self.evaluate_difficult = True self.ap_type = "integral" - self.label_lod = [[0, 2, 4]] + self.label_lod = [[2, 2]] # label difficult xmin ymin xmax ymax self.label = [[1, 0, 0.1, 0.1, 0.3, 0.3], [1, 1, 0.6, 0.6, 0.8, 0.8], [2, 0, 0.3, 0.3, 0.6, 0.5], [1, 0, 0.7, 0.1, 0.9, 0.3]] # label score xmin ymin xmax ymax difficult - self.detect_lod = [[0, 3, 7]] + self.detect_lod = [[3, 4]] self.detect = [ [1, 0.3, 0.1, 0.0, 0.4, 0.3], [1, 0.7, 0.0, 0.1, 0.2, 0.3], [1, 0.9, 0.7, 0.6, 0.8, 0.8], [2, 0.8, 0.2, 0.1, 0.4, 0.4], @@ -89,7 +89,7 @@ class TestDetectionMAPOp(OpTest): ] # label score true_pos false_pos - self.tf_pos_lod = [[0, 3, 7]] + self.tf_pos_lod = [[3, 4]] self.tf_pos = [[1, 0.9, 1, 0], [1, 0.7, 1, 0], [1, 0.3, 0, 1], [1, 0.2, 1, 0], [2, 0.8, 0, 1], [2, 0.1, 1, 0], [3, 0.2, 0, 1]] @@ -112,15 +112,19 @@ class TestDetectionMAPOp(OpTest): for i, count in enumerate(class_pos_count): class_pos_count_dict[i] = count - for i in range(len(true_pos_lod[0]) - 1): - start = true_pos_lod[0][i] - end = true_pos_lod[0][i + 1] + cur_pos = 0 + for i in range(len(true_pos_lod[0])): + start = cur_pos + cur_pos += true_pos_lod[0][i] + end = cur_pos for j in range(start, end): true_pos_dict[i].append(true_pos[j]) - for i in range(len(false_pos_lod[0]) - 1): - start = false_pos_lod[0][i] - end = false_pos_lod[0][i + 1] + cur_pos = 0 + for i in range(len(false_pos_lod[0])): + start = cur_pos + cur_pos += false_pos_lod[0][i] + end = cur_pos for j in range(start, end): false_pos_dict[i].append(false_pos[j]) @@ -130,19 +134,19 @@ class TestDetectionMAPOp(OpTest): label_number = self.class_num out_class_pos_count = [] - out_true_pos_lod = [0] + out_true_pos_lod = [] out_true_pos = [] - out_false_pos_lod = [0] + out_false_pos_lod = [] 
out_false_pos = [] for i in range(label_number): out_class_pos_count.append([label_count[i]]) true_pos_list = true_pos[i] out_true_pos += true_pos_list - out_true_pos_lod.append(len(out_true_pos)) + out_true_pos_lod.append(len(true_pos_list)) false_pos_list = false_pos[i] out_false_pos += false_pos_list - out_false_pos_lod.append(len(out_false_pos)) + out_false_pos_lod.append(len(false_pos_list)) return out_class_pos_count, out_true_pos, [ out_true_pos_lod @@ -160,7 +164,9 @@ class TestDetectionMAPOp(OpTest): label_count, true_pos, false_pos = get_input_pos( self.class_pos_count, self.true_pos, self.true_pos_lod, self.false_pos, self.false_pos_lod) - for (label, difficult, xmin, ymin, xmax, ymax) in self.label: + for v in self.label: + label = v[0] + difficult = False if len(v) == 5 else v[1] if self.evaluate_difficult: label_count[label] += 1 elif not difficult: @@ -170,7 +176,7 @@ class TestDetectionMAPOp(OpTest): true_pos[label].append([score, tp]) false_pos[label].append([score, fp]) - for (label, label_pos_num) in label_count.items(): + for (label, label_pos_num) in list(label_count.items()): if label_pos_num == 0 or label not in true_pos: continue label_true_pos = true_pos[label] label_false_pos = false_pos[label] @@ -239,12 +245,21 @@ class TestDetectionMAPOpSkipDiff(TestDetectionMAPOp): self.evaluate_difficult = False - self.tf_pos_lod = [[0, 2, 6]] + self.tf_pos_lod = [[2, 4]] # label score true_pos false_pos self.tf_pos = [[1, 0.7, 1, 0], [1, 0.3, 0, 1], [1, 0.2, 1, 0], [2, 0.8, 0, 1], [2, 0.1, 1, 0], [3, 0.2, 0, 1]] +class TestDetectionMAPOpWithoutDiff(TestDetectionMAPOp): + def init_test_case(self): + super(TestDetectionMAPOpWithoutDiff, self).init_test_case() + + # label xmin ymin xmax ymax + self.label = [[1, 0.1, 0.1, 0.3, 0.3], [1, 0.6, 0.6, 0.8, 0.8], + [2, 0.3, 0.3, 0.6, 0.5], [1, 0.7, 0.1, 0.9, 0.3]] + + class TestDetectionMAPOp11Point(TestDetectionMAPOp): def init_test_case(self): super(TestDetectionMAPOp11Point, self).init_test_case() @@ -256,9 +271,9 @@ class TestDetectionMAPOpMultiBatch(TestDetectionMAPOp): def init_test_case(self): super(TestDetectionMAPOpMultiBatch, self).init_test_case() self.class_pos_count = [0, 2, 1] - self.true_pos_lod = [[0, 0, 3, 5]] + self.true_pos_lod = [[0, 3, 2]] self.true_pos = [[0.7, 1.], [0.3, 0.], [0.2, 1.], [0.8, 0.], [0.1, 1.]] - self.false_pos_lod = [[0, 0, 3, 5]] + self.false_pos_lod = [[0, 3, 2]] self.false_pos = [[0.7, 0.], [0.3, 1.], [0.2, 0.], [0.8, 1.], [0.1, 0.]] diff --git a/python/paddle/fluid/tests/unittests/test_dist_base.py b/python/paddle/fluid/tests/unittests/test_dist_base.py new file mode 100644 index 0000000000..4379463aca --- /dev/null +++ b/python/paddle/fluid/tests/unittests/test_dist_base.py @@ -0,0 +1,282 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
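Before the distributed-training tests that follow, one note on a change that recurs through the LoD-based tests above (test_crf_decoding_op.py, test_ctc_align.py, test_detection_map_op.py): LoD inputs move from cumulative offsets such as [[0, 11, 18]] to per-sequence lengths such as [[11, 7]]. A minimal sketch of the correspondence between the two encodings; the helper names are illustrative, not Paddle APIs:

# Hypothetical helpers showing the offset/length LoD correspondence.
def offsets_to_lengths(offsets):
    # consecutive differences: [0, 11, 18] -> [11, 7]
    return [offsets[i + 1] - offsets[i] for i in range(len(offsets) - 1)]

def lengths_to_offsets(lengths):
    # running sum starting at 0: [11, 7] -> [0, 11, 18]
    offsets = [0]
    for n in lengths:
        offsets.append(offsets[-1] + n)
    return offsets

assert offsets_to_lengths([0, 11, 18]) == [11, 7]
assert lengths_to_offsets([11, 7]) == [0, 11, 18]

This running sum is exactly the cur_pos bookkeeping the updated tests now do by hand whenever they need absolute start/end positions.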
+import time + +import unittest +import os +import sys +import signal +import subprocess +import six + + +class TestDistRunnerBase(object): + def get_model(self, batch_size=2): + raise NotImplementedError( + "get_model should be implemented by child classes.") + + def get_transpiler(self, trainer_id, main_program, pserver_endpoints, + trainers): + # NOTE: import fluid until runtime, or else forking processes will cause error. + import paddle + import paddle.fluid as fluid + t = fluid.DistributeTranspiler() + t.transpile( + trainer_id=trainer_id, + program=main_program, + pservers=pserver_endpoints, + trainers=trainers) + return t + + def run_pserver(self, pserver_endpoints, trainers, current_endpoint, + trainer_id): + import paddle + import paddle.fluid as fluid + self.get_model(batch_size=2) + t = self.get_transpiler(trainer_id, + fluid.default_main_program(), pserver_endpoints, + trainers) + pserver_prog = t.get_pserver_program(current_endpoint) + startup_prog = t.get_startup_program(current_endpoint, pserver_prog) + place = fluid.CPUPlace() + exe = fluid.Executor(place) + exe.run(startup_prog) + exe.run(pserver_prog) + + def run_trainer(self, place, endpoints, trainer_id, trainers, is_dist=True): + import paddle + import paddle.fluid as fluid + test_program, avg_cost, train_reader, test_reader, batch_acc, predict = \ + self.get_model(batch_size=2) + if is_dist: + t = self.get_transpiler(trainer_id, + fluid.default_main_program(), endpoints, + trainers) + trainer_prog = t.get_trainer_program() + else: + trainer_prog = fluid.default_main_program() + + startup_exe = fluid.Executor(place) + startup_exe.run(fluid.default_startup_program()) + + strategy = fluid.ExecutionStrategy() + strategy.num_threads = 1 + strategy.allow_op_delay = False + exe = fluid.ParallelExecutor( + True, loss_name=avg_cost.name, exec_strategy=strategy) + + feed_var_list = [ + var for var in trainer_prog.global_block().vars.values() + if var.is_data + ] + + feeder = fluid.DataFeeder(feed_var_list, place) + reader_generator = test_reader() + + data = next(reader_generator) + first_loss, = exe.run(fetch_list=[avg_cost.name], + feed=feeder.feed(data)) + print(first_loss) + + for i in six.moves.xrange(5): + data = next(reader_generator) + loss, = exe.run(fetch_list=[avg_cost.name], feed=feeder.feed(data)) + + data = next(reader_generator) + last_loss, = exe.run(fetch_list=[avg_cost.name], feed=feeder.feed(data)) + print(last_loss) + + +def runtime_main(test_class): + import paddle + import paddle.fluid as fluid + import paddle.fluid.core as core + + if len(sys.argv) != 7: + print( + "Usage: python dist_se_resnext.py [pserver/trainer] [endpoints] [trainer_id] [current_endpoint] [trainers] [is_dist]" + ) + role = sys.argv[1] + endpoints = sys.argv[2] + trainer_id = int(sys.argv[3]) + current_endpoint = sys.argv[4] + trainers = int(sys.argv[5]) + is_dist = True if sys.argv[6] == "TRUE" else False + + model = test_class() + if role == "pserver": + model.run_pserver(endpoints, trainers, current_endpoint, trainer_id) + else: + p = fluid.CUDAPlace(0) if core.is_compiled_with_cuda( + ) else fluid.CPUPlace() + model.run_trainer(p, endpoints, trainer_id, trainers, is_dist) + + +class TestDistBase(unittest.TestCase): + def setUp(self): + self._trainers = 2 + self._pservers = 2 + self._ps_endpoints = "127.0.0.1:9123,127.0.0.1:9124" + self._python_interp = "python" + + def start_pserver(self, model_file, check_error_log): + ps0_ep, ps1_ep = self._ps_endpoints.split(",") + ps0_cmd = "%s %s pserver %s 0 %s %d TRUE" % \ + 
(self._python_interp, model_file, self._ps_endpoints, ps0_ep, + self._trainers) + ps1_cmd = "%s %s pserver %s 0 %s %d TRUE" % \ + (self._python_interp, model_file, self._ps_endpoints, ps1_ep, + self._trainers) + + ps0_pipe = subprocess.PIPE + ps1_pipe = subprocess.PIPE + if check_error_log: + print("ps0_cmd:", ps0_cmd) + print("ps1_cmd:", ps1_cmd) + ps0_pipe = open("/tmp/ps0_err.log", "wb") + ps1_pipe = open("/tmp/ps1_err.log", "wb") + + ps0_proc = subprocess.Popen( + ps0_cmd.split(" "), stdout=subprocess.PIPE, stderr=ps0_pipe) + ps1_proc = subprocess.Popen( + ps1_cmd.split(" "), stdout=subprocess.PIPE, stderr=ps1_pipe) + + if not check_error_log: + return ps0_proc, ps1_proc, None, None + else: + return ps0_proc, ps1_proc, ps0_pipe, ps1_pipe + + def _wait_ps_ready(self, pid): + retry_times = 50 + while True: + assert retry_times >= 0, "wait ps ready failed" + time.sleep(3) + try: + # the listen_and_serv_op would touch a file which contains the listen port + # on the /tmp directory until it was ready to process all the RPC call. + os.stat("/tmp/paddle.%d.port" % pid) + return + except os.error as e: + sys.stderr.write('waiting for pserver: %s, left retry %d\n' % + (e, retry_times)) + retry_times -= 1 + + def check_with_place(self, model_file, delta=1e-3, check_error_log=False): + # *ATTENTION* THIS TEST NEEDS AT LEAST 2GPUS TO RUN + required_envs = { + "PATH": os.getenv("PATH"), + "PYTHONPATH": os.getenv("PYTHONPATH"), + "LD_LIBRARY_PATH": os.getenv("LD_LIBRARY_PATH"), + "FLAGS_fraction_of_gpu_memory_to_use": "0.15", + "FLAGS_cudnn_deterministic": "1" + } + + if check_error_log: + required_envs["GLOG_v"] = "7" + required_envs["GLOG_logtostderr"] = "1" + + # Run local to get a base line + env_local = {"CUDA_VISIBLE_DEVICES": "0"} + env_local.update(required_envs) + local_cmd = "%s %s trainer %s 0 %s %d FLASE" % \ + (self._python_interp, model_file, + "127.0.0.1:1234", "127.0.0.1:1234", 1) + if not check_error_log: + local_proc = subprocess.Popen( + local_cmd.split(" "), + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + env=env_local) + else: + print("trainer cmd:", local_cmd) + err_log = open("/tmp/trainer.err.log", "wb") + local_proc = subprocess.Popen( + local_cmd.split(" "), + stdout=subprocess.PIPE, + stderr=err_log, + env=env_local) + + local_proc.wait() + out, err = local_proc.communicate() + local_ret = out + sys.stderr.write('local_loss: %s\n' % local_ret) + sys.stderr.write('local_stderr: %s\n' % err) + + # Run dist train to compare with local results + ps0, ps1, ps0_pipe, ps1_pipe = self.start_pserver(model_file, + check_error_log) + self._wait_ps_ready(ps0.pid) + self._wait_ps_ready(ps1.pid) + + ps0_ep, ps1_ep = self._ps_endpoints.split(",") + tr0_cmd = "%s %s trainer %s 0 %s %d TRUE" % \ + (self._python_interp, model_file, self._ps_endpoints, ps0_ep, + self._trainers) + tr1_cmd = "%s %s trainer %s 1 %s %d TRUE" % \ + (self._python_interp, model_file, self._ps_endpoints, ps1_ep, + self._trainers) + + env0 = {"CUDA_VISIBLE_DEVICES": "0"} + env1 = {"CUDA_VISIBLE_DEVICES": "1"} + env0.update(required_envs) + env1.update(required_envs) + FNULL = open(os.devnull, 'w') + + tr0_pipe = subprocess.PIPE + tr1_pipe = subprocess.PIPE + if check_error_log: + print("tr0_cmd:", tr0_cmd) + print("tr1_cmd:", tr1_cmd) + tr0_pipe = open("/tmp/tr0_err.log", "wb") + tr1_pipe = open("/tmp/tr1_err.log", "wb") + + tr0_proc = subprocess.Popen( + tr0_cmd.split(" "), + stdout=subprocess.PIPE, + stderr=tr0_pipe, + env=env0) + tr1_proc = subprocess.Popen( + tr1_cmd.split(" "), + stdout=subprocess.PIPE, + 
stderr=tr1_pipe, + env=env1) + + tr0_proc.wait() + tr1_proc.wait() + out, err = tr0_proc.communicate() + sys.stderr.write('dist_stderr: %s\n' % err) + loss_data0 = out + sys.stderr.write('dist_loss: %s\n' % loss_data0) + lines = loss_data0.split("\n") + dist_first_loss = eval(lines[0].replace(" ", ","))[0] + dist_last_loss = eval(lines[1].replace(" ", ","))[0] + + local_lines = local_ret.split("\n") + local_first_loss = eval(local_lines[0])[0] + local_last_loss = eval(local_lines[1])[0] + + # close trainer file + if check_error_log: + tr0_pipe.close() + tr1_pipe.close() + + ps0_pipe.close() + ps1_pipe.close() + # FIXME: use terminate() instead of sigkill. + os.kill(ps0.pid, signal.SIGKILL) + os.kill(ps1.pid, signal.SIGKILL) + FNULL.close() + + self.assertAlmostEqual(local_first_loss, dist_first_loss, delta=delta) + self.assertAlmostEqual(local_last_loss, dist_last_loss, delta=delta) diff --git a/python/paddle/fluid/tests/unittests/test_dist_mnist.py b/python/paddle/fluid/tests/unittests/test_dist_mnist.py new file mode 100644 index 0000000000..b3ccec9a7d --- /dev/null +++ b/python/paddle/fluid/tests/unittests/test_dist_mnist.py @@ -0,0 +1,24 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import unittest +from test_dist_base import TestDistBase + + +class TestDistMnist2x2(TestDistBase): + def test_dist_mnist(self): + self.check_with_place("dist_mnist.py", delta=1e-7) + + +if __name__ == "__main__": + unittest.main() diff --git a/python/paddle/fluid/tests/unittests/test_dist_se_resnext.py b/python/paddle/fluid/tests/unittests/test_dist_se_resnext.py new file mode 100644 index 0000000000..a33a338fc1 --- /dev/null +++ b/python/paddle/fluid/tests/unittests/test_dist_se_resnext.py @@ -0,0 +1,24 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
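check_with_place() above drives a companion model script (here dist_mnist.py and dist_se_resnext.py, which this diff does not include) as pserver and trainer subprocesses, then compares the first and last losses of the local and distributed runs. Such a script is expected to subclass TestDistRunnerBase and hand itself to runtime_main; a hypothetical minimal skeleton (DistExampleModel is illustrative, not part of the diff):

# Hypothetical companion-script layout for check_with_place().
from test_dist_base import TestDistRunnerBase, runtime_main

class DistExampleModel(TestDistRunnerBase):
    def get_model(self, batch_size=2):
        # Build the network and return the tuple run_trainer() unpacks:
        # (test_program, avg_cost, train_reader, test_reader,
        #  batch_acc, predict)
        raise NotImplementedError("fill in a real network here")

if __name__ == "__main__":
    runtime_main(DistExampleModel)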
+import unittest +from test_dist_base import TestDistBase + + +class TestDistSeResneXt2x2(TestDistBase): + def test_se_resnext(self): + self.check_with_place("dist_se_resnext.py", delta=1e-7) + + +if __name__ == "__main__": + unittest.main() diff --git a/python/paddle/fluid/tests/unittests/test_dist_train.py b/python/paddle/fluid/tests/unittests/test_dist_train.py index 77e9a8f7e7..aab8969a96 100644 --- a/python/paddle/fluid/tests/unittests/test_dist_train.py +++ b/python/paddle/fluid/tests/unittests/test_dist_train.py @@ -12,16 +12,19 @@ # See the License for the specific language governing permissions and # limitations under the License. +import os +import time import unittest +from multiprocessing import Process +import signal + +import numpy import paddle.fluid as fluid -import paddle.fluid.core as core import paddle.fluid.layers as layers -import numpy -from multiprocessing import Process -from threading import Thread -import os, sys -import time +from paddle.fluid.layers.io import ListenAndServ +from paddle.fluid.layers.io import Recv +from paddle.fluid.layers.io import Send class TestSendOp(unittest.TestCase): @@ -33,8 +36,10 @@ class TestSendOp(unittest.TestCase): p.daemon = True p.start() - time.sleep(10) - with open("/tmp/paddle.%d.selected_port" % p.pid, "r") as fn: + self.ps_timeout = 5 + self._wait_ps_ready(p.pid) + + with open("/tmp/paddle.%d.port" % p.pid, "r") as fn: selected_port = int(fn.readlines()[0]) self.init_client(place, selected_port) @@ -42,25 +47,41 @@ class TestSendOp(unittest.TestCase): self.assertTrue(numpy.allclose(self.local_out, self.dist_out)) # FIXME(typhoonzero): find a way to gracefully shutdown the server. - os.system("kill -9 %d" % p.pid) + os.kill(p.pid, signal.SIGKILL) p.join() + def _wait_ps_ready(self, pid): + start_left_time = self.ps_timeout + sleep_time = 0.5 + while True: + assert start_left_time >= 0, "wait ps ready failed" + time.sleep(sleep_time) + try: + # the listen_and_serv_op would touch a file which contains the listen port + # on the /tmp directory until it was ready to process all the RPC call. 
+ os.stat("/tmp/paddle.%d.port" % pid) + return + except os.error: + start_left_time -= sleep_time + def init_serv(self, place): main = fluid.Program() with fluid.program_guard(main): - serv = layers.ListenAndServ( - "127.0.0.1:0", ["X"], optimizer_mode=False) + serv = ListenAndServ("127.0.0.1:0", ["X"], optimizer_mode=False) with serv.do(): + out_var = main.global_block().create_var( + name="scale_0.tmp_0", + psersistable=True, + dtype="float32", + shape=[32, 32]) x = layers.data( shape=[32, 32], dtype='float32', name="X", append_batch_size=False) fluid.initializer.Constant(value=1.0)(x, main.global_block()) - o = layers.scale(x=x, scale=10.0) - main.global_block().create_var( - name=o.name, psersistable=False, dtype=o.dtype, shape=o.shape) + layers.scale(x=x, scale=10.0, out=out_var) self.server_exe = fluid.Executor(place) self.server_exe.run(main) @@ -79,7 +100,10 @@ class TestSendOp(unittest.TestCase): dtype="float32", persistable=False, shape=[32, 32]) - o = layers.Send("127.0.0.1:%d" % port, [x], [get_var]) + fluid.initializer.Constant(value=2.3)(get_var, main.global_block()) + Send("127.0.0.1:%d" % port, [x]) + o = Recv("127.0.0.1:%d" % port, [get_var]) + exe = fluid.Executor(place) self.dist_out = exe.run(main, fetch_list=o) # o is a list diff --git a/python/paddle/fluid/tests/unittests/test_dist_transformer.py b/python/paddle/fluid/tests/unittests/test_dist_transformer.py new file mode 100644 index 0000000000..68cd35d751 --- /dev/null +++ b/python/paddle/fluid/tests/unittests/test_dist_transformer.py @@ -0,0 +1,27 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import unittest +from test_dist_base import TestDistBase + + +class TestDistTransformer2x2(TestDistBase): + def test_transformer(self): + # TODO(paddle-dev): check if the delta is OK. + # Usually start around ~8000 and converge to ~5000 + self.check_with_place("dist_transformer.py", delta=400) + + +if __name__ == "__main__": + unittest.main() diff --git a/python/paddle/fluid/tests/unittests/test_dist_transpiler.py b/python/paddle/fluid/tests/unittests/test_dist_transpiler.py new file mode 100644 index 0000000000..0543e62381 --- /dev/null +++ b/python/paddle/fluid/tests/unittests/test_dist_transpiler.py @@ -0,0 +1,570 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import unittest +import paddle.fluid as fluid +from paddle.fluid.transpiler.distribute_transpiler import delete_ops +import traceback + + +class TranspilerTest(unittest.TestCase): + def setUp(self): + self.trainer_id = 0 + self.trainers = 2 + self.pservers = 2 + # NOTE: we do not actually bind this port + self.pserver_eps = "127.0.0.1:6174,127.0.0.1:6175" + self.pserver1_ep = "127.0.0.1:6174" + self.pserver2_ep = "127.0.0.1:6175" + self.sync_mode = True + self.transpiler = None + + def net_conf(self): + x = fluid.layers.data(name='x', shape=[1000], dtype='float32') + y_predict = fluid.layers.fc(input=x, + size=1000, + act=None, + param_attr=fluid.ParamAttr(name='fc_w'), + bias_attr=fluid.ParamAttr(name='fc_b')) + y = fluid.layers.data(name='y', shape=[1], dtype='float32') + cost = fluid.layers.square_error_cost(input=y_predict, label=y) + avg_cost = fluid.layers.mean(cost) + sgd_optimizer = fluid.optimizer.SGD(learning_rate=0.1) + sgd_optimizer.minimize(avg_cost) + return + + def get_main_program(self): + main = fluid.Program() + with fluid.program_guard(main): + self.net_conf() + self.origin_prog = main.clone() + return main + + def get_trainer(self, config=None, sync_mode=True): + t = self._transpiler_instance(config, sync_mode) + return t.get_trainer_program() + + def get_pserver(self, ep, config=None, sync_mode=True): + t = self._transpiler_instance(config, sync_mode) + pserver = t.get_pserver_program(ep) + startup = t.get_startup_program(ep, pserver) + return pserver, startup + + def _transpiler_instance(self, config=None, sync_mode=True): + if not self.transpiler: + main = self.get_main_program() + self.transpiler = fluid.DistributeTranspiler(config=config) + self.transpiler.transpile( + self.trainer_id, + program=main, + pservers=self.pserver_eps, + trainers=self.trainers, + sync_mode=sync_mode) + + return self.transpiler + + def transpiler_test_impl(self): + pass + + def test_transpiler(self): + main = fluid.Program() + startup = fluid.Program() + with fluid.program_guard(main, startup): + self.transpiler_test_impl() + + +class TestBasicModel(TranspilerTest): + def transpiler_test_impl(self): + pserver, startup = self.get_pserver(self.pserver1_ep) + pserver2, startup2 = self.get_pserver(self.pserver2_ep) + + trainer = self.get_trainer() + + self.assertEqual([op.type for op in trainer.global_block().ops], [ + 'mul', 'elementwise_add', 'elementwise_sub', 'square', 'mean', + 'fill_constant', 'mean_grad', 'square_grad', 'elementwise_sub_grad', + 'elementwise_add_grad', 'send', 'mul_grad', 'split_byref', 'send', + 'send_barrier', 'recv', 'recv', 'fetch_barrier', 'concat' + ]) + + self.assertEqual(len(pserver.blocks), 3) + # block0: listen_and_serv + self.assertEqual([op.type for op in pserver.blocks[0].ops], + ["listen_and_serv"]) + # block1~2: optimize pass + self.assertEqual([op.type for op in pserver.blocks[1].ops], + ["sum", "scale", "sgd"]) + # confirm startup program + self.assertEqual([op.type for op in startup.global_block().ops], + ["fill_constant", "fill_constant", "uniform_random"]) + # the variable #fc_w will be split into two blocks + fc_w_var = startup.global_block().var("fc_w.block1") + self.assertEqual(fc_w_var.shape, (500, 1000)) + # all parameters should be optimized on pserver + + pserver_params = [] + for prog in [pserver, pserver2]: + for blk in prog.blocks: + for op in blk.ops: + if "Param" in op.input_names: + param_name = op.input("Param")[0] + is_block_idx = param_name.find(".block") + if is_block_idx != -1: + origin_param_name = param_name[:is_block_idx] + 
else: + origin_param_name = param_name + pserver_params.append(origin_param_name) + trainer_params = [] + for op in self.origin_prog.global_block().ops: + if "Param" in op.input_names: + trainer_params.append(op.input("Param")[0]) + self.assertEqual(set(pserver_params), set(trainer_params)) + + +class TestBasicModelWithLargeBlockSize(TranspilerTest): + def transpiler_test_impl(self): + config = fluid.DistributeTranspilerConfig() + config.min_block_size = 1048576 + + pserver, startup = self.get_pserver(self.pserver1_ep, config) + pserver2, startup2 = self.get_pserver(self.pserver2_ep, config) + + trainer = self.get_trainer(config) + + self.assertEqual([op.type for op in trainer.global_block().ops], [ + 'mul', 'elementwise_add', 'elementwise_sub', 'square', 'mean', + 'fill_constant', 'mean_grad', 'square_grad', 'elementwise_sub_grad', + 'elementwise_add_grad', 'send', 'mul_grad', 'send', 'send_barrier', + 'recv', 'recv', 'fetch_barrier' + ]) + + self.assertEqual(len(pserver.blocks), 2) + # block0: listen_and_serv + self.assertEqual([op.type for op in pserver.blocks[0].ops], + ["listen_and_serv"]) + # block1~2: optimize pass + self.assertEqual([op.type for op in pserver.blocks[1].ops], + ["sum", "scale", "sgd"]) + # confirm startup program + self.assertEqual([op.type for op in startup.global_block().ops], + ["fill_constant", "fill_constant"]) + # the variable #fc_w will be split into two blocks + fc_w_var = startup2.global_block().var("fc_w") + self.assertEqual(fc_w_var.shape, (1000, 1000)) + # all parameters should be optimized on pserver + + pserver_params = [] + for prog in [pserver, pserver2]: + for blk in prog.blocks: + for op in blk.ops: + if "Param" in op.input_names: + param_name = op.input("Param")[0] + is_block_idx = param_name.find(".block") + if is_block_idx != -1: + origin_param_name = param_name[:is_block_idx] + else: + origin_param_name = param_name + pserver_params.append(origin_param_name) + trainer_params = [] + for op in self.origin_prog.global_block().ops: + if "Param" in op.input_names: + trainer_params.append(op.input("Param")[0]) + self.assertEqual(set(pserver_params), set(trainer_params)) + + +class TestNoSliceVar(TranspilerTest): + def setUp(self): + super(TestNoSliceVar, self).setUp() + + def transpiler_test_impl(self): + config = fluid.DistributeTranspilerConfig() + config.slice_var_up = False + + _, startup = self.get_pserver(self.pserver1_ep, config) + _, startup2 = self.get_pserver(self.pserver2_ep, config) + + if "fc_w" in startup.global_block().vars: + fc_w_var = startup.global_block().vars["fc_w"] + elif "fc_w" in startup2.global_block().vars: + fc_w_var = startup2.global_block().vars["fc_w"] + + self.assertEqual(fc_w_var.shape, (1000, 1000)) + + +class TestLRDecay(TranspilerTest): + def net_conf(self): + x = fluid.layers.data(name='x', shape=[1000], dtype='float32') + y_predict = fluid.layers.fc(input=x, + size=1000, + act=None, + param_attr=fluid.ParamAttr(name='fc_w'), + bias_attr=fluid.ParamAttr(name='fc_b')) + y = fluid.layers.data(name='y', shape=[1], dtype='float32') + cost = fluid.layers.square_error_cost(input=y_predict, label=y) + avg_cost = fluid.layers.mean(cost) + sgd_optimizer = fluid.optimizer.SGD( + learning_rate=fluid.layers.exponential_decay( + learning_rate=1.0, + decay_steps=2100, + decay_rate=0.1, + staircase=True)) + sgd_optimizer.minimize(avg_cost) + return + + def transpiler_test_impl(self): + pserver, startup = self.get_pserver(self.pserver1_ep) + trainer = self.get_trainer() + + self.assertEqual(len(pserver.blocks), 4) + lr_decay_ops 
= [op.type for op in pserver.blocks[1].ops] + self.assertEqual(lr_decay_ops, [ + "increment", "cast", "fill_constant", "elementwise_div", "floor", + "fill_constant", "elementwise_pow", "fill_constant", + "elementwise_mul" + ]) + + +class TestLRDecayConditional(TranspilerTest): + def net_conf(self): + x = fluid.layers.data(name='x', shape=[1000], dtype='float32') + y_predict = fluid.layers.fc(input=x, + size=1000, + act=None, + param_attr=fluid.ParamAttr(name='fc_w'), + bias_attr=fluid.ParamAttr(name='fc_b')) + y = fluid.layers.data(name='y', shape=[1], dtype='float32') + cost = fluid.layers.square_error_cost(input=y_predict, label=y) + avg_cost = fluid.layers.mean(cost) + sgd_optimizer = fluid.optimizer.SGD( + learning_rate=fluid.layers.piecewise_decay([10000, 20000], + [1.0, 0.5, 1.0])) + sgd_optimizer.minimize(avg_cost) + return + + def transpiler_test_impl(self): + pserver, startup = self.get_pserver(self.pserver1_ep) + trainer = self.get_trainer() + + serv_op = pserver.blocks[0].ops[0] + sub_blocks = [] + optimize_blocks = [] + for b in serv_op.all_attrs()["optimize_blocks"]: + optimize_blocks.append(b.idx) + for b in pserver.blocks: + if b.idx not in optimize_blocks: + sub_blocks.append(b.idx) + + self.assertEqual(len(pserver.blocks), 7) + lr_decay_ops = [op.type for op in pserver.blocks[1].ops] + self.assertEqual(lr_decay_ops, [ + "increment", "cast", "fill_constant", "fill_constant", "less_than", + "logical_not", "conditional_block", "fill_constant", + "fill_constant", "less_than", "logical_not", "logical_and", + "logical_and", "conditional_block", "fill_constant", + "conditional_block" + ]) + # test the condition blocks + for b in sub_blocks: + if b == 0: + continue + block = pserver.blocks[b] + self.assertEqual([op.type for op in block.ops], ["assign"]) + + +class TestL2Decay(TranspilerTest): + def net_conf(self): + x = fluid.layers.data(name='x', shape=[1000], dtype='float32') + y_predict = fluid.layers.fc( + input=x, + size=1000, + act=None, + param_attr=fluid.ParamAttr( + name='fc_w', + regularizer=fluid.regularizer.L2Decay(), + gradient_clip=fluid.clip.GradientClipByValue(0.1)), + bias_attr=fluid.ParamAttr(name='fc_b')) + y = fluid.layers.data(name='y', shape=[1], dtype='float32') + cost = fluid.layers.square_error_cost(input=y_predict, label=y) + avg_cost = fluid.layers.mean(cost) + sgd_optimizer = fluid.optimizer.SGD(learning_rate=0.1) + sgd_optimizer.minimize(avg_cost) + return + + def transpiler_test_impl(self): + pserver, startup = self.get_pserver(self.pserver1_ep) + trainer = self.get_trainer() + + self.assertEqual(len(pserver.blocks), 3) + self.assertEqual([op.type for op in pserver.blocks[1].ops], + ["sum", "scale", "clip", "sgd"]) + self.assertEqual( + [op.type for op in pserver.blocks[2].ops], + ["sum", "scale", "clip", "scale", "elementwise_add", "sgd"]) + # TODO(typhoonzero): test clipping and L2Decay ops are removed from trainer + + +class TestL2DecayWithPiecewise(TranspilerTest): + def net_conf(self): + x = fluid.layers.data(name='x', shape=[1000], dtype='float32') + y_predict = fluid.layers.fc(input=x, + size=1000, + act=None, + param_attr=fluid.ParamAttr(name='fc_w'), + bias_attr=fluid.ParamAttr(name='fc_b')) + y = fluid.layers.data(name='y', shape=[1], dtype='float32') + cost = fluid.layers.square_error_cost(input=y_predict, label=y) + avg_cost = fluid.layers.mean(cost) + base_lr = 1.0 + bd = [1, 10, 20, 30] + lr = [base_lr * (0.1**i) for i in range(len(bd) + 1)] + sgd_optimizer = fluid.optimizer.Momentum( + learning_rate=fluid.layers.piecewise_decay( + 
boundaries=bd, values=lr), + momentum=0.9, + regularization=fluid.regularizer.L2Decay(1e-4)) + sgd_optimizer.minimize(avg_cost) + return + + def transpiler_test_impl(self): + pserver, startup = self.get_pserver(self.pserver1_ep) + trainer = self.get_trainer() + + self.assertEqual(len(pserver.blocks), 9) + self.assertEqual([op.type for op in pserver.blocks[1].ops], [ + "increment", "cast", "fill_constant", "fill_constant", "less_than", + "logical_not", "conditional_block", "fill_constant", + "fill_constant", "less_than", "logical_not", "logical_and", + "logical_and", "conditional_block", "fill_constant", + "fill_constant", "less_than", "logical_not", "logical_and", + "logical_and", "conditional_block", "fill_constant", + "fill_constant", "less_than", "logical_not", "logical_and", + "logical_and", "conditional_block", "fill_constant", + "conditional_block" + ]) + self.assertEqual( + [op.type for op in pserver.blocks[7].ops], + ["sum", "scale", "scale", "elementwise_add", "momentum"]) + self.assertEqual( + [op.type for op in pserver.blocks[8].ops], + ["sum", "scale", "scale", "elementwise_add", "momentum"]) + + +class TestDistLookupTableBase(TranspilerTest): + def network_with_table(self, is_sparse, is_distributed): + def emb_pool(ids): + table_size = 1000 + emb_size = 64 + emb = fluid.layers.embedding( + input=ids, + size=[table_size, emb_size], + dtype='float32', + param_attr='shared_w', # share parameter + is_sparse=is_sparse, + is_distributed=is_distributed) + pool = fluid.layers.sequence_pool(input=emb, pool_type='average') + return pool + + title_ids = fluid.layers.data( + name='title_ids', shape=[1], dtype='int64', lod_level=1) + brand_ids = fluid.layers.data( + name='brand_ids', shape=[1], dtype='int64', lod_level=1) + title_emb = emb_pool(title_ids) + brand_emb = emb_pool(brand_ids) + fc0 = fluid.layers.concat(input=[title_emb, brand_emb], axis=1) + predict = fluid.layers.fc(input=fc0, + size=2, + act=None, + param_attr=fluid.ParamAttr(name='fc_w'), + bias_attr=fluid.ParamAttr(name='fc_b')) + + label = fluid.layers.data(name='label', shape=[1], dtype='int64') + cost = fluid.layers.cross_entropy(input=predict, label=label) + avg_cost = fluid.layers.mean(cost) + optimizer = fluid.optimizer.Adam(learning_rate=0.003) + optimizer.minimize(avg_cost) + + +class TestLocalLookupTable(TestDistLookupTableBase): + def net_conf(self): + self.network_with_table(is_sparse=True, is_distributed=False) + + def transpiler_test_impl(self): + pserver1, startup1 = self.get_pserver(self.pserver1_ep) + + self.assertEqual(len(pserver1.blocks), 3) + # 0 listen_and_serv + # 1 optimize for fc_w or fc_b adam + self.assertEqual([op.type for op in pserver1.blocks[1].ops], + ["sum", "scale", "adam", "scale", "scale"]) + # 2 optimize for table adam + # NOTE: if param is not selected rows, the grad will scaled to grad / trainer_num + self.assertEqual([op.type for op in pserver1.blocks[2].ops], + ["sum", "adam", "scale", "scale"]) + + trainer = self.get_trainer() + self.assertEqual(len(trainer.blocks), 1) + ops = [ + 'lookup_table', 'sequence_pool', 'lookup_table', 'sequence_pool', + 'concat', 'mul', 'elementwise_add', 'cross_entropy', 'mean', + 'fill_constant', 'mean_grad', 'cross_entropy_grad', + 'elementwise_add_grad', 'send', 'mul_grad', 'send', 'concat_grad', + 'sequence_pool_grad', 'lookup_table_grad', 'sequence_pool_grad', + 'lookup_table_grad', 'sum', 'split_selected_rows', 'send', + 'send_barrier', 'recv', 'recv', 'recv', 'fetch_barrier', 'concat' + ] + self.assertEqual([op.type for op in 
trainer.blocks[0].ops], ops) + + +class TestDistLookupTable(TestDistLookupTableBase): + def net_conf(self): + self.network_with_table(is_sparse=True, is_distributed=True) + + def transpiler_test_impl(self): + pserver1, startup1 = self.get_pserver(self.pserver1_ep) + + self.assertEqual(len(pserver1.blocks), 6) + # 0 listen_and_serv + # 1 optimize for fc_w or fc_b adam + self.assertEqual([op.type for op in pserver1.blocks[1].ops], + ["sum", "scale", "adam", "scale", "scale"]) + # 2 optimize for table sgd + self.assertEqual([op.type for op in pserver1.blocks[2].ops], + ["sum", "sgd"]) + # 3 prefetch -> lookup_sparse_table for data0 + self.assertEqual([op.type for op in pserver1.blocks[3].ops], + ["lookup_sparse_table"]) + # 4 prefetch -> lookup_sparse_table for data1 + self.assertEqual([op.type for op in pserver1.blocks[4].ops], + ["lookup_sparse_table"]) + # 5 save table + self.assertEqual([op.type for op in pserver1.blocks[5].ops], ["save"]) + + trainer = self.get_trainer() + self.assertEqual(len(trainer.blocks), 1) + ops = [ + 'split_ids', 'prefetch', 'merge_ids', 'sequence_pool', 'split_ids', + 'prefetch', 'merge_ids', 'sequence_pool', 'concat', 'mul', + 'elementwise_add', 'cross_entropy', 'mean', 'fill_constant', + 'mean_grad', 'cross_entropy_grad', 'elementwise_add_grad', 'send', + 'mul_grad', 'send', 'concat_grad', 'sequence_pool_grad', + 'lookup_table_grad', 'sequence_pool_grad', 'lookup_table_grad', + 'sum', 'split_ids', 'send', 'send_barrier', 'recv', 'recv', + 'fetch_barrier' + ] + self.assertEqual([op.type for op in trainer.blocks[0].ops], ops) + + +class TestAsyncLocalLookupTable(TestDistLookupTableBase): + def net_conf(self): + self.network_with_table(is_sparse=True, is_distributed=False) + + def transpiler_test_impl(self): + config = fluid.DistributeTranspilerConfig() + pserver1, startup1 = self.get_pserver(self.pserver1_ep, config, False) + + self.assertEqual(len(pserver1.blocks), 3) + # 0 listen_and_serv + # 1 optimize for fc_w or fc_b adam + self.assertEqual([op.type for op in pserver1.blocks[1].ops], + ["adam", "scale", "scale"]) + # 2 optimize for table adam + # NOTE: if param is not selected rows, the grad will scaled to grad / trainer_num + self.assertEqual([op.type for op in pserver1.blocks[2].ops], + ["adam", "scale", "scale"]) + + trainer = self.get_trainer(config) + self.assertEqual(len(trainer.blocks), 1) + ops = [ + 'lookup_table', 'sequence_pool', 'lookup_table', 'sequence_pool', + 'concat', 'mul', 'elementwise_add', 'cross_entropy', 'mean', + 'fill_constant', 'mean_grad', 'cross_entropy_grad', + 'elementwise_add_grad', 'send', 'mul_grad', 'send', 'concat_grad', + 'sequence_pool_grad', 'lookup_table_grad', 'sequence_pool_grad', + 'lookup_table_grad', 'sum', 'split_selected_rows', 'send', 'recv', + 'recv', 'recv', 'concat' + ] + self.assertEqual([op.type for op in trainer.blocks[0].ops], ops) + + +class TestAsyncDistLookupTable(TestDistLookupTableBase): + def net_conf(self): + self.network_with_table(is_sparse=True, is_distributed=True) + + def transpiler_test_impl(self): + config = fluid.DistributeTranspilerConfig() + + pserver1, startup1 = self.get_pserver(self.pserver1_ep, config, False) + + self.assertEqual(len(pserver1.blocks), 6) + # 0 listen_and_serv + # 1 optimize for fc_w or fc_b adam + self.assertEqual([op.type for op in pserver1.blocks[1].ops], + ["adam", "scale", "scale"]) + # 2 optimize for table sgd + self.assertEqual([op.type for op in pserver1.blocks[2].ops], ["sgd"]) + # 3 prefetch -> lookup_sparse_table for data0 + self.assertEqual([op.type for op 
in pserver1.blocks[3].ops], + ["lookup_sparse_table"]) + # 4 prefetch -> lookup_sparse_table for data1 + self.assertEqual([op.type for op in pserver1.blocks[4].ops], + ["lookup_sparse_table"]) + # 5 save table + self.assertEqual([op.type for op in pserver1.blocks[5].ops], ["save"]) + + trainer = self.get_trainer(config) + self.assertEqual(len(trainer.blocks), 1) + ops = [ + 'split_ids', 'prefetch', 'merge_ids', 'sequence_pool', 'split_ids', + 'prefetch', 'merge_ids', 'sequence_pool', 'concat', 'mul', + 'elementwise_add', 'cross_entropy', 'mean', 'fill_constant', + 'mean_grad', 'cross_entropy_grad', 'elementwise_add_grad', 'send', + 'mul_grad', 'send', 'concat_grad', 'sequence_pool_grad', + 'lookup_table_grad', 'sequence_pool_grad', 'lookup_table_grad', + 'sum', 'split_ids', 'send', 'recv', 'recv' + ] + self.assertEqual([op.type for op in trainer.blocks[0].ops], ops) + + +class TestRMSPropOptimizer(TranspilerTest): + def net_conf(self): + x = fluid.layers.data(name='x', shape=[1000], dtype='float32') + y_predict = fluid.layers.fc(input=x, + size=1000, + act=None, + param_attr=fluid.ParamAttr(name='fc_w'), + bias_attr=fluid.ParamAttr(name='fc_b')) + y = fluid.layers.data(name='y', shape=[1], dtype='float32') + cost = fluid.layers.square_error_cost(input=y_predict, label=y) + avg_cost = fluid.layers.mean(cost) + optimizer = fluid.optimizer.RMSProp(learning_rate=0.1) + optimizer.minimize(avg_cost) + return + + def transpiler_test_impl(self): + pserver, startup = self.get_pserver(self.pserver1_ep) + pserver2, startup2 = self.get_pserver(self.pserver2_ep) + + self.assertEqual(len(pserver.blocks), 3) + # block1~2: optimize pass + self.assertEqual([op.type for op in pserver.blocks[1].ops], + ["sum", "scale", "rmsprop"]) + # the variable #fc_w will be split into two blocks + fc_w_var = startup.global_block().var("fc_w.block1") + self.assertEqual(fc_w_var.shape, (500, 1000)) + moment_var = startup.global_block().var("momentum_1") + self.assertEqual(moment_var.shape, (500, 1000)) + + +if __name__ == "__main__": + unittest.main() diff --git a/python/paddle/fluid/tests/unittests/test_dist_word2vec.py b/python/paddle/fluid/tests/unittests/test_dist_word2vec.py new file mode 100644 index 0000000000..543d0f9dc2 --- /dev/null +++ b/python/paddle/fluid/tests/unittests/test_dist_word2vec.py @@ -0,0 +1,24 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
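# ---------------------------------------------------------------------
# Editor's sketch (illustrative, not part of the original patch). The
# distributed tests added in this patch all follow the same pattern:
# subclass TestDistBase, name the training script, and give a tolerance.
# Judging from its use below, check_with_place launches pserver and
# trainer processes for the named script and asserts that the
# distributed losses match a local single-process run within `delta`.
# "dist_example.py" is a hypothetical script name used only here.
import unittest
from test_dist_base import TestDistBase


class TestDistExample2x2(TestDistBase):
    def test_example(self):
        # 2 pservers x 2 trainers; losses must agree within delta
        self.check_with_place("dist_example.py", delta=1e-7)


if __name__ == "__main__":
    unittest.main()
# ---------------------------------------------------------------------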
+import unittest +from test_dist_base import TestDistBase + + +class TestDistWord2vec2x2(TestDistBase): + def test_word2vec(self): + self.check_with_place("dist_word2vec.py", delta=1e-7) + + +if __name__ == "__main__": + unittest.main() diff --git a/python/paddle/fluid/tests/unittests/test_dyn_rnn.py b/python/paddle/fluid/tests/unittests/test_dyn_rnn.py index 0faed94deb..fdc6adc93b 100644 --- a/python/paddle/fluid/tests/unittests/test_dyn_rnn.py +++ b/python/paddle/fluid/tests/unittests/test_dyn_rnn.py @@ -17,6 +17,12 @@ import paddle import unittest import numpy +from paddle.fluid.layers.control_flow import lod_rank_table +from paddle.fluid.layers.control_flow import max_sequence_len +from paddle.fluid.layers.control_flow import lod_tensor_to_array +from paddle.fluid.layers.control_flow import array_to_lod_tensor +from paddle.fluid.layers.control_flow import shrink_memory + class TestDynRNN(unittest.TestCase): def setUp(self): @@ -38,12 +44,11 @@ class TestDynRNN(unittest.TestCase): label = fluid.layers.data(name='label', shape=[1], dtype='float32') - rank_table = fluid.layers.lod_rank_table(x=sent_emb) + rank_table = lod_rank_table(x=sent_emb) - sent_emb_array = fluid.layers.lod_tensor_to_array( - x=sent_emb, table=rank_table) + sent_emb_array = lod_tensor_to_array(x=sent_emb, table=rank_table) - seq_len = fluid.layers.max_sequence_len(rank_table=rank_table) + seq_len = max_sequence_len(rank_table=rank_table) i = fluid.layers.fill_constant(shape=[1], dtype='int64', value=0) i.stop_gradient = False @@ -66,7 +71,7 @@ class TestDynRNN(unittest.TestCase): mem = fluid.layers.array_read(array=mem_array, i=i) ipt = fluid.layers.array_read(array=sent_emb_array, i=i) - mem = fluid.layers.shrink_memory(x=mem, i=i, table=rank_table) + mem = shrink_memory(x=mem, i=i, table=rank_table) hidden = fluid.layers.fc(input=[mem, ipt], size=100, act='tanh') @@ -75,8 +80,7 @@ class TestDynRNN(unittest.TestCase): fluid.layers.array_write(x=hidden, i=i, array=mem_array) fluid.layers.less_than(x=i, y=seq_len, cond=cond) - all_timesteps = fluid.layers.array_to_lod_tensor( - x=out, table=rank_table) + all_timesteps = array_to_lod_tensor(x=out, table=rank_table) last = fluid.layers.sequence_last_step(input=all_timesteps) logits = fluid.layers.fc(input=last, size=1, act=None) loss = fluid.layers.sigmoid_cross_entropy_with_logits( @@ -131,7 +135,7 @@ class TestDynRNN(unittest.TestCase): loss_0 = exe.run(main_program, feed=feeder.feed(data), fetch_list=[loss])[0] - for _ in xrange(100): + for _ in range(100): val = exe.run(main_program, feed=feeder.feed(data), fetch_list=[loss])[0] diff --git a/python/paddle/fluid/tests/unittests/test_dynrnn_gradient_check.py b/python/paddle/fluid/tests/unittests/test_dynrnn_gradient_check.py index 2232939075..7756885166 100644 --- a/python/paddle/fluid/tests/unittests/test_dynrnn_gradient_check.py +++ b/python/paddle/fluid/tests/unittests/test_dynrnn_gradient_check.py @@ -30,9 +30,6 @@ class Memory(object): assert val.dtype == self.ex.dtype self.cur = val - def ex(self): - return self.ex - def next(self): self.ex = self.cur self.cur = None @@ -64,13 +61,13 @@ class BaseRNN(object): self.num_seq = num_seq self.inputs = collections.defaultdict(list) - for _ in xrange(num_seq): + for _ in range(num_seq): seq_len = random.randint(1, max_seq_len - 1) for iname in ins: ishape = ins[iname].get('shape', None) idtype = ins[iname].get('dtype', 'float32') lst = [] - for _ in xrange(seq_len): + for _ in range(seq_len): lst.append(numpy.random.random(size=ishape).astype(idtype))
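# ---------------------------------------------------------------------
# Editor's note (illustrative, not part of the original patch). Two
# patterns recur through the hunks around here. First, the
# xrange -> range and dict-view rewrites are Python 3 compatibility
# fixes: Python 3 removed xrange, and dict.keys()/.values() now return
# lazy, non-indexable views. Second, set_lod -> 
# set_recursive_sequence_lengths switches LoD from cumulative offsets
# to per-sequence lengths. A minimal sketch of both:
d = {'a': 1, 'b': 2}
first_key = list(d.keys())[0]  # d.keys()[0] raises TypeError on Python 3
for _ in range(3):  # replaces Python 2's xrange(3)
    pass

old_lod_offsets = [0, 2, 5]  # old API: cumulative offsets (set_lod)
new_lengths = [2, 3]  # new API: per-sequence lengths
assert [b - a
        for a, b in zip(old_lod_offsets, old_lod_offsets[1:])] == new_lengths
# ---------------------------------------------------------------------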
self.inputs[iname].append(lst) @@ -99,16 +96,16 @@ class BaseRNN(object): for out in self.outputs: retv[out] = [] - for seq_id in xrange(self.num_seq): + for seq_id in range(self.num_seq): for mname in self.mems: self.mems[mname].reset() for out in self.outputs: self.outputs[out].next_sequence() - iname0 = self.inputs.keys()[0] + iname0 = list(self.inputs.keys())[0] seq_len = len(self.inputs[iname0][seq_id]) - for step_id in xrange(seq_len): + for step_id in range(seq_len): xargs = dict() for iname in self.inputs: @@ -139,16 +136,16 @@ class BaseRNN(object): feed_dict = dict() for iname in self.inputs: - lod = [0] + lod = [] np_flatten = [] - for seq_id in xrange(len(self.inputs[iname])): + for seq_id in range(len(self.inputs[iname])): seq_len = len(self.inputs[iname][seq_id]) - lod.append(lod[-1] + seq_len) + lod.append(seq_len) np_flatten.extend(self.inputs[iname][seq_id]) t = fluid.Tensor() t.set(numpy.array(np_flatten), place) - t.set_lod([lod]) + t.set_recursive_sequence_lengths([lod]) feed_dict[iname] = t for pname in self.params: @@ -162,8 +159,8 @@ class BaseRNN(object): " which is not matrix") g = numpy.zeros(shape=p.shape, dtype=p.dtype) - for i in xrange(p.shape[0]): - for j in xrange(p.shape[1]): + for i in range(p.shape[0]): + for j in range(p.shape[1]): o = p[i][j] p[i][j] += delta pos = self._exe_mean_out_() @@ -187,7 +184,7 @@ class BaseRNN(object): if len(item.shape) != 1: raise ValueError("Not support") - for i in xrange(len(item)): + for i in range(len(item)): o = item[i] item[i] += delta pos = self._exe_mean_out_() @@ -201,14 +198,14 @@ class BaseRNN(object): if not return_one_tensor: return grad - for i in xrange(len(grad)): + for i in range(len(grad)): grad[i] = numpy.concatenate(grad[i]) grad = numpy.concatenate(grad) return grad def _exe_mean_out_(self): outs = self.exe() - return numpy.array([o.mean() for o in outs.itervalues()]).mean() + return numpy.array([o.mean() for o in outs.values()]).mean() class SeedFixedTestCase(unittest.TestCase): @@ -277,13 +274,14 @@ class TestSimpleMul(SeedFixedTestCase): cpu = fluid.CPUPlace() exe = fluid.Executor(cpu) - out, w_g, i_g = map(numpy.array, - exe.run(feed=py_rnn.to_feed(cpu), - fetch_list=[ - out, self.PARAM_NAME + "@GRAD", - self.DATA_NAME + "@GRAD" - ], - return_numpy=False)) + out, w_g, i_g = list( + map(numpy.array, + exe.run(feed=py_rnn.to_feed(cpu), + fetch_list=[ + out, self.PARAM_NAME + "@GRAD", self.DATA_NAME + + "@GRAD" + ], + return_numpy=False))) out_by_python = py_rnn.exe()[self.OUT_NAME] self.assertTrue(numpy.allclose(out, out_by_python)) w_g_num = py_rnn.get_numeric_gradient_of_param(self.PARAM_NAME) @@ -354,14 +352,15 @@ class TestSimpleMulWithMemory(SeedFixedTestCase): cpu = fluid.CPUPlace() exe = fluid.Executor(cpu) feed = py_rnn.to_feed(cpu) - last_np, w_g, i_g = map(numpy.array, - exe.run(feed=feed, - fetch_list=[ - last, self.PARAM_NAME + "@GRAD", - self.DATA_NAME + "@GRAD" - ], - return_numpy=False)) - last_by_py, = py_rnn.exe().values() + last_np, w_g, i_g = list( + map(numpy.array, + exe.run(feed=feed, + fetch_list=[ + last, self.PARAM_NAME + "@GRAD", self.DATA_NAME + + "@GRAD" + ], + return_numpy=False))) + last_by_py, = list(py_rnn.exe().values()) w_g_num = py_rnn.get_numeric_gradient_of_param(self.PARAM_NAME) self.assertTrue(numpy.allclose(last_np, last_by_py)) diff --git a/python/paddle/fluid/tests/unittests/test_dynrnn_static_input.py b/python/paddle/fluid/tests/unittests/test_dynrnn_static_input.py index d3f63ee2c4..d182889a97 100644 --- 
a/python/paddle/fluid/tests/unittests/test_dynrnn_static_input.py +++ b/python/paddle/fluid/tests/unittests/test_dynrnn_static_input.py @@ -39,20 +39,20 @@ class TestDyRnnStaticInput(unittest.TestCase): def prepare_x_tensor(self): self.x_tensor_dim = 10 - lod = [[0, 2, 3, 6]] - shape = [lod[0][-1], self.x_tensor_dim] + lod = [[2, 1, 3]] + shape = [sum(lod[0]), self.x_tensor_dim] self.x_tensor_data = np.random.random(shape).astype('float32') self.x_tensor = core.LoDTensor() - self.x_tensor.set_lod(lod) + self.x_tensor.set_recursive_sequence_lengths(lod) self.x_tensor.set(self.x_tensor_data, self.place) def prepare_static_input_tensor(self): self.static_input_tensor_dim = 4 - lod = [[0, 1, 3, 6]] - shape = [lod[0][-1], self.static_input_tensor_dim] + lod = [[1, 2, 3]] + shape = [sum(lod[0]), self.static_input_tensor_dim] self.static_input_data = np.random.random(shape).astype('float32') self.static_input_tensor = core.LoDTensor() - self.static_input_tensor.set_lod(lod) + self.static_input_tensor.set_recursive_sequence_lengths(lod) self.static_input_tensor.set(self.static_input_data, self.place) def fetch_value(self, var): @@ -65,11 +65,11 @@ class TestDyRnnStaticInput(unittest.TestCase): return self._lodtensor_to_ndarray(fetch_outs[0]) def _lodtensor_to_ndarray(self, lod_tensor): - dims = lod_tensor.get_dims() + dims = lod_tensor.shape() ndarray = np.zeros(shape=dims).astype('float32') - for i in xrange(np.product(dims)): - ndarray.ravel()[i] = lod_tensor.get_float_element(i) - return ndarray, lod_tensor.lod() + for i in range(np.product(dims)): + ndarray.ravel()[i] = lod_tensor._get_float_element(i) + return ndarray, lod_tensor.recursive_sequence_lengths() def build_graph(self, only_forward=False): x_tensor = fluid.layers.data( @@ -114,7 +114,7 @@ class TestDyRnnStaticInput(unittest.TestCase): shape=[1], dtype='int64', value=0) step_idx.stop_gradient = True - for i in xrange(self._max_sequence_len): + for i in range(self._max_sequence_len): step_out = fluid.layers.array_read(static_input_out_array, step_idx) step_out.stop_gradient = True @@ -131,39 +131,40 @@ class TestDyRnnStaticInput(unittest.TestCase): framework.grad_var_name('static_input_tensor')) return static_input_grad, loss - def get_seq_len_from_lod(self, lod): - return [lod[0][i + 1] - lod[0][i] for i in xrange(len(lod[0]) - 1)] - def get_expected_static_step_outs(self): - x_lod = self.x_tensor.lod() - x_seq_len = self.get_seq_len_from_lod(x_lod) + x_lod = self.x_tensor.recursive_sequence_lengths() + x_seq_len = x_lod[0] x_seq_len_sorted = sorted(x_seq_len) x_sorted_indices = np.argsort(x_seq_len)[::-1] - static_lod = self.static_input_tensor.lod() - static_sliced = [ - self.static_input_data[static_lod[0][i]:static_lod[0][i + 1]] - for i in xrange(len(static_lod[0]) - 1) - ] - static_seq_len = self.get_seq_len_from_lod(static_lod) + static_lod = self.static_input_tensor.recursive_sequence_lengths() + static_sliced = [] + cur_offset = 0 + for i in range(len(static_lod[0])): + static_sliced.append(self.static_input_data[cur_offset:( + cur_offset + static_lod[0][i])]) + cur_offset += static_lod[0][i] + static_seq_len = static_lod[0] static_reordered = [] - for i in xrange(len(x_sorted_indices)): + for i in range(len(x_sorted_indices)): static_reordered.extend(static_sliced[x_sorted_indices[i]].tolist()) static_seq_len_reordered = [ static_seq_len[x_sorted_indices[i]] - for i in xrange(len(x_sorted_indices)) + for i in range(len(x_sorted_indices)) ] static_step_outs = [] static_step_lods = [] - for i in 
xrange(self._max_sequence_len): + for i in range(self._max_sequence_len): end = len(x_seq_len) - bisect.bisect_left(x_seq_len_sorted, i + 1) - lod = [0] - for i in xrange(end): - lod.append(static_seq_len_reordered[i] + lod[-1]) + lod = [] + total_len = 0 + for i in range(end): + lod.append(static_seq_len_reordered[i]) + total_len += lod[-1] static_step_lods.append([lod]) - end = lod[-1] + end = total_len static_step_outs.append( np.array(static_reordered[:end]).astype('float32')) @@ -173,7 +174,7 @@ class TestDyRnnStaticInput(unittest.TestCase): static_step_outs = self.build_graph(only_forward=True) self.exe.run(framework.default_startup_program()) expected_outs, expected_lods = self.get_expected_static_step_outs() - for i in xrange(self._max_sequence_len): + for i in range(self._max_sequence_len): step_out, lod = self.fetch_value(static_step_outs[i]) self.assertTrue(np.allclose(step_out, expected_outs[i])) self.assertTrue(np.allclose(lod, expected_lods[i])) @@ -184,22 +185,24 @@ class TestDyRnnStaticInput(unittest.TestCase): actual_gradients, actual_lod = self.fetch_value(static_input_grad) - static_input_shape = self.static_input_tensor.get_dims() + static_input_shape = self.static_input_tensor.shape() numeric_gradients = np.zeros(shape=static_input_shape).astype('float32') # calculate numeric gradients tensor_size = np.product(static_input_shape) - for i in xrange(tensor_size): - origin = self.static_input_tensor.get_float_element(i) + for i in range(tensor_size): + origin = self.static_input_tensor._get_float_element(i) x_pos = origin + self._delta - self.static_input_tensor.set_float_element(i, x_pos) + self.static_input_tensor._set_float_element(i, x_pos) y_pos = self.fetch_value(loss)[0][0] x_neg = origin - self._delta - self.static_input_tensor.set_float_element(i, x_neg) + self.static_input_tensor._set_float_element(i, x_neg) y_neg = self.fetch_value(loss)[0][0] - self.static_input_tensor.set_float_element(i, origin) + self.static_input_tensor._set_float_element(i, origin) numeric_gradients.ravel()[i] = (y_pos - y_neg) / self._delta / 2 self.assertTrue(np.allclose(actual_gradients, numeric_gradients, 0.001)) - self.assertTrue(np.allclose(actual_lod, self.static_input_tensor.lod())) + self.assertTrue( + np.allclose(actual_lod, + self.static_input_tensor.recursive_sequence_lengths())) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/test_edit_distance_op.py b/python/paddle/fluid/tests/unittests/test_edit_distance_op.py index 2957fb5058..816562621b 100644 --- a/python/paddle/fluid/tests/unittests/test_edit_distance_op.py +++ b/python/paddle/fluid/tests/unittests/test_edit_distance_op.py @@ -52,23 +52,29 @@ class TestEditDistanceOp(OpTest): def setUp(self): self.op_type = "edit_distance" normalized = False - x1 = np.array([[0, 12, 3, 5, 8, 2]]).astype("int64") - x2 = np.array([[0, 12, 4, 7, 8]]).astype("int64") + x1 = np.array([[12, 3, 5, 8, 2]]).astype("int64") + x2 = np.array([[12, 4, 7, 8]]).astype("int64") x1 = np.transpose(x1) x2 = np.transpose(x2) - x1_lod = [0, 1, 5] - x2_lod = [0, 3, 4] + x1_lod = [1, 4] + x2_lod = [3, 1] - num_strs = len(x1_lod) - 1 + num_strs = len(x1_lod) distance = np.zeros((num_strs, 1)).astype("float32") sequence_num = np.array(2).astype("int64") + + x1_offset = 0 + x2_offset = 0 for i in range(0, num_strs): distance[i] = Levenshtein( - hyp=x1[x1_lod[i]:x1_lod[i + 1]], - ref=x2[x2_lod[i]:x2_lod[i + 1]]) + hyp=x1[x1_offset:(x1_offset + x1_lod[i])], + ref=x2[x2_offset:(x2_offset + x2_lod[i])]) + x1_offset += x1_lod[i] + 
x2_offset += x2_lod[i] if normalized is True: - len_ref = x2_lod[i + 1] - x2_lod[i] + len_ref = x2_lod[i] distance[i] = distance[i] / len_ref + self.attrs = {'normalized': normalized} self.inputs = {'Hyps': (x1, [x1_lod]), 'Refs': (x2, [x2_lod])} self.outputs = {'Out': distance, 'SequenceNum': sequence_num} @@ -81,23 +87,29 @@ class TestEditDistanceOpNormalized(OpTest): def setUp(self): self.op_type = "edit_distance" normalized = True - x1 = np.array([[0, 10, 3, 6, 5, 8, 2]]).astype("int64") - x2 = np.array([[0, 10, 4, 6, 7, 8]]).astype("int64") + x1 = np.array([[10, 3, 6, 5, 8, 2]]).astype("int64") + x2 = np.array([[10, 4, 6, 7, 8]]).astype("int64") x1 = np.transpose(x1) x2 = np.transpose(x2) - x1_lod = [0, 1, 3, 6] - x2_lod = [0, 2, 3, 5] + x1_lod = [1, 2, 3] + x2_lod = [2, 1, 2] - num_strs = len(x1_lod) - 1 + num_strs = len(x1_lod) distance = np.zeros((num_strs, 1)).astype("float32") sequence_num = np.array(3).astype("int64") + + x1_offset = 0 + x2_offset = 0 for i in range(0, num_strs): distance[i] = Levenshtein( - hyp=x1[x1_lod[i]:x1_lod[i + 1]], - ref=x2[x2_lod[i]:x2_lod[i + 1]]) + hyp=x1[x1_offset:(x1_offset + x1_lod[i])], + ref=x2[x2_offset:(x2_offset + x2_lod[i])]) + x1_offset += x1_lod[i] + x2_offset += x2_lod[i] if normalized is True: - len_ref = x2_lod[i + 1] - x2_lod[i] + len_ref = x2_lod[i] distance[i] = distance[i] / len_ref + self.attrs = {'normalized': normalized} self.inputs = {'Hyps': (x1, [x1_lod]), 'Refs': (x2, [x2_lod])} self.outputs = {'Out': distance, 'SequenceNum': sequence_num} diff --git a/python/paddle/fluid/tests/unittests/test_elementwise_add_mkldnn_op.py b/python/paddle/fluid/tests/unittests/test_elementwise_add_mkldnn_op.py new file mode 100644 index 0000000000..bcdbfc8e52 --- /dev/null +++ b/python/paddle/fluid/tests/unittests/test_elementwise_add_mkldnn_op.py @@ -0,0 +1,130 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import unittest +import numpy as np +import paddle.fluid.core as core +from op_test import OpTest +from test_elementwise_add_op import * +''' +Some tests differ from the tests defined in test_elementwise_add_op.py +because MKLDNN does not support tensors of number of dimensions 3. +Such dimensions cause exceptions in MKLDNN reorder primitive. 
+''' + + +class TestMKLDNNElementwiseAddOp(TestElementwiseAddOp): + def init_input_output(self): + self.x = np.random.uniform(0.1, 1, [2, 3, 4, 5]).astype(self.dtype) + self.y = np.random.uniform(0.1, 1, [2, 3, 4, 5]).astype(self.dtype) + self.out = np.add(self.x, self.y) + + def init_kernel_type(self): + self.use_mkldnn = True + + +class TestMKLDNNElementwiseAddOp_scalar(TestElementwiseAddOp_scalar): + def init_input_output(self): + self.x = np.random.rand(2, 3, 4, 5).astype(self.dtype) + self.y = np.random.rand(1).astype(self.dtype) + self.out = self.x + self.y + + def init_kernel_type(self): + self.use_mkldnn = True + + +class TestMKLDNNElementwiseAddOp_scalar2(TestElementwiseAddOp_scalar2): + def init_input_output(self): + self.x = np.random.rand(2, 3, 4, 5).astype(self.dtype) + self.y = np.random.rand(1, 1).astype(self.dtype) + self.out = self.x + self.y + + def init_kernel_type(self): + self.use_mkldnn = True + + +class TestMKLDNNElementwiseAddOp_Vector(TestElementwiseAddOp_Vector): + def init_kernel_type(self): + self.use_mkldnn = True + + +class TestMKLDNNElementwiseAddOp_broadcast_0(TestElementwiseAddOp_broadcast_0): + def init_input_output(self): + self.x = np.random.rand(2, 3, 4, 5).astype(self.dtype) + self.y = np.random.rand(2).astype(self.dtype) + self.out = self.x + self.y.reshape(2, 1, 1, 1) + + def init_kernel_type(self): + self.use_mkldnn = True + + +class TestMKLDNNElementwiseAddOp_broadcast_1(TestElementwiseAddOp_broadcast_1): + def init_input_output(self): + self.x = np.random.rand(2, 3, 4, 5).astype(self.dtype) + self.y = np.random.rand(3).astype(self.dtype) + self.out = self.x + self.y.reshape(1, 3, 1, 1) + + def init_kernel_type(self): + self.use_mkldnn = True + + +class TestMKLDNNElementwiseAddOp_broadcast_2(TestElementwiseAddOp_broadcast_2): + def init_input_output(self): + self.x = np.random.rand(2, 2, 3, 4).astype(self.dtype) + self.y = np.random.rand(4).astype(self.dtype) + self.out = self.x + self.y.reshape(1, 1, 1, 4) + + def init_kernel_type(self): + self.use_mkldnn = True + + +class TestMKLDNNElementwiseAddOp_broadcast_3(TestElementwiseAddOp_broadcast_3): + def init_kernel_type(self): + self.use_mkldnn = True + + +class TestMKLDNNElementwiseAddOp_broadcast_4(TestElementwiseAddOp_broadcast_4): + def init_kernel_type(self): + self.use_mkldnn = True + + +class TestMKLDNNElementwiseAddOp_rowwise_add_0( + TestElementwiseAddOp_rowwise_add_0): + def init_input_output(self): + self.x = np.random.rand(2, 3, 4, 5).astype(self.dtype) + self.y = np.random.rand(3, 4).astype(self.dtype) + self.out = self.x + self.y.reshape(1, 3, 4, 1) + + def init_kernel_type(self): + self.use_mkldnn = True + + +class TestMKLDNNElementwiseAddOp_rowwise_add_1( + TestElementwiseAddOp_rowwise_add_1): + def init_kernel_type(self): + self.use_mkldnn = True + + +class TestMKLDNNElementwiseAddOp_channelwise_add( + TestElementwiseAddOp_channelwise_add): + def init_input_output(self): + self.x = np.random.rand(3, 5, 20, 20).astype(self.dtype) + self.y = np.random.rand(3, 1, 1, 1).astype(self.dtype) + self.out = self.x + self.y + + def init_kernel_type(self): + self.use_mkldnn = True + + +if __name__ == '__main__': + unittest.main() diff --git a/python/paddle/fluid/tests/unittests/test_elementwise_add_op.py b/python/paddle/fluid/tests/unittests/test_elementwise_add_op.py index 1f52bd90d0..fb9a496126 100644 --- a/python/paddle/fluid/tests/unittests/test_elementwise_add_op.py +++ b/python/paddle/fluid/tests/unittests/test_elementwise_add_op.py @@ -18,19 +18,23 @@ from op_test import OpTest class
TestElementwiseAddOp(OpTest): + def init_kernel_type(self): + self.use_mkldnn = False + def setUp(self): self.op_type = "elementwise_add" self.dtype = np.float32 self.axis = -1 self.init_dtype() self.init_input_output() + self.init_kernel_type() self.init_axis() self.inputs = { 'X': OpTest.np_dtype_to_fluid_dtype(self.x), 'Y': OpTest.np_dtype_to_fluid_dtype(self.y) } - self.attrs = {'axis': self.axis} + self.attrs = {'axis': self.axis, 'use_mkldnn': self.use_mkldnn} self.outputs = {'Out': self.out} def test_check_output(self): @@ -252,5 +256,25 @@ class TestFP16ElementwiseAddOp_rowwise_add_1(TestFP16ElementwiseAddOp): self.axis = 1 +class TestElementwiseAddOp_channelwise_add(TestElementwiseAddOp): + def init_input_output(self): + self.x = np.random.rand(3, 20, 20).astype(self.dtype) + self.y = np.random.rand(3, 1, 1).astype(self.dtype) + self.out = self.x + self.y + + def init_axis(self): + self.axis = -1 + + +class TestFP16ElementwiseAddOp_channelwise_add(TestFP16ElementwiseAddOp): + def init_input_output(self): + self.x = np.random.rand(3, 10, 20).astype(self.dtype) + self.y = np.random.rand(3, 1, 1).astype(self.dtype) + self.out = self.x + self.y + + def init_axis(self): + self.axis = -1 + + if __name__ == '__main__': unittest.main() diff --git a/python/paddle/fluid/tests/unittests/test_elementwise_gradient_op.py b/python/paddle/fluid/tests/unittests/test_elementwise_gradient_op.py index c6f45381af..6f35004489 100644 --- a/python/paddle/fluid/tests/unittests/test_elementwise_gradient_op.py +++ b/python/paddle/fluid/tests/unittests/test_elementwise_gradient_op.py @@ -26,7 +26,7 @@ class TestElementWiseAddOp(unittest.TestCase): def test_with_place(place): out_grad = np.random.random_sample(self.x.shape).astype(np.float32) x_grad = out_grad - sum_axis = range(0, len(self.x.shape)) + sum_axis = list(range(0, len(self.x.shape))) del sum_axis[self.axis] y_grad = np.sum(out_grad, axis=tuple(sum_axis)) diff --git a/python/paddle/fluid/tests/unittests/test_elementwise_sub_op.py b/python/paddle/fluid/tests/unittests/test_elementwise_sub_op.py index acf652d3fb..1854232194 100644 --- a/python/paddle/fluid/tests/unittests/test_elementwise_sub_op.py +++ b/python/paddle/fluid/tests/unittests/test_elementwise_sub_op.py @@ -20,8 +20,8 @@ class TestElementwiseOp(OpTest): def setUp(self): self.op_type = "elementwise_sub" self.inputs = { - 'X': np.random.uniform(0.1, 1, [13, 17]).astype("float32"), - 'Y': np.random.uniform(0.1, 1, [13, 17]).astype("float32") + 'X': np.random.uniform(0.1, 1, [2, 3]).astype("float32"), + 'Y': np.random.uniform(0.1, 1, [2, 3]).astype("float32") } self.outputs = {'Out': self.inputs['X'] - self.inputs['Y']} diff --git a/python/paddle/fluid/tests/unittests/test_extract_rows_op.py b/python/paddle/fluid/tests/unittests/test_extract_rows_op.py new file mode 100644 index 0000000000..6a41c44fe6 --- /dev/null +++ b/python/paddle/fluid/tests/unittests/test_extract_rows_op.py @@ -0,0 +1,58 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +import unittest +import numpy as np +import paddle.fluid.core as core +from paddle.fluid.op import Operator +from op_test import OpTest + + +class TestExtractRows(OpTest): + def check_with_place(self, place): + scope = core.Scope() + + # create and initialize Variable + feature_len = 12 + rows = [0, 4, 4, 7] + np_array = np.ones((len(rows), feature_len)).astype("float32") + + in_x = scope.var('X').get_selected_rows() + in_x.set_height(len(rows)) + in_x.set_rows(rows) + in_x_tensor = in_x.get_tensor() + in_x_tensor.set(np_array, place) + + # create Out Variable + out_tensor = scope.var('Out').get_tensor() + + # create and run extract_rows operator + extract_rows_op = Operator("extract_rows", X='X', Out='Out') + extract_rows_op.run(scope, place) + + # get result from Out + result_array = np.array(out_tensor) + result_array = [ele[0] for ele in result_array] + assert result_array == rows + + def test_extract_rows(self): + places = [core.CPUPlace()] + if core.is_compiled_with_cuda(): + places.append(core.CUDAPlace(0)) + for place in places: + self.check_with_place(place) + + +if __name__ == '__main__': + unittest.main() diff --git a/python/paddle/fluid/tests/unittests/test_fake_dequantize_op.py b/python/paddle/fluid/tests/unittests/test_fake_dequantize_op.py new file mode 100644 index 0000000000..026ac2112b --- /dev/null +++ b/python/paddle/fluid/tests/unittests/test_fake_dequantize_op.py @@ -0,0 +1,59 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import unittest +import numpy as np +import math +from op_test import OpTest + + +def quantize_max_abs(x, num_bits): + range = math.pow(2, num_bits) - 1 + scale = np.max(np.abs(x).flatten()) + y = np.round(x / scale * range) + return y, scale + + +def dequantize_max_abs(x, num_bits, scale): + range = math.pow(2, num_bits) - 1 + y = (scale / range) * x + return y + + +class TestFakeDequantizeMaxAbsOp(OpTest): + def set_args(self): + self.num_bits = 8 + + def setUp(self): + self.set_args() + self.op_type = "fake_dequantize_max_abs" + x = np.random.randn(31, 65).astype("float32") + yq, scale = quantize_max_abs(x, self.num_bits) + ydq = dequantize_max_abs(yq, self.num_bits, scale) + + self.inputs = {'X': yq} + self.attrs = {'num_bits': self.num_bits, 'scale': float(scale)} + self.outputs = {'Out': ydq} + + def test_check_output(self): + self.check_output() + + +class TestFakeDequantizeMaxAbsOp5Bits(TestFakeDequantizeMaxAbsOp): + def set_args(self): + self.num_bits = 5 + + +if __name__ == "__main__": + unittest.main() diff --git a/python/paddle/fluid/tests/unittests/test_fake_quantize_op.py b/python/paddle/fluid/tests/unittests/test_fake_quantize_op.py new file mode 100644 index 0000000000..6c6aa9d3bb --- /dev/null +++ b/python/paddle/fluid/tests/unittests/test_fake_quantize_op.py @@ -0,0 +1,51 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import unittest +import numpy as np +from op_test import OpTest + + +class TestFakeQuantizeOp(OpTest): + def setUp(self): + self.op_type = "fake_quantize" + self.attrs = { + 'bit_length': 8, + 'quantize_type': 'abs_max', + 'window_size': 10000 + } + self.inputs = { + 'X': np.random.random((10, 10)).astype("float32"), + 'InScales': np.zeros(self.attrs['window_size']).astype("float32"), + 'InCurrentIter': np.zeros(1).astype("float32"), + 'InMovingScale': np.zeros(1).astype("float32") + } + self.scale = { + 'abs_max': np.max(np.abs(self.inputs['X'])).astype("float32") + } + self.outputs = { + 'Out': np.round(self.inputs['X'] / self.scale['abs_max'] * ( + (1 << (self.attrs['bit_length'] - 1)) - 1)), + 'OutScales': np.zeros(self.attrs['window_size']).astype("float32"), + 'OutMovingScale': + np.array([self.scale['abs_max']]).astype("float32"), + 'OutCurrentIter': np.zeros(1).astype("float32") + } + + def test_check_output(self): + self.check_output() + + +if __name__ == "__main__": + unittest.main() diff --git a/python/paddle/fluid/tests/unittests/test_feed_fetch_method.py b/python/paddle/fluid/tests/unittests/test_feed_fetch_method.py index 9d724a6479..8b9da84311 100644 --- a/python/paddle/fluid/tests/unittests/test_feed_fetch_method.py +++ b/python/paddle/fluid/tests/unittests/test_feed_fetch_method.py @@ -24,17 +24,16 @@ class TestFeedFetch(unittest.TestCase): input_array = np.ones((4, 4, 6)).astype("float32") input_array[0, 0, 0] = 3 input_array[3, 3, 5] = 10 - input_tensor = core.LoDTensor([[0, 2, 4]]) + input_tensor = core.LoDTensor([[2, 2]]) input_tensor.set(input_array, place) core.set_feed_variable(scope, input_tensor, "feed", 0) output_tensor = core.get_fetch_variable(scope, "feed", 0) - output_lod = output_tensor.lod() - self.assertEqual(0, output_lod[0][0]) + output_lod = output_tensor.recursive_sequence_lengths() + self.assertEqual(2, output_lod[0][0]) self.assertEqual(2, output_lod[0][1]) - self.assertEqual(4, output_lod[0][2]) output_array = np.array(output_tensor) self.assertEqual(3, output_array[0, 0, 0]) diff --git a/python/paddle/fluid/tests/unittests/test_fetch_var.py b/python/paddle/fluid/tests/unittests/test_fetch_var.py index 46c3bbb671..e6f37f0b4c 100644 --- a/python/paddle/fluid/tests/unittests/test_fetch_var.py +++ b/python/paddle/fluid/tests/unittests/test_fetch_var.py @@ -26,7 +26,7 @@ class TestFetchVar(op_test.OpTest): layers.assign(input=val, output=x) exe = fluid.Executor(fluid.CPUPlace()) exe.run(fluid.default_main_program(), feed={}, fetch_list=[]) - fetched_x = fluid.fetch_var("x") + fetched_x = fluid.executor._fetch_var("x") self.assertTrue( numpy.array_equal(fetched_x, val), "fetch_x=%s val=%s" % (fetched_x, val)) diff --git a/python/paddle/fluid/tests/unittests/test_fill_constant_batch_size_like_op.py b/python/paddle/fluid/tests/unittests/test_fill_constant_batch_size_like_op.py index 66e3e2d51d..0c75cf33f5 100644 --- a/python/paddle/fluid/tests/unittests/test_fill_constant_batch_size_like_op.py +++ 
b/python/paddle/fluid/tests/unittests/test_fill_constant_batch_size_like_op.py @@ -50,5 +50,27 @@ class TestFillConstantBatchSizeLikeWhenSecondDimIsBatchSize(OpTest): self.check_output() + +class TestFillConstantBatchSizeLikeWithLoDTensor(OpTest): + def setUp(self): + self.op_type = "fill_constant_batch_size_like" + self.inputs = { + 'Input': (np.random.random((31, 28)).astype("float32"), + [[9, 14, 8]]) + } + self.attrs = { + 'value': 3.5, + 'shape': [-1, 16], + 'input_dim_idx': 0, + 'output_dim_idx': 0 + } + + out = np.random.random((3, 16)).astype("float32") + out.fill(3.5) + self.outputs = {'Out': out} + + def test_check_output(self): + self.check_output() + + if __name__ == "__main__": unittest.main() diff --git a/python/paddle/fluid/tests/unittests/test_flatten_op.py b/python/paddle/fluid/tests/unittests/test_flatten_op.py new file mode 100644 index 0000000000..f8692ce2ea --- /dev/null +++ b/python/paddle/fluid/tests/unittests/test_flatten_op.py @@ -0,0 +1,68 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import unittest +import numpy as np + +from op_test import OpTest + + +class TestFlattenOp(OpTest): + def setUp(self): + self.op_type = "flatten" + self.init_test_case() + self.inputs = {"X": np.random.random(self.in_shape).astype("float32")} + self.init_attrs() + self.outputs = {"Out": self.inputs["X"].reshape(self.new_shape)} + + def test_check_output(self): + self.check_output() + + def test_check_grad(self): + self.check_grad(["X"], "Out") + + def init_test_case(self): + self.in_shape = (3, 2, 2, 5) + self.axis = 1 + self.new_shape = (3, 20) + + def init_attrs(self): + self.attrs = {"axis": self.axis} + + +class TestFlattenOpAxis0(TestFlattenOp): + def init_test_case(self): + self.in_shape = (3, 2, 2, 3) + self.axis = 0 + self.new_shape = (1, 36) + + +class TestFlattenOpWithDefaultAxis(TestFlattenOp): + def init_test_case(self): + self.in_shape = (3, 2, 2, 3) + self.new_shape = (3, 12) + + def init_attrs(self): + self.attrs = {} + + +class TestFlattenOpSixDims(TestFlattenOp): + def init_test_case(self): + self.in_shape = (3, 2, 3, 2, 4, 4) + self.axis = 4 + self.new_shape = (36, 16) + + +if __name__ == "__main__": + unittest.main() diff --git a/python/paddle/fluid/tests/unittests/test_fused_elemwise_activation_op.py b/python/paddle/fluid/tests/unittests/test_fused_elemwise_activation_op.py new file mode 100644 index 0000000000..ec0a939e9e --- /dev/null +++ b/python/paddle/fluid/tests/unittests/test_fused_elemwise_activation_op.py @@ -0,0 +1,818 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import unittest +import numpy as np +import paddle.fluid.core as core +from op_test import OpTest + +# scale + add +# TestElementwiseAddOp +# TestFusedOperatorsOp_scalar +# TestFusedOperatorsOp_scalar2 +# TestFusedOperatorsOp_Vector +# TestFusedOperatorsOp_broadcast_0 +# TestFusedOperatorsOp_broadcast_1 +# TestFusedOperatorsOp_broadcast_2 +# TestFusedOperatorsOp_broadcast_3 +# TestFusedOperatorsOp_broadcast_4 +# TestFusedOperatorsOp_rowwise_add_0 +# TestFusedOperatorsOp_rowwise_add_1 +# TestFusedOperatorsOp_channelwise_add + + +class TestElementwiseAddOp(OpTest): + def setUp(self): + self.op_type = "fused_elemwise_activation" + self.dtype = np.float32 + self.axis = -1 + + self.init_axis() + self.init_dtype() + self.init_input() + self.init_output() + self.init_attr() + + self.inputs = { + 'X': OpTest.np_dtype_to_fluid_dtype(self.x), + 'Y': OpTest.np_dtype_to_fluid_dtype(self.y) + } + self.outputs = {'Out': self.out} + + def init_input(self): + self.x = np.random.uniform(0.1, 1, [13, 17]).astype(self.dtype) + self.y = np.random.uniform(0.1, 1, [13, 17]).astype(self.dtype) + + def init_output(self): + self.scale = 0.1 + self.out = (self.x + self.y) * self.scale + + def init_attr(self): + self.attrs = { + 'axis': self.axis, + 'scale': self.scale, + 'functor_list': ["scale", "elementwise_add"] + } + + def init_dtype(self): + pass + + def init_axis(self): + pass + + def test_check_output(self): + self.check_output() + + def test_check_grad_normal(self): + self.check_grad(['X', 'Y'], 'Out', max_relative_error=0.005) + + def test_check_grad_ignore_x(self): + self.check_grad( + ['Y'], 'Out', max_relative_error=0.005, no_grad_set=set("X")) + + def test_check_grad_ignore_y(self): + self.check_grad( + ['X'], 'Out', max_relative_error=0.005, no_grad_set=set('Y')) + + +class TestFusedOperatorsOp_scalar(TestElementwiseAddOp): + def init_input(self): + self.x = np.random.rand(2, 3, 4).astype(self.dtype) + self.y = np.random.rand(1).astype(self.dtype) + + def init_output(self): + self.scale = 0.1 + self.out = (self.x + self.y) * self.scale + + +class TestFusedOperatorsOp_scalar2(TestElementwiseAddOp): + def init_input(self): + self.x = np.random.rand(2, 3, 4).astype(self.dtype) + self.y = np.random.rand(1, 1).astype(self.dtype) + + def init_output(self): + self.scale = 0.1 + self.out = (self.x + self.y) * self.scale + + +class TestFusedOperatorsOp_Vector(TestElementwiseAddOp): + def init_input(self): + self.x = np.random.random((32, )).astype(self.dtype) + self.y = np.random.random((32, )).astype(self.dtype) + + def init_output(self): + self.scale = 0.1 + self.out = (self.x + self.y) * self.scale + + +class TestFusedOperatorsOp_broadcast_0(TestElementwiseAddOp): + def init_input(self): + self.x = np.random.rand(2, 3, 4).astype(self.dtype) + self.y = np.random.rand(2).astype(self.dtype) + + def init_axis(self): + self.axis = 0 + + def init_output(self): + self.scale = 0.1 + self.out = (self.x + self.y.reshape(2, 1, 1)) * self.scale + + +class TestFusedOperatorsOp_broadcast_1(TestElementwiseAddOp): + def init_input(self): + self.x = np.random.rand(2, 3, 4).astype(self.dtype) + self.y =
np.random.rand(3).astype(self.dtype) + + def init_axis(self): + self.axis = 1 + + def init_output(self): + self.scale = 0.1 + self.out = (self.x + self.y.reshape(1, 3, 1)) * self.scale + + +class TestFusedOperatorsOp_broadcast_2(TestElementwiseAddOp): + def init_input(self): + self.x = np.random.rand(2, 3, 4).astype(self.dtype) + self.y = np.random.rand(4).astype(self.dtype) + + def init_output(self): + self.scale = 0.1 + self.out = (self.x + self.y.reshape(1, 1, 4)) * self.scale + + +class TestFusedOperatorsOp_broadcast_3(TestElementwiseAddOp): + def init_input(self): + self.x = np.random.rand(2, 3, 4, 5).astype(self.dtype) + self.y = np.random.rand(3, 4).astype(self.dtype) + + def init_axis(self): + self.axis = 1 + + def init_output(self): + self.scale = 0.1 + self.out = (self.x + self.y.reshape(1, 3, 4, 1)) * self.scale + + +class TestFusedOperatorsOp_broadcast_4(TestElementwiseAddOp): + def init_input(self): + self.x = np.random.rand(2, 3, 4, 5).astype(self.dtype) + self.y = np.random.rand(2, 1).astype(self.dtype) + + def init_axis(self): + self.axis = 0 + + def init_output(self): + self.scale = 0.1 + self.out = (self.x + self.y.reshape(2, 1, 1, 1)) * self.scale + + +class TestFusedOperatorsOp_rowwise_add_0(TestElementwiseAddOp): + def init_input(self): + self.x = np.random.rand(2, 3, 4).astype(self.dtype) + self.y = np.random.rand(3, 4).astype(self.dtype) + + def init_axis(self): + self.axis = 1 + + def init_output(self): + self.scale = 0.1 + self.out = (self.x + self.y.reshape(1, 3, 4)) * self.scale + + +class TestFusedOperatorsOp_rowwise_add_1(TestElementwiseAddOp): + def init_input(self): + self.x = np.random.rand(2, 1).astype(self.dtype) + self.y = np.random.rand(1).astype(self.dtype) + + def init_axis(self): + self.axis = 1 + + def init_output(self): + self.scale = 0.1 + self.out = (self.x + self.y.reshape(1, 1)) * self.scale + + +class TestFusedOperatorsOp_channelwise_add(TestElementwiseAddOp): + def init_input(self): + self.x = np.random.rand(3, 20, 20).astype(self.dtype) + self.y = np.random.rand(3, 1, 1).astype(self.dtype) + + def init_axis(self): + self.axis = -1 + + def init_output(self): + self.scale = 0.1 + self.out = (self.x + self.y) * self.scale + + +# add + scale +# TestElementwiseAddOp_f_add_scale +# TestFusedOperatorsOp_scalar_f_add_scale +# TestFusedOperatorsOp_scalar2_f_add_scale +# TestFusedOperatorsOp_Vector_f_add_scale +# TestFusedOperatorsOp_broadcast_0_f_add_scale +# TestFusedOperatorsOp_broadcast_1_f_add_scale +# TestFusedOperatorsOp_broadcast_2_f_add_scale +# TestFusedOperatorsOp_broadcast_3_f_add_scale +# TestFusedOperatorsOp_broadcast_4_f_add_scale +# TestFusedOperatorsOp_rowwise_add_0_f_add_scale +# TestFusedOperatorsOp_rowwise_add_1_f_add_scale +# TestFusedOperatorsOp_channelwise_add_f_add_scale + + +class TestFusedOperatorsOp_f_add_scale(TestElementwiseAddOp): + def init_output(self): + self.scale = 0.1 + self.out = self.x + self.y * self.scale + + def init_attr(self): + self.attrs = { + 'axis': self.axis, + 'scale': self.scale, + 'functor_list': ["elementwise_add", "scale"] + } + + +class TestFusedOperatorsOp_scalar_f_add_scale(TestFusedOperatorsOp_scalar): + def init_output(self): + self.scale = 0.1 + self.out = self.x + self.y * self.scale + + def init_attr(self): + self.attrs = { + 'axis': self.axis, + 'scale': self.scale, + 'functor_list': ["elementwise_add", "scale"] + } + + +class TestFusedOperatorsOp_scalar2_f_add_scale(TestFusedOperatorsOp_scalar2): + def init_output(self): + self.scale = 0.1 + self.out = self.x + self.y * self.scale + + def 
init_attr(self): + self.attrs = { + 'axis': self.axis, + 'scale': self.scale, + 'functor_list': ["elementwise_add", "scale"] + } + + +class TestFusedOperatorsOp_Vector_f_add_scale(TestFusedOperatorsOp_Vector): + def init_output(self): + self.scale = 0.1 + self.out = self.x + self.y * self.scale + + def init_attr(self): + self.attrs = { + 'axis': self.axis, + 'scale': self.scale, + 'functor_list': ["elementwise_add", "scale"] + } + + +class TestFusedOperatorsOp_broadcast_0_f_add_scale( + TestFusedOperatorsOp_broadcast_0): + def init_axis(self): + self.axis = 0 + + def init_output(self): + self.scale = 0.1 + self.out = self.x + self.y.reshape(2, 1, 1) * self.scale + + def init_attr(self): + self.attrs = { + 'axis': self.axis, + 'scale': self.scale, + 'functor_list': ["elementwise_add", "scale"] + } + + +class TestFusedOperatorsOp_broadcast_1_f_add_scale( + TestFusedOperatorsOp_broadcast_1): + def init_axis(self): + self.axis = 1 + + def init_output(self): + self.scale = 0.1 + self.out = self.x + self.y.reshape(1, 3, 1) * self.scale + + def init_attr(self): + self.attrs = { + 'axis': self.axis, + 'scale': self.scale, + 'functor_list': ["elementwise_add", "scale"] + } + + +class TestFusedOperatorsOp_broadcast_2_f_add_scale( + TestFusedOperatorsOp_broadcast_2): + def init_output(self): + self.scale = 0.1 + self.out = self.x + self.y.reshape(1, 1, 4) * self.scale + + def init_attr(self): + self.attrs = { + 'axis': self.axis, + 'scale': self.scale, + 'functor_list': ["elementwise_add", "scale"] + } + + +class TestFusedOperatorsOp_broadcast_3_f_add_scale( + TestFusedOperatorsOp_broadcast_3): + def init_axis(self): + self.axis = 1 + + def init_output(self): + self.scale = 0.1 + self.out = self.x + self.y.reshape(1, 3, 4, 1) * self.scale + + def init_attr(self): + self.attrs = { + 'axis': self.axis, + 'scale': self.scale, + 'functor_list': ["elementwise_add", "scale"] + } + + +class TestFusedOperatorsOp_broadcast_4_f_add_scale( + TestFusedOperatorsOp_broadcast_4): + def init_axis(self): + self.axis = 0 + + def init_output(self): + self.scale = 0.2 + self.out = self.x + self.y.reshape(2, 1, 1, 1) * self.scale + + def init_attr(self): + self.attrs = { + 'axis': self.axis, + 'scale': self.scale, + 'functor_list': ["elementwise_add", "scale"] + } + + +class TestFusedOperatorsOp_rowwise_add_0_f_add_scale( + TestFusedOperatorsOp_rowwise_add_0): + def init_axis(self): + self.axis = 1 + + def init_output(self): + self.scale = 0.1 + self.out = self.x + self.y.reshape(1, 3, 4) * self.scale + + def init_attr(self): + self.attrs = { + 'axis': self.axis, + 'scale': self.scale, + 'functor_list': ["elementwise_add", "scale"] + } + + +class TestFusedOperatorsOp_rowwise_add_1_f_add_scale( + TestFusedOperatorsOp_rowwise_add_1): + def init_axis(self): + self.axis = 1 + + def init_output(self): + self.scale = 0.2 + self.out = self.x + self.y.reshape(1, 1) * self.scale + + def init_attr(self): + self.attrs = { + 'axis': self.axis, + 'scale': self.scale, + 'functor_list': ["elementwise_add", "scale"] + } + + +class TestFusedOperatorsOp_channelwise_add_f_add_scale( + TestFusedOperatorsOp_channelwise_add): + def init_axis(self): + self.axis = -1 + + def init_output(self): + self.scale = 0.2 + self.out = self.x + self.y * self.scale + + def init_attr(self): + self.attrs = { + 'axis': self.axis, + 'scale': self.scale, + 'functor_list': ["elementwise_add", "scale"] + } + + +# add + relu +# TestElementwiseAddOp_f_add_relu +# TestFusedOperatorsOp_scalar_f_add_relu +# TestFusedOperatorsOp_scalar2_f_add_relu +# 
TestFusedOperatorsOp_Vector_f_add_relu +# TestFusedOperatorsOp_broadcast_0_f_add_relu +# TestFusedOperatorsOp_broadcast_1_f_add_relu +# TestFusedOperatorsOp_broadcast_2_f_add_relu +# TestFusedOperatorsOp_broadcast_3_f_add_relu +# TestFusedOperatorsOp_broadcast_4_f_add_relu +# TestFusedOperatorsOp_rowwise_add_0_f_add_relu +# TestFusedOperatorsOp_rowwise_add_1_f_add_relu +# TestFusedOperatorsOp_channelwise_add_f_add_relu + + +class TestFusedOperatorsOp_f_add_relu(TestElementwiseAddOp): + def init_output(self): + # Copy from test_activation_op.py + # Because we set delta = 0.005 in calculating numeric gradient, + # if x is too small, such as 0.002, x_neg will be -0.003 + # x_pos will be 0.007, so the numeric gradient is inaccurate. + # we should avoid this + self.y[np.abs(self.y) < 0.005] = 0.02 + self.out = self.x + np.maximum(self.y, 0) + + def init_attr(self): + self.attrs = { + 'axis': self.axis, + 'functor_list': ["elementwise_add", "relu"] + } + + +class TestFusedOperatorsOp_scalar_f_add_relu(TestFusedOperatorsOp_scalar): + def init_output(self): + self.y[np.abs(self.y) < 0.005] = 0.02 + self.out = self.x + np.maximum(self.y, 0) + + def init_attr(self): + self.attrs = { + 'axis': self.axis, + 'functor_list': ["elementwise_add", "relu"] + } + + +class TestFusedOperatorsOp_scalar2_f_add_relu(TestFusedOperatorsOp_scalar2): + def init_output(self): + self.y[np.abs(self.y) < 0.005] = 0.02 + self.out = self.x + np.maximum(self.y, 0) + + def init_attr(self): + self.attrs = { + 'axis': self.axis, + 'functor_list': ["elementwise_add", "relu"] + } + + +class TestFusedOperatorsOp_Vector_f_add_relu(TestFusedOperatorsOp_Vector): + def init_output(self): + self.y[np.abs(self.y) < 0.005] = 0.02 + self.out = self.x + np.maximum(self.y, 0) + + def init_attr(self): + self.attrs = { + 'axis': self.axis, + 'functor_list': ["elementwise_add", "relu"] + } + + +class TestFusedOperatorsOp_broadcast_0_f_add_relu( + TestFusedOperatorsOp_broadcast_0): + def init_axis(self): + self.axis = 0 + + def init_output(self): + self.y[np.abs(self.y) < 0.005] = 0.02 + self.out = self.x + np.maximum(self.y.reshape(2, 1, 1), 0) + + def init_attr(self): + self.attrs = { + 'axis': self.axis, + 'functor_list': ["elementwise_add", "relu"] + } + + +class TestFusedOperatorsOp_broadcast_1_f_add_relu( + TestFusedOperatorsOp_broadcast_1): + def init_axis(self): + self.axis = 1 + + def init_output(self): + self.y[np.abs(self.y) < 0.005] = 0.02 + self.out = self.x + np.maximum(self.y.reshape(1, 3, 1), 0) + + def init_attr(self): + self.attrs = { + 'axis': self.axis, + 'functor_list': ["elementwise_add", "relu"] + } + + +class TestFusedOperatorsOp_broadcast_2_f_add_relu( + TestFusedOperatorsOp_broadcast_2): + def init_output(self): + self.y[np.abs(self.y) < 0.005] = 0.02 + self.out = self.x + np.maximum(self.y.reshape(1, 1, 4), 0) + + def init_attr(self): + self.attrs = { + 'axis': self.axis, + 'functor_list': ["elementwise_add", "relu"] + } + + +class TestFusedOperatorsOp_broadcast_3_f_add_relu( + TestFusedOperatorsOp_broadcast_3): + def init_axis(self): + self.axis = 1 + + def init_output(self): + self.y[np.abs(self.y) < 0.005] = 0.02 + self.out = self.x + np.maximum(self.y.reshape(1, 3, 4, 1), 0) + + def init_attr(self): + self.attrs = { + 'axis': self.axis, + 'functor_list': ["elementwise_add", "relu"] + } + + +class TestFusedOperatorsOp_broadcast_4_f_add_relu( + TestFusedOperatorsOp_broadcast_4): + def init_axis(self): + self.axis = 0 + + def init_output(self): + self.y[np.abs(self.y) < 0.005] = 0.02 + self.out = self.x + 
np.maximum(self.y.reshape(2, 1, 1, 1), 0) + + def init_attr(self): + self.attrs = { + 'axis': self.axis, + 'functor_list': ["elementwise_add", "relu"] + } + + +class TestFusedOperatorsOp_rowwise_add_0_f_add_relu( + TestFusedOperatorsOp_rowwise_add_0): + def init_axis(self): + self.axis = 1 + + def init_output(self): + self.y[np.abs(self.y) < 0.005] = 0.02 + self.out = self.x + np.maximum(self.y.reshape(1, 3, 4), 0) + + def init_attr(self): + self.attrs = { + 'axis': self.axis, + 'functor_list': ["elementwise_add", "relu"] + } + + +class TestFusedOperatorsOp_rowwise_add_1_f_add_relu( + TestFusedOperatorsOp_rowwise_add_1): + def init_axis(self): + self.axis = 1 + + def init_output(self): + self.y[np.abs(self.y) < 0.005] = 0.02 + self.out = self.x + np.maximum(self.y.reshape(1, 1), 0) + + def init_attr(self): + self.attrs = { + 'axis': self.axis, + 'functor_list': ["elementwise_add", "relu"] + } + + +class TestFusedOperatorsOp_channelwise_add_f_add_relu( + TestFusedOperatorsOp_channelwise_add): + def init_axis(self): + self.axis = -1 + + def init_output(self): + self.y[np.abs(self.y) < 0.005] = 0.02 + self.out = self.x + np.maximum(self.y, 0) + + def init_attr(self): + self.attrs = { + 'axis': self.axis, + 'functor_list': ["elementwise_add", "relu"] + } + + +# relu + add +# TestElementwiseAddOp_f_relu_add +# TestFusedOperatorsOp_scalar_f_relu_add +# TestFusedOperatorsOp_scalar2_f_relu_add +# TestFusedOperatorsOp_Vector_f_relu_add +# TestFusedOperatorsOp_broadcast_0_f_relu_add +# TestFusedOperatorsOp_broadcast_1_f_relu_add +# TestFusedOperatorsOp_broadcast_2_f_relu_add +# TestFusedOperatorsOp_broadcast_3_f_relu_add +# TestFusedOperatorsOp_broadcast_4_f_relu_add +# TestFusedOperatorsOp_rowwise_add_0_f_relu_add +# TestFusedOperatorsOp_rowwise_add_1_f_relu_add +# TestFusedOperatorsOp_channelwise_add_f_relu_add + + +class TestFusedOperatorsOp_f_relu_add(TestElementwiseAddOp): + def init_output(self): + # Copy from test_activation_op.py + # Because we set delta = 0.005 in calculating numeric gradient, + # if x is too small, such as 0.002, x_neg will be -0.003 + # x_pos will be 0.007, so the numeric gradient is inaccurate. 
+ # we should avoid this + self.out = self.x + self.y + self.out = np.maximum(self.out, 0) + self.out[np.abs(self.out) < 0.005] = 0.02 + + def init_attr(self): + self.attrs = { + 'axis': self.axis, + 'functor_list': ["relu", "elementwise_add"] + } + + +class TestFusedOperatorsOp_scalar_f_relu_add(TestFusedOperatorsOp_scalar): + def init_output(self): + self.out = self.x + self.y + self.out = np.maximum(self.out, 0) + self.out[np.abs(self.out) < 0.005] = 0.02 + + def init_attr(self): + self.attrs = { + 'axis': self.axis, + 'functor_list': ["relu", "elementwise_add"] + } + + +class TestFusedOperatorsOp_scalar2_f_relu_add(TestFusedOperatorsOp_scalar2): + def init_output(self): + self.out = self.x + self.y + self.out = np.maximum(self.out, 0) + self.out[np.abs(self.out) < 0.005] = 0.02 + + def init_attr(self): + self.attrs = { + 'axis': self.axis, + 'functor_list': ["relu", "elementwise_add"] + } + + +class TestFusedOperatorsOp_Vector_f_relu_add(TestFusedOperatorsOp_Vector): + def init_output(self): + self.out = self.x + self.y + self.out = np.maximum(self.out, 0) + self.out[np.abs(self.out) < 0.005] = 0.02 + + def init_attr(self): + self.attrs = { + 'axis': self.axis, + 'functor_list': ["relu", "elementwise_add"] + } + + +class TestFusedOperatorsOp_broadcast_0_f_relu_add( + TestFusedOperatorsOp_broadcast_0): + def init_axis(self): + self.axis = 0 + + def init_output(self): + self.out = self.x + self.y.reshape(2, 1, 1) + self.out = np.maximum(self.out, 0) + self.out[np.abs(self.out) < 0.005] = 0.02 + + def init_attr(self): + self.attrs = { + 'axis': self.axis, + 'functor_list': ["relu", "elementwise_add"] + } + + +class TestFusedOperatorsOp_broadcast_1_f_relu_add( + TestFusedOperatorsOp_broadcast_1): + def init_axis(self): + self.axis = 1 + + def init_output(self): + self.out = self.x + self.y.reshape(1, 3, 1) + self.out = np.maximum(self.out, 0) + self.out[np.abs(self.out) < 0.005] = 0.02 + + def init_attr(self): + self.attrs = { + 'axis': self.axis, + 'functor_list': ["relu", "elementwise_add"] + } + + +class TestFusedOperatorsOp_broadcast_2_f_relu_add( + TestFusedOperatorsOp_broadcast_2): + def init_output(self): + self.out = self.x + self.y.reshape(1, 1, 4) + self.out = np.maximum(self.out, 0) + self.out[np.abs(self.out) < 0.005] = 0.02 + + def init_attr(self): + self.attrs = { + 'axis': self.axis, + 'functor_list': ["relu", "elementwise_add"] + } + + +class TestFusedOperatorsOp_broadcast_3_f_relu_add( + TestFusedOperatorsOp_broadcast_3): + def init_axis(self): + self.axis = 1 + + def init_output(self): + self.out = self.x + self.y.reshape(1, 3, 4, 1) + self.out = np.maximum(self.out, 0) + self.out[np.abs(self.out) < 0.005] = 0.02 + + def init_attr(self): + self.attrs = { + 'axis': self.axis, + 'functor_list': ["relu", "elementwise_add"] + } + + +class TestFusedOperatorsOp_broadcast_4_f_relu_add( + TestFusedOperatorsOp_broadcast_4): + def init_axis(self): + self.axis = 0 + + def init_output(self): + self.out = self.x + self.y.reshape(2, 1, 1, 1) + self.out = np.maximum(self.out, 0) + self.out[np.abs(self.out) < 0.005] = 0.02 + + def init_attr(self): + self.attrs = { + 'axis': self.axis, + 'functor_list': ["relu", "elementwise_add"] + } + + +class TestFusedOperatorsOp_rowwise_add_0_f_relu_add( + TestFusedOperatorsOp_rowwise_add_0): + def init_axis(self): + self.axis = 1 + + def init_output(self): + self.out = self.x + self.y.reshape(1, 3, 4) + self.out = np.maximum(self.out, 0) + self.out[np.abs(self.out) < 0.005] = 0.02 + + def init_attr(self): + self.attrs = { + 'axis': self.axis, + 
'functor_list': ["relu", "elementwise_add"] + } + + +class TestFusedOperatorsOp_rowwise_add_1_f_relu_add( + TestFusedOperatorsOp_rowwise_add_1): + def init_axis(self): + self.axis = 1 + + def init_output(self): + self.out = self.x + self.y.reshape(1, 1) + self.out = np.maximum(self.out, 0) + self.out[np.abs(self.out) < 0.005] = 0.02 + + def init_attr(self): + self.attrs = { + 'axis': self.axis, + 'functor_list': ["relu", "elementwise_add"] + } + + +class TestFusedOperatorsOp_channelwise_add_f_relu_add( + TestFusedOperatorsOp_channelwise_add): + def init_axis(self): + self.axis = -1 + + def init_output(self): + self.out = self.x + self.y + self.out = np.maximum(self.out, 0) + self.out[np.abs(self.out) < 0.005] = 0.02 + + def init_attr(self): + self.attrs = { + 'axis': self.axis, + 'functor_list': ["relu", "elementwise_add"] + } + + +if __name__ == '__main__': + unittest.main() diff --git a/python/paddle/fluid/tests/unittests/test_gather_op.py b/python/paddle/fluid/tests/unittests/test_gather_op.py index 6fd043c27e..4ae9086480 100644 --- a/python/paddle/fluid/tests/unittests/test_gather_op.py +++ b/python/paddle/fluid/tests/unittests/test_gather_op.py @@ -20,8 +20,9 @@ from op_test import OpTest class TestGatherOp(OpTest): def setUp(self): self.op_type = "gather" - xnp = np.random.random((10, 20)).astype("float32") - self.inputs = {'X': xnp, 'Index': np.array([1, 3, 5]).astype("int32")} + self.config() + xnp = np.random.random(self.x_shape).astype("float32") + self.inputs = {'X': xnp, 'Index': np.array(self.index).astype("int32")} self.outputs = {'Out': self.inputs["X"][self.inputs["Index"]]} def test_check_output(self): @@ -30,6 +31,16 @@ class TestGatherOp(OpTest): def test_check_grad(self): self.check_grad(['X'], 'Out') + def config(self): + self.x_shape = (10, 20) + self.index = [1, 3, 5] + + +class TestCase1(TestGatherOp): + def config(self): + self.x_shape = (10) + self.index = [1, 3, 5] + if __name__ == "__main__": unittest.main() diff --git a/python/paddle/fluid/tests/unittests/test_gaussian_random_mkldnn_op.py b/python/paddle/fluid/tests/unittests/test_gaussian_random_mkldnn_op.py new file mode 100644 index 0000000000..3ae877a608 --- /dev/null +++ b/python/paddle/fluid/tests/unittests/test_gaussian_random_mkldnn_op.py @@ -0,0 +1,26 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
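Taken together, the fused-operator expectations above encode a single composition rule for `functor_list`: the first entry is the outer functor, so a trailing unary functor ("scale", "relu") is applied to Y before the binary op, while a leading unary functor wraps the whole binary result. A minimal numpy sketch, where `fused_reference` is a hypothetical helper (not part of the Paddle API), makes the rule explicit:

```python
import numpy as np

def fused_reference(x, y, functor_list, scale=0.1):
    # functor_list[0] is the outer functor: a unary *second* entry is
    # applied to Y only, while a unary *first* entry wraps the sum.
    if functor_list == ["elementwise_add", "scale"]:
        return x + y * scale                 # add(x, scale(y))
    if functor_list == ["elementwise_add", "relu"]:
        return x + np.maximum(y, 0)          # add(x, relu(y))
    if functor_list == ["relu", "elementwise_add"]:
        return np.maximum(x + y, 0)          # relu(add(x, y))
    raise ValueError("unsupported functor_list: %s" % functor_list)

x = np.random.rand(2, 3).astype("float32")
y = np.random.rand(2, 3).astype("float32")
# Values of Y near zero are bumped in the relu tests because the numeric
# gradient uses a central difference with delta = 0.005, which straddles
# relu's kink at 0 and makes the finite difference inaccurate there.
y[np.abs(y) < 0.005] = 0.02
assert np.allclose(fused_reference(x, y, ["relu", "elementwise_add"]),
                   np.maximum(x + y, 0))
```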
+ +import unittest + +from test_gaussian_random_op import TestGaussianRandomOp + + +class TestMKLDNN(TestGaussianRandomOp): + def init_kernel_type(self): + self.use_mkldnn = True + + +if __name__ == '__main__': + unittest.main() diff --git a/python/paddle/fluid/tests/unittests/test_gaussian_random_op.py b/python/paddle/fluid/tests/unittests/test_gaussian_random_op.py index 272caceaf3..8481500fd7 100644 --- a/python/paddle/fluid/tests/unittests/test_gaussian_random_op.py +++ b/python/paddle/fluid/tests/unittests/test_gaussian_random_op.py @@ -25,7 +25,15 @@ class TestGaussianRandomOp(unittest.TestCase): def setUp(self): self.op_type = "gaussian_random" self.inputs = {} - self.attrs = {"shape": [1000, 784], "mean": .0, "std": 1., "seed": 10} + self.use_mkldnn = False + self.init_kernel_type() + self.attrs = { + "shape": [1000, 784], + "mean": .0, + "std": 1., + "seed": 10, + "use_mkldnn": self.use_mkldnn + } self.outputs = ["Out"] @@ -58,6 +66,9 @@ class TestGaussianRandomOp(unittest.TestCase): self.assertAlmostEqual(numpy.mean(tensor), .0, delta=0.1) self.assertAlmostEqual(numpy.std(tensor), 1., delta=0.1) + def init_kernel_type(self): + pass + if __name__ == "__main__": unittest.main() diff --git a/python/paddle/fluid/tests/unittests/test_get_places_op.py b/python/paddle/fluid/tests/unittests/test_get_places_op.py index 6dab1e22f0..964423e2d2 100644 --- a/python/paddle/fluid/tests/unittests/test_get_places_op.py +++ b/python/paddle/fluid/tests/unittests/test_get_places_op.py @@ -13,6 +13,7 @@ # limitations under the License. import paddle.fluid as fluid +from paddle.fluid.layers.device import get_places import decorators import unittest @@ -20,7 +21,7 @@ import unittest class TestGetPlaces(unittest.TestCase): @decorators.prog_scope() def test_get_places(self): - places = fluid.layers.get_places() + places = get_places() cpu = fluid.CPUPlace() exe = fluid.Executor(cpu) exe.run(fluid.default_main_program()) diff --git a/python/paddle/fluid/tests/unittests/test_gru_op.py b/python/paddle/fluid/tests/unittests/test_gru_op.py index 3a13eb872a..86a2c674d0 100644 --- a/python/paddle/fluid/tests/unittests/test_gru_op.py +++ b/python/paddle/fluid/tests/unittests/test_gru_op.py @@ -20,8 +20,8 @@ from test_lstm_op import identity, sigmoid, tanh, relu class TestGRUOp(OpTest): - lod = [[0, 2, 6, 9]] - batch_size = lod[0][-1] + lod = [[2, 4, 3]] + batch_size = sum(lod[0]) frame_size = 5 activate = { 'identity': identity, @@ -33,12 +33,12 @@ class TestGRUOp(OpTest): @staticmethod def seq_to_batch(lod, is_reverse): idx_in_seq_list = [] - seq_starts = lod[0] - seq_lens = [] - for i in range(len(seq_starts) - 1): - seq_lens.append(seq_starts[i + 1] - seq_starts[i]) + seq_lens = lod[0] + seq_starts = [0] + for i in range(len(seq_lens)): + seq_starts.append(seq_starts[-1] + seq_lens[i]) sorted_seqs = sorted( - range(len(seq_lens)), lambda x, y: seq_lens[y] - seq_lens[x]) + list(range(len(seq_lens))), lambda x, y: seq_lens[y] - seq_lens[x]) num_batch = seq_lens[sorted_seqs[0]] for batch_idx in range(num_batch): idx_in_seq = [] @@ -74,15 +74,16 @@ class TestGRUOp(OpTest): def gru(self): input, lod = self.inputs['Input'] w = self.inputs['Weight'] - b = self.inputs['Bias'] if self.inputs.has_key('Bias') else np.zeros( + b = self.inputs['Bias'] if 'Bias' in self.inputs else np.zeros( (1, self.frame_size * 3)) batch_gate = self.outputs['BatchGate'] batch_reset_hidden_prev = self.outputs['BatchResetHiddenPrev'] batch_hidden = self.outputs['BatchHidden'] hidden = self.outputs['Hidden'] idx_in_seq_list = 
self.idx_in_seq_list - h_p = self.inputs['H0'][self.sorted_seqs] if self.inputs.has_key( - 'H0') else np.zeros((len(idx_in_seq_list[0]), self.frame_size)) + h_p = self.inputs['H0'][ + self.sorted_seqs] if 'H0' in self.inputs else np.zeros( + (len(idx_in_seq_list[0]), self.frame_size)) num_batch = len(idx_in_seq_list) end_idx = 0 for batch_idx in range(num_batch): diff --git a/python/paddle/fluid/tests/unittests/test_gru_unit_op.py b/python/paddle/fluid/tests/unittests/test_gru_unit_op.py index c56b1eefd3..87a9eba4d9 100644 --- a/python/paddle/fluid/tests/unittests/test_gru_unit_op.py +++ b/python/paddle/fluid/tests/unittests/test_gru_unit_op.py @@ -76,7 +76,7 @@ class TestGRUUnitOp(OpTest): x = self.inputs['Input'] h_p = self.inputs['HiddenPrev'] w = self.inputs['Weight'] - b = self.inputs['Bias'] if self.inputs.has_key('Bias') else np.zeros( + b = self.inputs['Bias'] if 'Bias' in self.inputs else np.zeros( (1, frame_size * 3)) g = x + np.tile(b, (batch_size, 1)) w_u_r = w.flatten()[:frame_size * frame_size * 2].reshape( diff --git a/python/paddle/fluid/tests/unittests/test_hsigmoid_op.py b/python/paddle/fluid/tests/unittests/test_hsigmoid_op.py new file mode 100644 index 0000000000..daa5da8d95 --- /dev/null +++ b/python/paddle/fluid/tests/unittests/test_hsigmoid_op.py @@ -0,0 +1,101 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
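A recurring change in this patch (test_gru_op above, and the iou_similarity, lod_reset and LoDTensor tests further down) is the move from offset-based LoD to length-based LoD. The two encodings carry the same segmentation; a small conversion sketch, assuming plain Python lists:

```python
# Offset-based LoD stores cumulative segment boundaries; length-based LoD
# stores the segment sizes themselves. Offsets are the running sum of lengths.
def lengths_to_offsets(lengths):
    offsets = [0]
    for n in lengths:
        offsets.append(offsets[-1] + n)
    return offsets

def offsets_to_lengths(offsets):
    return [b - a for a, b in zip(offsets, offsets[1:])]

# The GRU test's old lod [[0, 2, 6, 9]] and new lod [[2, 4, 3]] describe
# the same three sequences of lengths 2, 4 and 3.
assert lengths_to_offsets([2, 4, 3]) == [0, 2, 6, 9]
assert offsets_to_lengths([0, 2, 6, 9]) == [2, 4, 3]
```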
+ +import unittest +import numpy as np +import math +from op_test import OpTest + +np.random.seed(100) + + +def find_latest_set(num): + return 1 + int(math.floor(math.log(num, 2))) + + +class CodeTable(object): + def __init__(self, num_classes, code): + self.c = num_classes + code + + def cal_index(self, bit): + return (self.c >> (bit + 1)) - 1 + + def get_length(self): + return find_latest_set(self.c) - 1 + + def cal_bit(self, bit): + return self.c & (1 << bit) + + +def hsigmoid(x, w, label, bias, num_classes): + batch_size = x.shape[0] + code_length = find_latest_set(num_classes - 1) + code_table = [0 for _ in range(code_length)] + pre_output = np.zeros((batch_size, code_length)) + pre_sum = np.zeros((batch_size, 1)) + out = np.zeros((batch_size, 1)).astype("float32") + for i in range(batch_size): + code_table = CodeTable(num_classes, label[i]) + length = code_table.get_length() + for j in range(length): + idx = code_table.cal_index(j) + pre_output[i][j] += bias[0][idx] + for i in range(batch_size): + code_table = CodeTable(num_classes, label[i]) + length = code_table.get_length() + for j in range(length): + idx = code_table.cal_index(j) + pre_output[i][j] += np.dot(w[idx], x[i]) + # clip[-40.0, 40.0] + pre_output = np.clip(pre_output, -40.0, 40.0) + # out(i, 0) = \sum_j bit(i, j) * preout(i, j) + for i in range(batch_size): + code_table = CodeTable(num_classes, label[i]) + length = code_table.get_length() + sum = 0.0 + for j in range(length): + if code_table.cal_bit(j): + sum += pre_output[i][j] + out[i] = -1.0 * sum + # soft relu + pre_output = np.log(1 + np.exp(pre_output)) + pre_sum = pre_output.sum(1).reshape((batch_size, 1)) + out += pre_sum + return pre_output, out + + +class TestHSigmoidOp(OpTest): + def setUp(self): + self.op_type = "hierarchical_sigmoid" + num_classes = 6 + feature_size = 8 + batch_size = 4 + x = np.random.random((batch_size, feature_size)).astype("float32") + w = np.random.random((num_classes - 1, feature_size)).astype("float32") + label = np.random.randint(0, num_classes, (batch_size, 1)) + bias = np.random.random((1, num_classes - 1)).astype("float32") + self.attrs = {'num_classes': num_classes} + self.inputs = {'X': x, 'W': w, 'Label': label, 'Bias': bias} + pre_output, out = hsigmoid(x, w, label, bias, num_classes) + self.outputs = {'PreOut': pre_output, 'Out': out} + + def test_check_output(self): + self.check_output() + + def test_check_grad(self): + self.check_grad(['Bias', 'X', 'W'], ['Out'], no_grad_set=set('Label')) + + +if __name__ == '__main__': + unittest.main() diff --git a/python/paddle/fluid/tests/unittests/test_im2sequence_op.py b/python/paddle/fluid/tests/unittests/test_im2sequence_op.py index 4946475f11..13bc576874 100644 --- a/python/paddle/fluid/tests/unittests/test_im2sequence_op.py +++ b/python/paddle/fluid/tests/unittests/test_im2sequence_op.py @@ -16,23 +16,48 @@ import numpy as np from op_test import OpTest -def get_output_shape(attrs, in_shape): +def get_output_shape(attrs, in_shape, img_real_size): + batchsize = in_shape[0] img_height = in_shape[2] img_width = in_shape[3] + paddings = np.array(attrs['paddings']).astype("int32") + kernels = np.array(attrs['kernels']).astype("int32") + strides = np.array(attrs['strides']).astype("int32") + output_height = np.zeros((1, batchsize)).astype("int32") + output_width = np.zeros((1, batchsize)).astype("int32") + if len(img_real_size): + out_stride = np.array(attrs['out_stride']).astype("int32") + imgreal_h = 0 + imgreal_w = 0 + for index in range(batchsize): + if img_real_size[index, 0] % 
out_stride[0] == 0: + imgreal_h = img_real_size[index, 0] / out_stride[0] + else: + imgreal_h = img_real_size[index, 0] / out_stride[0] + 1 + if img_real_size[index, 1] % out_stride[1] == 0: + imgreal_w = img_real_size[index, 1] / out_stride[1] + else: + imgreal_w = img_real_size[index, 1] / out_stride[1] + 1 + output_height[0,index] = \ + 1 + \ + (imgreal_h + paddings[0] + paddings[2] - kernels[0] + strides[0] - 1) / \ + strides[0] - paddings = attrs['paddings'] - kernels = attrs['kernels'] - strides = attrs['strides'] + output_width[0,index] = \ + 1 + \ + (imgreal_w + paddings[1] + paddings[3] - kernels[1] + strides[1] - 1) / \ + strides[1] + else: + for index in range(batchsize): + output_height[0,index] = \ + 1 + \ + (img_height + paddings[0] + paddings[2] - kernels[0] + strides[0] - 1) / \ + strides[0] - output_height = \ - 1 + \ - (img_height + paddings[0] + paddings[2] - kernels[0] + strides[0] - 1) / \ - strides[0] - - output_width = \ - 1 + \ - (img_width + paddings[1] + paddings[3] - kernels[1] + strides[1] - 1) / \ - strides[1] + output_width[0,index] = \ + 1 + \ + (img_width + paddings[1] + paddings[3] - kernels[1] + strides[1] - 1) / \ + strides[1] return output_height, output_width @@ -75,22 +100,25 @@ def im2col(attrs, im, col): im_row_offset][im_col_offset] -def Im2Sequence(inputs, attrs): - output_height, output_width = get_output_shape(attrs, inputs.shape) +def Im2Sequence(inputs, img_real_size, attrs): + output_height, output_width = get_output_shape(attrs, inputs.shape, + img_real_size) img_channels = inputs.shape[1] batch_size = inputs.shape[0] - out = np.zeros([ - batch_size, output_height, output_width, img_channels, - attrs['kernels'][0], attrs['kernels'][1] - ]).astype("float32") - - for i in range(len(inputs)): - im2col(attrs, inputs[i], out[i]) - - out = out.reshape([ - batch_size * output_height * output_width, - img_channels * attrs['kernels'][0] * attrs['kernels'][1] - ]) + out = [] + for index in range(batch_size): + tmp = np.zeros([ + output_height[0, index], output_width[0, index], img_channels, + attrs['kernels'][0], attrs['kernels'][1] + ]).astype("float32") + out.append(tmp) + for index in range(len(inputs)): + im2col(attrs, inputs[index], out[index]) + out[index] = out[index].reshape([ + output_height[0, index] * output_width[0, index], + img_channels * attrs['kernels'][0] * attrs['kernels'][1] + ]) + out = np.concatenate(out, axis=0) return out @@ -103,7 +131,7 @@ class TestBlockExpandOp(OpTest): self.attrs = { 'kernels': [2, 2], 'strides': [1, 1], - 'paddings': [1, 1, 1, 1] + 'paddings': [1, 1, 1, 1], } def setUp(self): @@ -113,7 +141,8 @@ self.batch_size, self.img_channels, self.img_height, self.img_width ]).astype("float32") - out = Im2Sequence(x, self.attrs) + real_size = np.array([]).astype("float32") + out = Im2Sequence(x, real_size, self.attrs) self.inputs = {'X': x} self.outputs = {'Out': out} @@ -133,20 +162,20 @@ class TestBlockExpandOpCase2(TestBlockExpandOp): self.attrs = { 'kernels': [2, 1], 'strides': [2, 1], - 'paddings': [2, 1, 2, 1] + 'paddings': [2, 1, 2, 1], } class TestBlockExpandOpCase3(TestBlockExpandOp): def config(self): - self.batch_size = 3 + self.batch_size = 2 self.img_channels = 1 self.img_height = 4 self.img_width = 5 self.attrs = { 'kernels': [2, 1], 'strides': [2, 1], - 'paddings': [2, 0, 2, 0] + 'paddings': [2, 0, 2, 0], } @@ -159,9 +188,94 @@ class TestBlockExpandOpCase4(TestBlockExpandOp): self.attrs = { 'kernels': [2, 2], 'strides': [1, 1], - 'paddings': [0, 0, 0, 0] + 'paddings': [0, 
0, 0, 0], + } + + +class TestBlockExpandOpCase5(OpTest): + def config(self): + self.batch_size = 1 + self.img_channels = 3 + self.img_height = 4 + self.img_width = 5 + self.attrs = { + 'kernels': [2, 1], + 'strides': [2, 1], + 'paddings': [2, 1, 2, 1], + 'out_stride': [2, 2], + } + + def setUp(self): + self.config() + self.op_type = "im2sequence" + x = np.random.uniform(0.1, 1, [ + self.batch_size, self.img_channels, self.img_height, self.img_width + ]).astype("float32") + real_size = np.array([[8, 10], [5, 8]]).astype("float32") + out = np.array(Im2Sequence(x, real_size, self.attrs)) + self.inputs = {'X': x, 'Y': real_size} + self.outputs = {'Out': out} + + def test_check_output(self): + self.check_output() + + +class TestBlockExpandOpCase6(OpTest): + def config(self): + self.batch_size = 3 + self.img_channels = 1 + self.img_height = 4 + self.img_width = 5 + self.attrs = { + 'kernels': [2, 1], + 'strides': [1, 1], + 'paddings': [0, 0, 0, 0], + 'out_stride': [1, 1], + } + + def setUp(self): + self.config() + self.op_type = "im2sequence" + x = np.random.uniform(0.1, 1, [ + self.batch_size, self.img_channels, self.img_height, self.img_width + ]).astype("float32") + real_size = np.array([[8, 10], [5, 8], [5, 8]]).astype("float32") + out = np.array(Im2Sequence(x, real_size, self.attrs)) + self.inputs = {'X': x, 'Y': real_size} + self.outputs = {'Out': out} + + def test_check_output(self): + self.check_output() + + +class TestBlockExpandOpCase7(OpTest): + def config(self): + self.batch_size = 2 + self.img_channels = 2 + self.img_height = 3 + self.img_width = 3 + self.attrs = { + 'kernels': [2, 2], + 'strides': [1, 1], + 'paddings': [1, 0, 1, 0], + 'out_stride': [2, 2], } + def setUp(self): + self.config() + self.op_type = "im2sequence" + x = np.random.uniform(0.1, 1, [ + self.batch_size, self.img_channels, self.img_height, self.img_width + ]).astype("float32") + real_size = np.array([[6, 6], [4, 4]]).astype("float32") + out = np.array(Im2Sequence(x, real_size, self.attrs)) + self.inputs = {'X': x, 'Y': real_size} + self.outputs = {'Out': out} + + def test_check_output(self): + self.check_output() + if __name__ == '__main__': unittest.main() diff --git a/python/paddle/fluid/tests/unittests/test_image_classification_layer.py b/python/paddle/fluid/tests/unittests/test_image_classification_layer.py index 6ecfa9ea21..23b1ed957a 100644 --- a/python/paddle/fluid/tests/unittests/test_image_classification_layer.py +++ b/python/paddle/fluid/tests/unittests/test_image_classification_layer.py @@ -43,7 +43,7 @@ class TestLayer(unittest.TestCase): hidden2 = fluid.layers.fc(input=hidden1, size=128, act='relu') fluid.layers.batch_norm(input=hidden2) - print str(main_program) + print(str(main_program)) def test_dropout_layer(self): main_program = Program() @@ -53,7 +53,7 @@ class TestLayer(unittest.TestCase): name='pixel', shape=[3, 48, 48], dtype='float32') fluid.layers.dropout(x=images, dropout_prob=0.5) - print str(main_program) + print(str(main_program)) def test_img_conv_group(self): main_program = Program() @@ -65,7 +65,7 @@ class TestLayer(unittest.TestCase): conv1 = conv_block(images, 64, 2, [0.3, 0]) conv_block(conv1, 256, 3, [0.4, 0.4, 0]) - print str(main_program) + print(str(main_program)) def test_elementwise_add_with_act(self): main_program = Program() diff --git a/python/paddle/fluid/tests/unittests/test_inference_model_io.py b/python/paddle/fluid/tests/unittests/test_inference_model_io.py index 51460cbb13..4cd203155f 100644 ---
a/python/paddle/fluid/tests/unittests/test_inference_model_io.py +++ b/python/paddle/fluid/tests/unittests/test_inference_model_io.py @@ -48,7 +48,7 @@ class TestBook(unittest.TestCase): exe.run(init_program, feed={}, fetch_list=[]) - for i in xrange(100): + for i in range(100): tensor_x = np.array( [[1, 1], [1, 2], [3, 4], [5, 2]]).astype("float32") tensor_y = np.array([[-2], [-3], [-7], [-7]]).astype("float32") diff --git a/python/paddle/fluid/tests/unittests/test_initializer.py b/python/paddle/fluid/tests/unittests/test_initializer.py index 587e2025e1..b215e37986 100644 --- a/python/paddle/fluid/tests/unittests/test_initializer.py +++ b/python/paddle/fluid/tests/unittests/test_initializer.py @@ -27,12 +27,13 @@ class TestConstantInitializer(unittest.TestCase): """ program = framework.Program() block = program.global_block() - block.create_parameter( - dtype="float32", - shape=[5, 10], - lod_level=0, - name="param", - initializer=initializer.ConstantInitializer()) + for _ in range(2): + block.create_parameter( + dtype="float32", + shape=[5, 10], + lod_level=0, + name="param", + initializer=initializer.ConstantInitializer()) self.assertEqual(len(block.ops), 1) init_op = block.ops[0] self.assertEqual(init_op.type, 'fill_constant') @@ -43,12 +44,13 @@ class TestConstantInitializer(unittest.TestCase): """ program = framework.Program() block = program.global_block() - block.create_parameter( - dtype="float32", - shape=[5, 10], - lod_level=0, - name="param", - initializer=initializer.ConstantInitializer(2.3)) + for _ in range(2): + block.create_parameter( + dtype="float32", + shape=[5, 10], + lod_level=0, + name="param", + initializer=initializer.ConstantInitializer(2.3)) self.assertEqual(len(block.ops), 1) init_op = block.ops[0] self.assertEqual(init_op.type, 'fill_constant') @@ -61,12 +63,13 @@ class TestUniformInitializer(unittest.TestCase): """ program = framework.Program() block = program.global_block() - block.create_parameter( - dtype="float32", - shape=[5, 10], - lod_level=0, - name="param", - initializer=initializer.UniformInitializer()) + for _ in range(2): + block.create_parameter( + dtype="float32", + shape=[5, 10], + lod_level=0, + name="param", + initializer=initializer.UniformInitializer()) self.assertEqual(len(block.ops), 1) init_op = block.ops[0] self.assertEqual(init_op.type, 'uniform_random') @@ -80,18 +83,19 @@ class TestUniformInitializer(unittest.TestCase): program = framework.Program() program.random_seed = 123 block = program.global_block() - block.create_parameter( - dtype="float32", - shape=[5, 10], - lod_level=0, - name="param", - initializer=initializer.UniformInitializer()) - block.create_parameter( - dtype="float32", - shape=[5, 10], - lod_level=0, - name="param", - initializer=initializer.UniformInitializer(seed=456)) + for _ in range(2): + block.create_parameter( + dtype="float32", + shape=[5, 10], + lod_level=0, + name="param1", + initializer=initializer.UniformInitializer()) + block.create_parameter( + dtype="float32", + shape=[5, 10], + lod_level=0, + name="param2", + initializer=initializer.UniformInitializer(seed=456)) init_op = block.ops[1] self.assertEqual(init_op.attr("seed"), 123) init_op1 = block.ops[0] @@ -102,12 +106,13 @@ class TestUniformInitializer(unittest.TestCase): """ program = framework.Program() block = program.global_block() - block.create_parameter( - dtype="float32", - shape=[5, 10], - lod_level=0, - name="param", - initializer=initializer.UniformInitializer(-4.2, 3.1, 123)) + for _ in range(2): + block.create_parameter( + 
dtype="float32", + shape=[5, 10], + lod_level=0, + name="param", + initializer=initializer.UniformInitializer(-4.2, 3.1, 123)) self.assertEqual(len(block.ops), 1) init_op = block.ops[0] self.assertEqual(init_op.type, 'uniform_random') @@ -115,6 +120,25 @@ class TestUniformInitializer(unittest.TestCase): self.assertAlmostEqual(init_op.attr('max'), 3.1, delta=DELTA) self.assertEqual(init_op.attr('seed'), 123) + def test_uniform_initializer_two_op(self): + """Test uniform initializer with supplied attributes + """ + program = framework.Program() + block = program.global_block() + for i in range(2): + block.create_parameter( + dtype="float32", + shape=[5, 10], + lod_level=0, + name="param", + initializer=initializer.UniformInitializer(-4.2, float(i), 123)) + self.assertEqual(len(block.ops), 1) + init_op0 = block.ops[0] + self.assertEqual(init_op0.type, 'uniform_random') + self.assertAlmostEqual(init_op0.attr('min'), -4.2, delta=DELTA) + self.assertAlmostEqual(init_op0.attr('max'), 0.0, delta=DELTA) + self.assertEqual(init_op0.attr('seed'), 123) + class TestNormalInitializer(unittest.TestCase): def test_normal_initializer_default_value(self): @@ -122,12 +146,13 @@ class TestNormalInitializer(unittest.TestCase): """ program = framework.Program() block = program.global_block() - block.create_parameter( - dtype="float32", - shape=[5, 10], - lod_level=0, - name="param", - initializer=initializer.NormalInitializer()) + for _ in range(2): + block.create_parameter( + dtype="float32", + shape=[5, 10], + lod_level=0, + name="param", + initializer=initializer.NormalInitializer()) self.assertEqual(len(block.ops), 1) init_op = block.ops[0] self.assertEqual(init_op.type, 'gaussian_random') @@ -140,12 +165,13 @@ class TestNormalInitializer(unittest.TestCase): """ program = framework.Program() block = program.global_block() - block.create_parameter( - dtype="float32", - shape=[5, 10], - lod_level=0, - name="param", - initializer=initializer.NormalInitializer(2.3, 1.9, 123)) + for _ in range(2): + block.create_parameter( + dtype="float32", + shape=[5, 10], + lod_level=0, + name="param", + initializer=initializer.NormalInitializer(2.3, 1.9, 123)) self.assertEqual(len(block.ops), 1) init_op = block.ops[0] self.assertEqual(init_op.type, 'gaussian_random') @@ -161,12 +187,13 @@ class TestXavierInitializer(unittest.TestCase): """ program = framework.Program() block = program.global_block() - param = block.create_parameter( - dtype="float32", - shape=[5, 10], - lod_level=0, - name="param", - initializer=initializer.XavierInitializer()) + for _ in range(2): + param = block.create_parameter( + dtype="float32", + shape=[5, 10], + lod_level=0, + name="param", + initializer=initializer.XavierInitializer()) self.assertEqual(len(block.ops), 1) init_op = block.ops[0] self.assertEqual(init_op.type, 'uniform_random') @@ -181,12 +208,13 @@ class TestXavierInitializer(unittest.TestCase): """ program = framework.Program() block = program.global_block() - param = block.create_parameter( - dtype="float32", - shape=[5, 10, 15, 20], - lod_level=0, - name="param", - initializer=initializer.XavierInitializer()) + for _ in range(2): + param = block.create_parameter( + dtype="float32", + shape=[5, 10, 15, 20], + lod_level=0, + name="param", + initializer=initializer.XavierInitializer()) self.assertEqual(len(block.ops), 1) init_op = block.ops[0] self.assertEqual(init_op.type, 'uniform_random') @@ -203,12 +231,13 @@ class TestXavierInitializer(unittest.TestCase): """ program = framework.Program() block = program.global_block() - param = 
block.create_parameter( - dtype="float32", - shape=[5, 10], - lod_level=0, - name="param", - initializer=initializer.XavierInitializer(uniform=False)) + for _ in range(2): + param = block.create_parameter( + dtype="float32", + shape=[5, 10], + lod_level=0, + name="param", + initializer=initializer.XavierInitializer(uniform=False)) self.assertEqual(len(block.ops), 1) init_op = block.ops[0] self.assertEqual(init_op.type, 'gaussian_random') @@ -223,12 +252,13 @@ class TestXavierInitializer(unittest.TestCase): """ program = framework.Program() block = program.global_block() - param = block.create_parameter( - dtype="float32", - shape=[5, 10, 15, 20], - lod_level=0, - name="param", - initializer=initializer.XavierInitializer(uniform=False)) + for _ in range(2): + param = block.create_parameter( + dtype="float32", + shape=[5, 10, 15, 20], + lod_level=0, + name="param", + initializer=initializer.XavierInitializer(uniform=False)) self.assertEqual(len(block.ops), 1) init_op = block.ops[0] self.assertEqual(init_op.type, 'gaussian_random') @@ -244,13 +274,14 @@ class TestXavierInitializer(unittest.TestCase): """ program = framework.Program() block = program.global_block() - block.create_parameter( - dtype="float32", - shape=[5, 10], - lod_level=0, - name="param", - initializer=initializer.XavierInitializer( - fan_in=12, fan_out=23, seed=134)) + for _ in range(2): + block.create_parameter( + dtype="float32", + shape=[5, 10], + lod_level=0, + name="param", + initializer=initializer.XavierInitializer( + fan_in=12, fan_out=23, seed=134)) self.assertEqual(len(block.ops), 1) init_op = block.ops[0] self.assertEqual(init_op.type, 'uniform_random') @@ -267,12 +298,13 @@ class TestMSRAInitializer(unittest.TestCase): """ program = framework.Program() block = program.global_block() - param = block.create_parameter( - dtype="float32", - shape=[5, 10], - lod_level=0, - name="param", - initializer=initializer.MSRAInitializer()) + for _ in range(2): + param = block.create_parameter( + dtype="float32", + shape=[5, 10], + lod_level=0, + name="param", + initializer=initializer.MSRAInitializer()) self.assertEqual(len(block.ops), 1) init_op = block.ops[0] self.assertEqual(init_op.type, 'uniform_random') @@ -287,12 +319,13 @@ class TestMSRAInitializer(unittest.TestCase): """ program = framework.Program() block = program.global_block() - param = block.create_parameter( - dtype="float32", - shape=[5, 10, 15, 20], - lod_level=0, - name="param", - initializer=initializer.MSRAInitializer()) + for _ in range(2): + param = block.create_parameter( + dtype="float32", + shape=[5, 10, 15, 20], + lod_level=0, + name="param", + initializer=initializer.MSRAInitializer()) self.assertEqual(len(block.ops), 1) init_op = block.ops[0] self.assertEqual(init_op.type, 'uniform_random') @@ -308,12 +341,13 @@ class TestMSRAInitializer(unittest.TestCase): """ program = framework.Program() block = program.global_block() - param = block.create_parameter( - dtype="float32", - shape=[5, 10], - lod_level=0, - name="param", - initializer=initializer.MSRAInitializer(uniform=False)) + for _ in range(2): + param = block.create_parameter( + dtype="float32", + shape=[5, 10], + lod_level=0, + name="param", + initializer=initializer.MSRAInitializer(uniform=False)) self.assertEqual(len(block.ops), 1) init_op = block.ops[0] self.assertEqual(init_op.type, 'gaussian_random') @@ -328,12 +362,13 @@ class TestMSRAInitializer(unittest.TestCase): """ program = framework.Program() block = program.global_block() - param = block.create_parameter( - dtype="float32", - 
shape=[5, 10, 15, 20], - lod_level=0, - name="param", - initializer=initializer.MSRAInitializer(uniform=False)) + for _ in range(2): + param = block.create_parameter( + dtype="float32", + shape=[5, 10, 15, 20], + lod_level=0, + name="param", + initializer=initializer.MSRAInitializer(uniform=False)) self.assertEqual(len(block.ops), 1) init_op = block.ops[0] self.assertEqual(init_op.type, 'gaussian_random') @@ -348,13 +383,14 @@ class TestMSRAInitializer(unittest.TestCase): """ program = framework.Program() block = program.global_block() - block.create_parameter( - dtype="float32", - shape=[5, 10], - lod_level=0, - name="param", - initializer=initializer.MSRAInitializer( - fan_in=12, seed=134)) + for _ in range(2): + block.create_parameter( + dtype="float32", + shape=[5, 10], + lod_level=0, + name="param", + initializer=initializer.MSRAInitializer( + fan_in=12, seed=134)) self.assertEqual(len(block.ops), 1) init_op = block.ops[0] self.assertEqual(init_op.type, 'uniform_random') @@ -364,5 +400,23 @@ class TestMSRAInitializer(unittest.TestCase): self.assertEqual(init_op.attr('seed'), 134) +class TestBilinearInitializer(unittest.TestCase): + def test_bilinear_initializer(self): + """Test the bilinear initializer with supplied arguments + """ + program = framework.Program() + block = program.global_block() + for _ in range(2): + block.create_parameter( + dtype="float32", + shape=[8, 1, 3, 3], + lod_level=0, + name="param", + initializer=initializer.BilinearInitializer()) + self.assertEqual(len(block.ops), 1) + init_op = block.ops[0] + self.assertEqual(init_op.type, 'assign_value') + + if __name__ == '__main__': unittest.main() diff --git a/python/paddle/fluid/tests/unittests/test_iou_similarity_op.py b/python/paddle/fluid/tests/unittests/test_iou_similarity_op.py index 8f62ac20a5..eff4212d91 100644 --- a/python/paddle/fluid/tests/unittests/test_iou_similarity_op.py +++ b/python/paddle/fluid/tests/unittests/test_iou_similarity_op.py @@ -58,8 +58,8 @@ class TestIOUSimilarityOpWithLoD(TestIOUSimilarityOp): def setUp(self): super(TestIOUSimilarityOpWithLoD, self).setUp() - self.boxes1_lod = [[0, 1, 2]] - self.output_lod = [[0, 1, 2]] + self.boxes1_lod = [[1, 1]] + self.output_lod = [[1, 1]] self.inputs = {'X': (self.boxes1, self.boxes1_lod), 'Y': self.boxes2} self.outputs = {'Out': (self.output, self.output_lod)} diff --git a/python/paddle/fluid/tests/unittests/test_is_empty_op.py b/python/paddle/fluid/tests/unittests/test_is_empty_op.py index 4d11cf226b..11121d9b65 100644 --- a/python/paddle/fluid/tests/unittests/test_is_empty_op.py +++ b/python/paddle/fluid/tests/unittests/test_is_empty_op.py @@ -14,42 +14,24 @@ import unittest import numpy as np -from paddle.fluid.op import Operator -import paddle.fluid.core as core +from op_test import OpTest -def create_tensor(scope, name, np_data): - tensor = scope.var(name).get_tensor() - tensor.set_dims(np_data.shape) - tensor.set(np_data, core.CPUPlace()) - return tensor - - -class TestIsEmptyOp(unittest.TestCase): +class TestEmpty(OpTest): def setUp(self): - self.scope = core.Scope() - # create input variables - np_data0 = np.array([0, 1, 2]) - create_tensor(self.scope, "X0", np_data0) - - np_data1 = np.array([1]) - t = create_tensor(self.scope, "X1", np_data1) - t.set_dims([0]) + self.op_type = "is_empty" + self.inputs = {'X': np.array([1, 2, 3])} + self.outputs = {'Out': np.array([False])} - # create output variables - self.scope.var("out") + def test_check_output(self): + self.check_output() - def test_no_empty(self): - self.one_case("X0", False) - def 
test_empty(self): - self.one_case("X1", True) - - def one_case(self, input, target): - op = Operator(type="is_empty", X=input, Out="out") - op.run(self.scope, core.CPUPlace()) - out = self.scope.var("out").get_tensor() - self.assertEqual(np.array(out)[0], target) +class TestNotEmpty(TestEmpty): + def setUp(self): + self.op_type = "is_empty" + self.inputs = {'X': np.array([])} + self.outputs = {'Out': np.array([True])} if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/test_layer_norm_op.py b/python/paddle/fluid/tests/unittests/test_layer_norm_op.py index 69365db4d1..295887ccd1 100644 --- a/python/paddle/fluid/tests/unittests/test_layer_norm_op.py +++ b/python/paddle/fluid/tests/unittests/test_layer_norm_op.py @@ -17,6 +17,7 @@ import numpy as np from operator import mul import paddle.fluid.core as core import paddle.fluid as fluid +from functools import reduce np.random.random(123) diff --git a/python/paddle/fluid/tests/unittests/test_layers.py b/python/paddle/fluid/tests/unittests/test_layers.py index c5414abf0f..8f2dac786d 100644 --- a/python/paddle/fluid/tests/unittests/test_layers.py +++ b/python/paddle/fluid/tests/unittests/test_layers.py @@ -16,6 +16,7 @@ from __future__ import print_function import unittest import paddle.fluid.layers as layers +from paddle.fluid.layers.device import get_places import paddle.fluid.nets as nets from paddle.fluid.framework import Program, program_guard, default_main_program from paddle.fluid.param_attr import ParamAttr @@ -173,6 +174,16 @@ class TestBook(unittest.TestCase): x=dat, label=lbl)) print(str(program)) + def test_hsigmoid(self): + program = Program() + with program_guard(program): + x = layers.data(name='x', shape=[2], dtype='float32') + y = layers.data(name='y', shape=[2], dtype='int64') + self.assertIsNotNone( + layers.hsigmoid( + input=x, label=y, num_classes=2)) + print(str(program)) + def test_sequence_expand(self): program = Program() with program_guard(program): @@ -238,7 +249,7 @@ class TestBook(unittest.TestCase): def test_get_places(self): program = Program() with program_guard(program): - x = layers.get_places(device_count=4) + x = get_places(device_count=4) self.assertIsNotNone(x) print(str(program)) @@ -251,12 +262,16 @@ class TestBook(unittest.TestCase): print(str(program)) def test_im2sequence(self): - print("test_im2sequence") program = Program() with program_guard(program): x = layers.data(name='x', shape=[3, 128, 128], dtype='float32') + y = layers.data(name='y', shape=[], dtype='float32') output = layers.im2sequence( - input=x, stride=[1, 1], filter_size=[2, 2]) + input=x, + input_image_size=y, + stride=[1, 1], + filter_size=[2, 2], + out_stride=[1, 1]) self.assertIsNotNone(output) print(str(program)) @@ -264,7 +279,7 @@ class TestBook(unittest.TestCase): def test_nce(self): window_size = 5 words = [] - for i in xrange(window_size): + for i in range(window_size): words.append( layers.data( name='word_{0}'.format(i), shape=[1], dtype='int64')) @@ -273,7 +288,7 @@ class TestBook(unittest.TestCase): label_word = int(window_size / 2) + 1 embs = [] - for i in xrange(window_size): + for i in range(window_size): if i == label_word: continue @@ -369,6 +384,96 @@ class TestBook(unittest.TestCase): self.assertIsNotNone(output) print(str(program)) + def test_resize_bilinear(self): + program = Program() + with program_guard(program): + x = layers.data(name='x', shape=[3, 9, 6], dtype="float32") + output = layers.resize_bilinear(x, out_shape=[12, 12]) + self.assertIsNotNone(output) + output = 
layers.resize_bilinear(x, scale=3) + self.assertIsNotNone(output) + print(str(program)) + + def test_polygon_box_transform(self): + program = Program() + with program_guard(program): + x = layers.data(name='x', shape=[8, 4, 4], dtype="float32") + output = layers.polygon_box_transform(input=x) + self.assertIsNotNone(output) + print(str(program)) + + def test_l2_normalize(self): + program = Program() + with program_guard(program): + x = layers.data(name='x', shape=[8, 7, 10], dtype="float32") + output = layers.l2_normalize(x, axis=1) + + def test_maxout(self): + program = Program() + with program_guard(program): + data = layers.data(name='x', shape=[8, 6, 6], dtype="float32") + output = layers.maxout(x=data, groups=2) + self.assertIsNotNone(output) + print(str(program)) + + def test_crop(self): + program = Program() + with program_guard(program): + x = layers.data(name='x', shape=[3, 5], dtype="float32") + y = layers.data(name='y', shape=[2, 3], dtype="float32") + output = layers.crop(x, shape=y) + self.assertIsNotNone(output) + print(str(program)) + + def test_mean_iou(self): + program = Program() + with program_guard(program): + x = layers.data(name='x', shape=[16], dtype='float32') + y = layers.data(name='label', shape=[1], dtype='int64') + iou = layers.mean_iou(x, y, 2) + self.assertIsNotNone(iou) + print(str(program)) + + def test_argsort(self): + program = Program() + with program_guard(program): + data = layers.data(name='x', shape=[2, 3, 3], dtype="float32") + out, ids = layers.argsort(input=data, axis=1) + self.assertIsNotNone(out) + self.assertIsNotNone(ids) + print(str(program)) + + def test_rank_loss(self): + program = Program() + with program_guard(program): + label = layers.data( + name='label', + append_batch_size=False, + shape=[16, 1], + dtype="float32") + left = layers.data( + name='left', + append_batch_size=False, + shape=[16, 1], + dtype="float32") + right = layers.data( + name='right', + append_batch_size=False, + shape=[16, 1], + dtype="float32") + out = layers.rank_loss(label, left, right, name="rank_loss") + self.assertIsNotNone(out) + print(str(program)) + + def test_shape(self): + program = Program() + with program_guard(program): + input = layers.data( + name="input", shape=[3, 100, 100], dtype="float32") + out = layers.shape(input, name="shape") + self.assertIsNotNone(out) + print(str(program)) + if __name__ == '__main__': unittest.main() diff --git a/python/paddle/fluid/tests/unittests/test_learning_rate_scheduler.py b/python/paddle/fluid/tests/unittests/test_learning_rate_scheduler.py index 6382e290eb..e628195e72 100644 --- a/python/paddle/fluid/tests/unittests/test_learning_rate_scheduler.py +++ b/python/paddle/fluid/tests/unittests/test_learning_rate_scheduler.py @@ -91,20 +91,21 @@ class TestLearningRateDecay(unittest.TestCase): def check_decay_with_place(self, place, python_decay_fn, fluid_decay_fn, kwargs): + main_prog = fluid.Program() + startup_prog = fluid.Program() - decayed_lr = fluid_decay_fn(**kwargs) + with fluid.program_guard(main_prog, startup_prog): + decayed_lr = fluid_decay_fn(**kwargs) place = fluid.CPUPlace() exe = fluid.Executor(place) - exe.run(fluid.default_startup_program()) + exe.run(startup_prog) - fluid.memory_optimize(fluid.default_main_program()) + fluid.memory_optimize(main_prog) for step in range(10): - lr_val, = exe.run(fluid.default_main_program(), - feed={}, - fetch_list=[decayed_lr]) + lr_val, = exe.run(main_prog, feed={}, fetch_list=[decayed_lr]) python_decayed_lr = python_decay_fn( global_step=float(step), **kwargs) 
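The learning-rate scheduler hunk above stops mutating `fluid.default_main_program()` and instead builds each decay function into fresh `Program` objects under `program_guard`, so one test method cannot leak ops into the next. A minimal sketch of that isolation pattern, where `build_net` is a hypothetical callable returning a fetch target:

```python
import paddle.fluid as fluid

def run_isolated(build_net):
    # Fresh programs instead of the process-global defaults.
    main_prog = fluid.Program()
    startup_prog = fluid.Program()
    with fluid.program_guard(main_prog, startup_prog):
        fetch = build_net()          # ops are recorded into main_prog only
    exe = fluid.Executor(fluid.CPUPlace())
    exe.run(startup_prog)            # run parameter initializers once
    return exe.run(main_prog, fetch_list=[fetch])
```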
self.assertAlmostEqual( diff --git a/python/paddle/fluid/tests/unittests/test_linear_chain_crf_op.py b/python/paddle/fluid/tests/unittests/test_linear_chain_crf_op.py index f49f7635f7..696d0ab4fa 100644 --- a/python/paddle/fluid/tests/unittests/test_linear_chain_crf_op.py +++ b/python/paddle/fluid/tests/unittests/test_linear_chain_crf_op.py @@ -105,11 +105,13 @@ class TestLinearChainCrfOp(OpTest): MAX_SEQ_LEN = 5 # the linear_chain_crf operator only supports sequence (LoD level = 1) - lod = [[0]] + lod = [[]] + seq_start_pos = [0] for i in range(SEQ_NUM): - lod[-1].append(lod[-1][-1] + random.randint(1, MAX_SEQ_LEN)) - emission = np.random.uniform(-1, 1, - [lod[-1][-1], TAG_NUM]).astype("float64") + lod[-1].append(random.randint(1, MAX_SEQ_LEN)) + seq_start_pos.append(seq_start_pos[-1] + lod[-1][-1]) + emission = np.random.uniform( + -1, 1, [seq_start_pos[-1], TAG_NUM]).astype("float64") emission_row_max = np.amax(emission, axis=1, keepdims=True) emission_exps = np.exp(emission - emission_row_max) @@ -118,14 +120,14 @@ class TestLinearChainCrfOp(OpTest): transition_exps = np.exp(transition) labels = np.random.randint( - low=0, high=TAG_NUM, size=(lod[-1][-1], 1), dtype="int64") + low=0, high=TAG_NUM, size=(seq_start_pos[-1], 1), dtype="int64") self.inputs = { "Emission": (emission, lod), "Transition": transition, "Label": (labels, lod) } - crf = LinearChainCrfForward(lod[0], emission, emission_row_max, + crf = LinearChainCrfForward(seq_start_pos, emission, emission_row_max, emission_exps, transition, transition_exps, labels) alpha, log_likelihood = crf.crf_forward_compute() diff --git a/python/paddle/fluid/tests/unittests/test_listen_and_serv_op.py b/python/paddle/fluid/tests/unittests/test_listen_and_serv_op.py new file mode 100644 index 0000000000..1cdc695010 --- /dev/null +++ b/python/paddle/fluid/tests/unittests/test_listen_and_serv_op.py @@ -0,0 +1,110 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
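Several hunks in this patch are pure Python-3 compatibility fixes (test_gru_op, test_gru_unit_op, test_inference_model_io, test_image_classification_layer, test_layers). The recurring substitutions, with the Python-2 form shown in comments:

```python
d = {'Bias': 1}
b = d['Bias'] if 'Bias' in d else 0   # Python 2: d.has_key('Bias')
for i in range(100):                  # Python 2: xrange(100)
    pass
print(str(d))                         # Python 2: print str(d)
squares = [x * x for x in range(3)]   # Python 2 relied on map() returning a list
```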
+ +import paddle +import paddle.fluid as fluid +import os +import signal +import subprocess +import time +import unittest +from multiprocessing import Process +from op_test import OpTest + + +def run_pserver(use_cuda, sync_mode, ip, port, trainers, trainer_id): + x = fluid.layers.data(name='x', shape=[1], dtype='float32') + y_predict = fluid.layers.fc(input=x, size=1, act=None) + y = fluid.layers.data(name='y', shape=[1], dtype='float32') + + # loss function + cost = fluid.layers.square_error_cost(input=y_predict, label=y) + avg_cost = fluid.layers.mean(cost) + + # optimizer + sgd_optimizer = fluid.optimizer.SGD(learning_rate=0.001) + sgd_optimizer.minimize(avg_cost) + + place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() + exe = fluid.Executor(place) + + pserver_endpoints = ip + ":" + port + current_endpoint = ip + ":" + port + t = fluid.DistributeTranspiler() + t.transpile( + trainer_id, + pservers=pserver_endpoints, + trainers=trainers, + sync_mode=sync_mode) + pserver_prog = t.get_pserver_program(current_endpoint) + pserver_startup = t.get_startup_program(current_endpoint, pserver_prog) + exe.run(pserver_startup) + exe.run(pserver_prog) + + +class TestListenAndServOp(OpTest): + def setUp(self): + self.ps_timeout = 5 + self.ip = "127.0.0.1" + self.port = "0" + self.trainers = 1 + self.trainer_id = 0 + + def _start_pserver(self, use_cuda, sync_mode): + p = Process( + target=run_pserver, + args=(use_cuda, sync_mode, self.ip, self.port, self.trainers, + self.trainer_id)) + p.daemon = True + p.start() + return p + + def _wait_ps_ready(self, pid): + start_left_time = self.ps_timeout + sleep_time = 0.5 + while True: + assert start_left_time >= 0, "wait ps ready failed" + time.sleep(sleep_time) + try: + # the listen_and_serv_op would touch a file which contains the listen port + # on the /tmp directory until it was ready to process all the RPC call. + os.stat("/tmp/paddle.%d.port" % pid) + return + except os.error: + start_left_time -= sleep_time + + def test_rpc_interfaces(self): + # TODO(Yancey1989): need to make sure the rpc interface correctly. + pass + + def test_handle_signal_in_serv_op(self): + # run pserver on CPU in sync mode + p1 = self._start_pserver(False, True) + self._wait_ps_ready(p1.pid) + + # raise SIGTERM to pserver + os.kill(p1.pid, signal.SIGINT) + p1.join() + + # run pserver on CPU in async mode + p2 = self._start_pserver(False, False) + self._wait_ps_ready(p2.pid) + + # raise SIGTERM to pserver + os.kill(p2.pid, signal.SIGTERM) + p2.join() + + +if __name__ == '__main__': + unittest.main() diff --git a/python/paddle/fluid/tests/unittests/test_lod_rank_table.py b/python/paddle/fluid/tests/unittests/test_lod_rank_table.py index 093eecb837..d53ead381d 100644 --- a/python/paddle/fluid/tests/unittests/test_lod_rank_table.py +++ b/python/paddle/fluid/tests/unittests/test_lod_rank_table.py @@ -12,7 +12,8 @@ # See the License for the specific language governing permissions and # limitations under the License. 
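The `_wait_ps_ready` helper above polls for the port file that the listen_and_serv op writes under /tmp before it is ready to serve RPC calls. A generic version of that polling loop, simplified from the test (the helper name and defaults here are illustrative):

```python
import os
import time

def wait_for_file(path, timeout=5.0, interval=0.5):
    budget = timeout
    while budget >= 0:
        try:
            os.stat(path)            # raises OSError until the file exists
            return
        except OSError:
            time.sleep(interval)
            budget -= interval
    raise AssertionError("server not ready: %s" % path)
```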
-from paddle.fluid.layers import lod_rank_table, data +from paddle.fluid.layers import data +from paddle.fluid.layers.control_flow import lod_rank_table from paddle.fluid.executor import Executor import paddle.fluid.core as core import numpy @@ -30,11 +31,12 @@ class TestLoDRankTable(unittest.TestCase): tensor = core.LoDTensor() tensor.set(numpy.random.random(size=(17, 100)), cpu) - tensor.set_lod([[0, 1, 3], [0, 5, 6, 7], [0, 3, 4, 9, 10, 13, 16, 17]]) + tensor.set_recursive_sequence_lengths( + [[1, 2], [5, 1, 1], [3, 1, 5, 1, 3, 3, 1]]) exe.run(scope=scope, feed={'x': tensor}) var = scope.find_var(rank_table.name) table = var.get_lod_rank_table() - self.assertEqual([(0, 5), (1, 1), (2, 1)], table.items()) + self.assertEqual([(0, 5), (1, 1), (2, 1)], list(table.items())) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/test_lod_reset_op.py b/python/paddle/fluid/tests/unittests/test_lod_reset_op.py index 6b6d4c824a..77905c4b96 100644 --- a/python/paddle/fluid/tests/unittests/test_lod_reset_op.py +++ b/python/paddle/fluid/tests/unittests/test_lod_reset_op.py @@ -21,11 +21,15 @@ class TestLodResetOpByAttr(OpTest): def setUp(self): self.op_type = "lod_reset" x = np.random.random((10, 20)).astype("float32") - lod = [[0, 3, 5, 10]] - target_lod_0 = [0, 7, 10] + lod = [[3, 2, 5]] + # target_offset_lod and target_lod are the same lod info represented + # in offset-based format and length-based format, respectively. + target_offset_lod = [0, 7, 10] + target_lod = [7, 3] self.inputs = {'X': (x, lod)} - self.attrs = {'target_lod': target_lod_0} - self.outputs = {'Out': (x, [target_lod_0])} + # The `target_lod` attribute is still based on offset + self.attrs = {'target_lod': target_offset_lod} + self.outputs = {'Out': (x, [target_lod])} def test_check_output(self): self.check_output() @@ -38,13 +42,16 @@ class TestLodResetOpByInput(OpTest): def setUp(self): self.op_type = "lod_reset" x = np.random.random((10, 20)).astype("float32") - lod = [[0, 3, 5, 10]] - target_lod_0 = [0, 4, 7, 10] + lod = [[3, 2, 5]] + # target_offset_lod and target_lod are the same lod info represented + # in offset-based format and length-based format, respectively. 
+ target_offset_lod = [0, 4, 7, 10] + target_lod = [4, 3, 3] self.inputs = { 'X': (x, lod), - 'Y': np.array([target_lod_0]).astype('int32') + 'Y': np.array([target_offset_lod]).astype('int32') } - self.outputs = {'Out': (x, [target_lod_0])} + self.outputs = {'Out': (x, [target_lod])} def test_check_output(self): self.check_output() @@ -57,15 +64,16 @@ class TestLodResetOpBoth(OpTest): def setUp(self): self.op_type = "lod_reset" x = np.random.random((10, 20)).astype("float32") - lod = [[0, 3, 5, 10]] - target_lod_0_attr = [0, 7, 10] - target_lod_0_in = [0, 4, 7, 10] + lod = [[3, 2, 5]] + target_offset_lod_attr = [0, 7, 10] + target_offset_lod_in = [0, 4, 7, 10] + target_lod_in = [4, 3, 3] self.inputs = { 'X': (x, lod), - 'Y': np.array(target_lod_0_in).astype('int32') + 'Y': np.array(target_offset_lod_in).astype('int32') } - self.attrs = {'target_lod': target_lod_0_attr} - self.outputs = {'Out': (x, [target_lod_0_in])} + self.attrs = {'target_lod': target_offset_lod_attr} + self.outputs = {'Out': (x, [target_lod_in])} def test_check_output(self): self.check_output() @@ -78,11 +86,11 @@ class TestLodResetOpYIsLoDTensor(OpTest): def setUp(self): self.op_type = "lod_reset" x = np.random.random((10, 20)).astype("float32") - lod = [[0, 3, 5, 10]] + lod = [[3, 2, 5]] y = np.random.random((10, 10)).astype("float32") - target_lod_0 = [[0, 4, 7, 10]] - self.inputs = {'X': (x, lod), 'Y': (y, target_lod_0)} - self.outputs = {'Out': (x, target_lod_0)} + target_lod = [[4, 3, 3]] + self.inputs = {'X': (x, lod), 'Y': (y, target_lod)} + self.outputs = {'Out': (x, target_lod)} def test_check_output(self): self.check_output() diff --git a/python/paddle/fluid/tests/unittests/test_lod_tensor_array.py b/python/paddle/fluid/tests/unittests/test_lod_tensor_array.py index 63b17a5ccd..0ac6d9b81d 100644 --- a/python/paddle/fluid/tests/unittests/test_lod_tensor_array.py +++ b/python/paddle/fluid/tests/unittests/test_lod_tensor_array.py @@ -24,28 +24,28 @@ class TestLoDTensorArray(unittest.TestCase): tensor_array = arr.get_lod_tensor_array() self.assertEqual(0, len(tensor_array)) cpu = core.CPUPlace() - for i in xrange(10): + for i in range(10): t = core.LoDTensor() t.set(numpy.array([i], dtype='float32'), cpu) - t.set_lod([[0, 1]]) + t.set_recursive_sequence_lengths([[1]]) tensor_array.append(t) self.assertEqual(10, len(tensor_array)) - for i in xrange(10): + for i in range(10): t = tensor_array[i] self.assertEqual(numpy.array(t), numpy.array([i], dtype='float32')) - self.assertEqual([[0, 1]], t.lod()) + self.assertEqual([[1]], t.recursive_sequence_lengths()) t = core.LoDTensor() t.set(numpy.array([i + 10], dtype='float32'), cpu) - t.set_lod([[0, 2]]) + t.set_recursive_sequence_lengths([[1]]) tensor_array[i] = t t = tensor_array[i] self.assertEqual( numpy.array(t), numpy.array( [i + 10], dtype='float32')) - self.assertEqual([[0, 2]], t.lod()) + self.assertEqual([[1]], t.recursive_sequence_lengths()) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/test_lod_tensor_array_ops.py b/python/paddle/fluid/tests/unittests/test_lod_tensor_array_ops.py index 66a03640c1..9789ff4af6 100644 --- a/python/paddle/fluid/tests/unittests/test_lod_tensor_array_ops.py +++ b/python/paddle/fluid/tests/unittests/test_lod_tensor_array_ops.py @@ -20,6 +20,11 @@ from paddle.fluid.framework import Program, program_guard from paddle.fluid.executor import Executor from paddle.fluid.backward import append_backward +from paddle.fluid.layers.control_flow import lod_rank_table +from paddle.fluid.layers.control_flow import 
max_sequence_len +from paddle.fluid.layers.control_flow import lod_tensor_to_array +from paddle.fluid.layers.control_flow import array_to_lod_tensor + class TestCPULoDTensorArrayOps(unittest.TestCase): def place(self): @@ -29,9 +34,11 @@ class TestCPULoDTensorArrayOps(unittest.TestCase): tensor = core.LoDTensor() tensor.set( numpy.arange(10).reshape(10, 1).astype('int32'), self.place()) - tensor.set_lod([[0, 3, 9, 10]]) - expect = map(lambda x: numpy.array(x).astype('int32'), - [[3, 0, 9], [4, 1], [5, 2], [6], [7], [8]]) + tensor.set_recursive_sequence_lengths([[3, 6, 1]]) + expect = [ + numpy.array(x).astype('int32') + for x in [[3, 0, 9], [4, 1], [5, 2], [6], [7], [8]] + ] self.main( tensor=tensor, expect_array=expect, @@ -42,9 +49,11 @@ class TestCPULoDTensorArrayOps(unittest.TestCase): tensor = core.LoDTensor() tensor.set( numpy.arange(10).reshape(10, 1).astype('int32'), self.place()) - tensor.set_lod([[0, 3, 9, 9, 10]]) - expect = map(lambda x: numpy.array(x).astype('int32'), - [[3, 0, 9], [4, 1], [5, 2], [6], [7], [8]]) + tensor.set_recursive_sequence_lengths([[3, 6, 0, 1]]) + expect = [ + numpy.array(x).astype('int32') + for x in [[3, 0, 9], [4, 1], [5, 2], [6], [7], [8]] + ] self.main( tensor=tensor, expect_array=expect, @@ -55,7 +64,7 @@ class TestCPULoDTensorArrayOps(unittest.TestCase): tensor = core.LoDTensor() tensor.set( numpy.arange(20).reshape(20, 1).astype('int32'), self.place()) - tensor.set_lod([[0, 2, 5], [0, 3, 9, 11, 17, 20]]) + tensor.set_recursive_sequence_lengths([[2, 3], [3, 6, 2, 6, 3]]) expect = [ numpy.array( @@ -65,7 +74,7 @@ class TestCPULoDTensorArrayOps(unittest.TestCase): [17, 18, 19], dtype='int32') ] - lod = [[[0, 2, 5]], [[0, 6, 12]], [[0, 3]]] + lod = [[[2, 3]], [[6, 6]], [[3]]] self.main( tensor=tensor, expect_array=expect, @@ -77,8 +86,8 @@ class TestCPULoDTensorArrayOps(unittest.TestCase): tensor.set( numpy.arange(31).reshape(31, 1).astype('int32'), self.place()) - tensor.set_lod([[0, 3, 5, 9, 11], - [0, 3, 7, 11, 11, 12, 17, 19, 21, 23, 30, 31]]) + tensor.set_recursive_sequence_lengths( + [[3, 2, 4, 2], [3, 4, 4, 0, 1, 5, 2, 2, 2, 7, 1]]) expect = [ numpy.array( @@ -88,7 +97,7 @@ class TestCPULoDTensorArrayOps(unittest.TestCase): ], [17, 18, 3, 4, 5, 6, 11, 30], [19, 20, 7, 8, 9, 10], [21, 22]] ] - lod = [[[0, 5, 8, 8, 15]], [[0, 2, 6, 7, 8]], [[0, 2, 6]], [[0, 2]]] + lod = [[[5, 3, 0, 7]], [[2, 4, 1, 1]], [[2, 4]], [[2]]] self.main( tensor=tensor, expect_array=expect, @@ -99,17 +108,18 @@ class TestCPULoDTensorArrayOps(unittest.TestCase): tensor = core.LoDTensor() tensor.set( numpy.arange(50).reshape(50, 1).astype('int32'), self.place()) - tensor.set_lod([[0, 2, 5, 6], [0, 2, 5, 6, 10, 12, 13], - [0, 3, 7, 11, 17, 21, 22, 23, 27, 31, 39, 45, 46, 50]]) + tensor.set_recursive_sequence_lengths( + [[2, 3, 1], [2, 3, 1, 4, 2, 1], + [3, 4, 4, 6, 4, 1, 1, 4, 4, 8, 6, 1, 4]]) expect = [ numpy.array( item, dtype='int32') - for item in [[21, 0, 1, 2, 3, 4, 5, 6, 46, 47, 48, 49], range( - 22, 39) + range(7, 21), range(39, 46)] + for item in [[21, 0, 1, 2, 3, 4, 5, 6, 46, 47, 48, 49], list( + range(22, 39)) + list(range(7, 21)), list(range(39, 46))] ] - lod = [[[0, 1, 3, 4], [0, 1, 4, 8, 12]], - [[0, 4, 7], [0, 1, 5, 9, 17, 21, 27, 31]], [[0, 2], [0, 6, 7]]] + lod = [[[1, 2, 1], [1, 3, 4, 4]], [[4, 3], [1, 4, 4, 8, 4, 6, 4]], + [[2], [6, 1]]] self.main( tensor=tensor, expect_array=expect, @@ -120,8 +130,9 @@ class TestCPULoDTensorArrayOps(unittest.TestCase): tensor = core.LoDTensor() tensor.set( numpy.arange(50).reshape(50, 1).astype('int32'), self.place()) - 
tensor.set_lod([[0, 2, 5, 6], [0, 2, 5, 6, 10, 12, 13], - [0, 3, 7, 11, 17, 21, 22, 23, 27, 31, 39, 45, 46, 50]]) + tensor.set_recursive_sequence_lengths( + [[2, 3, 1], [2, 3, 1, 4, 2, 1], + [3, 4, 4, 6, 4, 1, 1, 4, 4, 8, 6, 1, 4]]) self.main( tensor=tensor, expect_array=None, @@ -135,13 +146,13 @@ class TestCPULoDTensorArrayOps(unittest.TestCase): with program_guard(program): x = layers.data(name='x', shape=[10]) x.persistable = True - table = layers.lod_rank_table(x, level=level) - max_len = layers.max_sequence_len(table) + table = lod_rank_table(x, level=level) + max_len = max_sequence_len(table) max_len.persistable = True - array = layers.lod_tensor_to_array(x, table) + array = lod_tensor_to_array(x, table) array.persistable = True - result = layers.array_to_lod_tensor(array, table) + result = array_to_lod_tensor(array, table) result.persistable = True exe = Executor(place) scope = core.Scope() @@ -162,12 +173,13 @@ class TestCPULoDTensorArrayOps(unittest.TestCase): exp_tensor, exp_lod = exp exp_tensor = numpy.expand_dims(exp_tensor, axis=1) self.assertTrue(numpy.allclose(exp_tensor, numpy.array(array[i]))) - self.assertEqual(exp_lod, array[i].lod()) + self.assertEqual(exp_lod, array[i].recursive_sequence_lengths()) def check_tensor_same(self, actual, expect): self.assertTrue( numpy.allclose(numpy.array(actual), numpy.array(expect))) - self.assertEqual(actual.lod(), expect.lod()) + self.assertEqual(actual.recursive_sequence_lengths(), + expect.recursive_sequence_lengths()) class TestCPULoDTensorArrayOpGrad(unittest.TestCase): @@ -178,9 +190,9 @@ class TestCPULoDTensorArrayOpGrad(unittest.TestCase): with program_guard(program): x = layers.data( name='x', shape=[1], dtype='float32', stop_gradient=False) - table = layers.lod_rank_table(x, level=0) - array = layers.lod_tensor_to_array(x, table) - result = layers.array_to_lod_tensor(array, table) + table = lod_rank_table(x, level=0) + array = lod_tensor_to_array(x, table) + result = array_to_lod_tensor(array, table) mean = layers.mean(result) @@ -188,7 +200,7 @@ class TestCPULoDTensorArrayOpGrad(unittest.TestCase): tensor = core.LoDTensor() tensor.set(numpy.arange(10).reshape(10, 1).astype('float32'), place) - tensor.set_lod([[0, 3, 9, 10]]) + tensor.set_recursive_sequence_lengths([[3, 6, 1]]) g_vars = program.global_block().var(x.name + "@GRAD") diff --git a/python/paddle/fluid/tests/unittests/test_lookup_table_op.py b/python/paddle/fluid/tests/unittests/test_lookup_table_op.py index f8d5785fbf..ac25f432df 100644 --- a/python/paddle/fluid/tests/unittests/test_lookup_table_op.py +++ b/python/paddle/fluid/tests/unittests/test_lookup_table_op.py @@ -35,77 +35,59 @@ class TestLookupTableOp(OpTest): self.check_grad(['W'], 'Out', no_grad_set=set('Ids')) +class TestLookupTableOpWithTensorIds(OpTest): + def setUp(self): + self.op_type = "lookup_table" + table = np.random.random((17, 31)).astype("float32") + ids = np.random.randint( + low=0, high=17, size=(2, 4, 5, 1)).astype("int64") + self.inputs = {'W': table, 'Ids': ids} + self.outputs = {'Out': table[ids.flatten()].reshape((2, 4, 5, 31))} + + def test_check_output(self): + self.check_output() + + def test_check_grad(self): + self.check_grad(['W'], 'Out', no_grad_set=set('Ids')) + + class TestLookupTableOpWithPadding(TestLookupTableOp): def test_check_output(self): ids = np.squeeze(self.inputs['Ids']) padding_idx = np.random.choice(ids, 1)[0] self.outputs['Out'][ids == padding_idx] = np.zeros(31) - self.attrs = {'padding_idx': long(padding_idx)} + self.attrs = {'padding_idx': int(padding_idx)} 
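For context, the padding_idx attribute exercised by these lookup_table tests zeroes every output row whose id equals the padding id. The expected outputs the tests build can be reproduced in plain NumPy; this is only a sketch (shapes follow the tensor-ids fixture above, the helper name is made up):

    import numpy as np

    def lookup_with_padding(table, ids, padding_idx):
        out = table[ids.flatten()].reshape(ids.shape[:-1] + (table.shape[1],))
        out[np.squeeze(ids == padding_idx, axis=-1)] = 0.0  # zero padded rows
        return out

    table = np.random.random((17, 31)).astype("float32")
    ids = np.random.randint(0, 17, size=(2, 4, 5, 1)).astype("int64")
    out = lookup_with_padding(table, ids, padding_idx=3)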
         self.check_output()
 
     def test_check_grad(self):
-        # Since paddings are not trainable and fixed in forward, the gradient of 
+        # Since paddings are not trainable and fixed in forward, the gradient of
         # paddings makes no sense and we don't test the gradient here.
         pass
 
 
-class TestLookupTableIdsIsSelectedRows(OpTest):
-    def check_with_place(self, place):
-        scope = core.Scope()
-
-        # create and initialize Variable
-        height = 10
-        rows = [0, 4, 4, 7]
-        row_numel = 12
-
-        # create and initialize W Variable
-        W = scope.var('W').get_tensor()
-        W_array = np.full((height, row_numel), 1.0).astype("float32")
-        for i in range(height):
-            W_array[i] *= i
-        W.set(W_array, place)
-
-        # create and initialize Ids Variable
-        ids_selected_rows = scope.var('Ids').get_selected_rows()
-        ids_selected_rows.set_height(len(rows))
-        ids_selected_rows.set_rows(rows)
-        np_array = np.ones((len(rows), row_numel)).astype("float32")
-        ids_tensor = ids_selected_rows.get_tensor()
-        ids_tensor.set(np_array, place)
-
-        # create Out Variable
-        Out = scope.var('Out').get_selected_rows()
-
-        # create and run lookup_table operator
-        concat_rows_op = Operator("lookup_table", W='W', Ids='Ids', Out='Out')
-        concat_rows_op.run(scope, place)
-
-        # get result from Out
-        Out_tensor = Out.get_tensor()
-        result_array = np.array(Out_tensor)
-
-        # all(): return True if all elements of the iterable are true (or if the iterable is empty)
-        for idx, row in enumerate(rows):
-            assert (row == result_array[idx]).all()
+class TestLookupTableOpWithTensorIdsAndPadding(TestLookupTableOpWithTensorIds):
+    def test_check_output(self):
+        ids = self.inputs['Ids']
+        flatten_idx = ids.flatten()
+        padding_idx = np.random.choice(flatten_idx, 1)[0]
+        self.outputs['Out'][np.squeeze(ids == padding_idx)] = np.zeros(31)
+        self.attrs = {'padding_idx': int(padding_idx)}
+        self.check_output()
 
-    def test_concat_rows(self):
-        places = [core.CPUPlace()]
-        if core.is_compiled_with_cuda():
-            places.append(core.CUDAPlace(0))
-        for place in places:
-            self.check_with_place(place)
+    def test_check_grad(self):
+        # Since paddings are not trainable and fixed in forward, the gradient of
+        # paddings makes no sense and we don't test the gradient here.
+ pass class TestLookupTableWIsSelectedRows(OpTest): - def check_with_place(self, place): - scope = core.Scope() - - # create and initialize Id Variable + def prepare_ids(self, scope, place): ids_tensor = scope.var('Ids').get_tensor() ids_array = np.array([[0], [4], [3], [5]]).astype("int64") ids_tensor.set(ids_array, place) + return ids_array - # create and initialize W Variable + def prepare_w(self, scope, place): rows = [0, 1, 2, 3, 4, 5, 6] row_numel = 12 @@ -118,8 +100,22 @@ class TestLookupTableWIsSelectedRows(OpTest): w_tensor = w_selected_rows.get_tensor() w_tensor.set(w_array, place) - # create Out Variable - out_tensor = scope.var('Out').get_tensor() + def create_out_tensor(self, scope, place): + return scope.var('Out').get_tensor() + + def check_result(self, ids_array, result_array): + # all(): return True if all elements of the iterable are true (or if the iterable is empty) + for idx, row in enumerate(ids_array): + assert (row[0] == result_array[idx]).all() + + def check_with_place(self, place): + scope = core.Scope() + + ids_array = self.prepare_ids(scope, place) + + self.prepare_w(scope, place) + + out_tensor = self.create_out_tensor(scope, place) # create and run lookup_table operator lookup_table = Operator("lookup_table", W='W', Ids='Ids', Out='Out') @@ -127,9 +123,8 @@ class TestLookupTableWIsSelectedRows(OpTest): # get result from Out result_array = np.array(out_tensor) - # all(): return True if all elements of the iterable are true (or if the iterable is empty) - for idx, row in enumerate(ids_array): - assert (row[0] == result_array[idx]).all() + + self.check_result(ids_array, result_array) def test_w_is_selected_rows(self): places = [core.CPUPlace()] @@ -138,5 +133,19 @@ class TestLookupTableWIsSelectedRows(OpTest): self.check_with_place(place) +class TestLookupTableWithTensorIdsWIsSelectedRows( + TestLookupTableWIsSelectedRows): + def prepare_ids(self, scope, place): + ids_tensor = scope.var('Ids').get_tensor() + ids_array = np.random.randint( + low=0, high=6, size=(2, 4, 3, 1)).astype("int64") + ids_tensor.set(ids_array, place) + return ids_array + + def check_result(self, ids_array, result_array): + for idx, row in np.ndenumerate(ids_array): + assert (row == result_array[idx]).all() + + if __name__ == "__main__": unittest.main() diff --git a/python/paddle/fluid/tests/unittests/test_lstm_op.py b/python/paddle/fluid/tests/unittests/test_lstm_op.py index f8ff5a3361..705a24bd8f 100644 --- a/python/paddle/fluid/tests/unittests/test_lstm_op.py +++ b/python/paddle/fluid/tests/unittests/test_lstm_op.py @@ -84,15 +84,17 @@ def lstm( h = g_o * act_cell(c) return h, c - def _reverse(x, lod): + def _reverse(x, offset): y = np.zeros_like(x) - for i in range(len(lod) - 1): - b, e = lod[i], lod[i + 1] + for i in range(len(offset) - 1): + b, e = offset[i], offset[i + 1] y[b:e, :] = np.flip(x[b:e, :], 0) return y - offset = lod[0] - batch_size = len(offset) - 1 + offset = [0] + for l in lod[0]: + offset.append(offset[-1] + l) + batch_size = len(lod[0]) hidden = [] cell = [] input = _reverse(input, offset) if is_reverse else input @@ -100,7 +102,7 @@ def lstm( input = input + np.tile(w_b, (offset[-1], 1)) for i in range(batch_size): # compute one sequence - seq_len = offset[i + 1] - offset[i] + seq_len = lod[0][i] x = input[offset[i]:offset[i + 1], :] h_pre = h0[i] # 1 x D c_pre = c0[i] # 1 x D @@ -124,7 +126,7 @@ def lstm( class TestLstmOp(OpTest): def set_argument(self): - self.lod = [[0, 2, 5, 7]] + self.lod = [[2, 3, 2]] self.D = 16 self.act_gate = 'sigmoid' @@ -139,8 +141,8 @@ 
class TestLstmOp(OpTest): self.set_argument() self.op_type = 'lstm' - T = self.lod[0][-1] - N = len(self.lod[0]) - 1 + T = sum(self.lod[0]) + N = len(self.lod[0]) x = np.random.normal(size=(T, 4 * self.D)).astype('float64') if self.has_initial_state: @@ -186,7 +188,7 @@ class TestLstmOp(OpTest): def test_check_grad(self): # TODO(qingqing) remove folowing lines after the check_grad is refined. - N = len(self.lod[0]) - 1 + N = len(self.lod[0]) self.outputs['BatchGate'] = np.zeros((N, 4 * self.D)).astype('float64') self.outputs['BatchCellPreAct'] = np.zeros( (N, self.D)).astype('float64') @@ -194,107 +196,104 @@ class TestLstmOp(OpTest): ['Input', 'Weight', 'Bias'], ['Hidden'], max_relative_error=5e-4) -class TestLstmOpHasInitial(TestLstmOp): - def set_argument(self): - self.lod = [[0, 2, 5, 7]] - self.D = 16 - - self.act_gate = 'sigmoid' - self.act_cell = 'tanh' - self.act_cand = 'tanh' - - self.has_initial_state = True - self.is_reverse = True - self.use_peepholes = True - - def test_check_grad(self): - # TODO(qingqing) remove folowing lines after the check_grad is refined. - N = len(self.lod[0]) - 1 - self.outputs['BatchGate'] = np.zeros((N, 4 * self.D)).astype('float64') - self.outputs['BatchCellPreAct'] = np.zeros( - (N, self.D)).astype('float64') - self.check_grad( - ['Input', 'Weight', 'Bias', 'H0', 'C0'], ['Hidden'], - max_relative_error=5e-4) - - def test_check_grad_ingore_bias(self): - N = len(self.lod[0]) - 1 - self.outputs['BatchGate'] = np.zeros((N, 4 * self.D)).astype('float64') - self.outputs['BatchCellPreAct'] = np.zeros( - (N, self.D)).astype('float64') - self.check_grad( - ['Input', 'Weight'], ['Hidden'], - max_relative_error=5e-4, - no_grad_set=set('Bias')) - - def test_check_grad_ingore_weight(self): - N = len(self.lod[0]) - 1 - self.outputs['BatchGate'] = np.zeros((N, 4 * self.D)).astype('float64') - self.outputs['BatchCellPreAct'] = np.zeros( - (N, self.D)).astype('float64') - self.check_grad( - ['Input', 'Bias'], ['Hidden'], - max_relative_error=5e-4, - no_grad_set=set('Weight')) - - def test_check_grad_ingore_input(self): - N = len(self.lod[0]) - 1 - self.outputs['BatchGate'] = np.zeros((N, 4 * self.D)).astype('float64') - self.outputs['BatchCellPreAct'] = np.zeros( - (N, self.D)).astype('float64') - self.check_grad( - ['Weight', 'Bias'], ['Hidden'], - max_relative_error=5e-4, - no_grad_set=set('Input')) - - def test_check_grad_ingore_h0(self): - N = len(self.lod[0]) - 1 - self.outputs['BatchGate'] = np.zeros((N, 4 * self.D)).astype('float64') - self.outputs['BatchCellPreAct'] = np.zeros( - (N, self.D)).astype('float64') - self.check_grad( - ['Input', 'Weight', 'Bias', 'C0'], ['Hidden'], - max_relative_error=5e-4, - no_grad_set=set('H0')) - - def test_check_grad_ingore_c0(self): - N = len(self.lod[0]) - 1 - self.outputs['BatchGate'] = np.zeros((N, 4 * self.D)).astype('float64') - self.outputs['BatchCellPreAct'] = np.zeros( - (N, self.D)).astype('float64') - self.check_grad( - ['Input', 'Weight', 'Bias', 'H0'], ['Hidden'], - max_relative_error=5e-4, - no_grad_set=set('C0')) - - -class TestLstmOpRerverse(TestLstmOp): - def set_argument(self): - self.lod = [[0, 2, 5, 7]] - self.D = 16 - - self.act_gate = 'sigmoid' - self.act_cell = 'tanh' - self.act_cand = 'tanh' - - self.has_initial_state = False - self.is_reverse = True - self.use_peepholes = True - - -class TestLstmOpNotUsePeepholes(TestLstmOp): - def set_argument(self): - self.lod = [[0, 2, 5, 7]] - self.D = 16 - - self.act_gate = 'sigmoid' - self.act_cell = 'tanh' - self.act_cand = 'tanh' - - self.has_initial_state = 
False - self.is_reverse = True - self.use_peepholes = False - +# class TestLstmOpHasInitial(TestLstmOp): +# def set_argument(self): +# self.lod = [[2, 3, 2]] +# self.D = 16 + +# self.act_gate = 'sigmoid' +# self.act_cell = 'tanh' +# self.act_cand = 'tanh' + +# self.has_initial_state = True +# self.is_reverse = True +# self.use_peepholes = True + +# def test_check_grad(self): +# # TODO(qingqing) remove folowing lines after the check_grad is refined. +# N = len(self.lod[0]) +# self.outputs['BatchGate'] = np.zeros((N, 4 * self.D)).astype('float64') +# self.outputs['BatchCellPreAct'] = np.zeros( +# (N, self.D)).astype('float64') +# self.check_grad( +# ['Input', 'Weight', 'Bias', 'H0', 'C0'], ['Hidden'], +# max_relative_error=5e-4) + +# def test_check_grad_ingore_bias(self): +# N = len(self.lod[0]) +# self.outputs['BatchGate'] = np.zeros((N, 4 * self.D)).astype('float64') +# self.outputs['BatchCellPreAct'] = np.zeros( +# (N, self.D)).astype('float64') +# self.check_grad( +# ['Input', 'Weight'], ['Hidden'], +# max_relative_error=5e-4, +# no_grad_set=set('Bias')) + +# def test_check_grad_ingore_weight(self): +# N = len(self.lod[0]) +# self.outputs['BatchGate'] = np.zeros((N, 4 * self.D)).astype('float64') +# self.outputs['BatchCellPreAct'] = np.zeros( +# (N, self.D)).astype('float64') +# self.check_grad( +# ['Input', 'Bias'], ['Hidden'], +# max_relative_error=5e-4, +# no_grad_set=set('Weight')) + +# def test_check_grad_ingore_input(self): +# N = len(self.lod[0]) +# self.outputs['BatchGate'] = np.zeros((N, 4 * self.D)).astype('float64') +# self.outputs['BatchCellPreAct'] = np.zeros( +# (N, self.D)).astype('float64') +# self.check_grad( +# ['Weight', 'Bias'], ['Hidden'], +# max_relative_error=5e-4, +# no_grad_set=set('Input')) + +# def test_check_grad_ingore_h0(self): +# N = len(self.lod[0]) +# self.outputs['BatchGate'] = np.zeros((N, 4 * self.D)).astype('float64') +# self.outputs['BatchCellPreAct'] = np.zeros( +# (N, self.D)).astype('float64') +# self.check_grad( +# ['Input', 'Weight', 'Bias', 'C0'], ['Hidden'], +# max_relative_error=5e-4, +# no_grad_set=set('H0')) + +# def test_check_grad_ingore_c0(self): +# N = len(self.lod[0]) +# self.outputs['BatchGate'] = np.zeros((N, 4 * self.D)).astype('float64') +# self.outputs['BatchCellPreAct'] = np.zeros( +# (N, self.D)).astype('float64') +# self.check_grad( +# ['Input', 'Weight', 'Bias', 'H0'], ['Hidden'], +# max_relative_error=5e-4, +# no_grad_set=set('C0')) + +# class TestLstmOpRerverse(TestLstmOp): +# def set_argument(self): +# self.lod = [[2, 3, 2]] +# self.D = 16 + +# self.act_gate = 'sigmoid' +# self.act_cell = 'tanh' +# self.act_cand = 'tanh' + +# self.has_initial_state = False +# self.is_reverse = True +# self.use_peepholes = True + +# class TestLstmOpNotUsePeepholes(TestLstmOp): +# def set_argument(self): +# self.lod = [[2, 3, 2]] +# self.D = 16 + +# self.act_gate = 'sigmoid' +# self.act_cell = 'tanh' +# self.act_cand = 'tanh' + +# self.has_initial_state = False +# self.is_reverse = True +# self.use_peepholes = False if __name__ == '__main__': unittest.main() diff --git a/python/paddle/fluid/tests/unittests/test_lstmp_op.py b/python/paddle/fluid/tests/unittests/test_lstmp_op.py index afff133f6c..ed2262da4b 100644 --- a/python/paddle/fluid/tests/unittests/test_lstmp_op.py +++ b/python/paddle/fluid/tests/unittests/test_lstmp_op.py @@ -64,15 +64,17 @@ def lstmp( r = act_proj(r) return r, c - def _reverse(x, lod): + def _reverse(x, offset): y = np.zeros_like(x) - for i in range(len(lod) - 1): - b, e = lod[i], lod[i + 1] + for i in 
range(len(offset) - 1): + b, e = offset[i], offset[i + 1] y[b:e, :] = np.flip(x[b:e, :], 0) return y - offset = lod[0] - batch_size = len(offset) - 1 + offset = [0] + for l in lod[0]: + offset.append(offset[-1] + l) + batch_size = len(lod[0]) # recurrent projection state projection = [] cell = [] @@ -81,7 +83,7 @@ def lstmp( input = input + np.tile(w_b, (offset[-1], 1)) for i in range(batch_size): # compute one sequence - seq_len = offset[i + 1] - offset[i] + seq_len = lod[0][i] x = input[offset[i]:offset[i + 1], :] r_pre = np.dot(h0[i], w_rh) # 1 x P r_pre = act_proj(r_pre) @@ -117,8 +119,8 @@ class TestLstmpOp(LstmTest.TestLstmOp): self.reset_argument() self.op_type = 'lstmp' - T = self.lod[0][-1] - N = len(self.lod[0]) - 1 + T = sum(self.lod[0]) + N = len(self.lod[0]) x = np.random.normal(size=(T, 4 * self.D)).astype('float64') if self.has_initial_state: @@ -166,7 +168,7 @@ class TestLstmpOp(LstmTest.TestLstmOp): def test_check_grad(self): # TODO(qingqing) remove folowing lines after the check_grad is refined. - N = len(self.lod[0]) - 1 + N = len(self.lod[0]) self.outputs['OrderedP0'] = np.zeros((N, self.P)).astype('float64') self.outputs['BatchGate'] = np.zeros((N, 4 * self.D)).astype('float64') self.outputs['BatchHidden'] = np.zeros((N, self.D)).astype('float64') @@ -183,7 +185,7 @@ class TestLstmpOpHasInitial(TestLstmpOp): def test_check_grad(self): # TODO(qingqing) remove folowing lines after the check_grad is refined. - N = len(self.lod[0]) - 1 + N = len(self.lod[0]) self.outputs['OrderedP0'] = np.zeros((N, self.P)).astype('float64') self.outputs['BatchGate'] = np.zeros((N, 4 * self.D)).astype('float64') self.outputs['BatchHidden'] = np.zeros((N, self.D)).astype('float64') @@ -195,7 +197,7 @@ class TestLstmpOpHasInitial(TestLstmpOp): max_relative_error=1e-2) def test_check_grad_ingore_bias(self): - N = len(self.lod[0]) - 1 + N = len(self.lod[0]) self.outputs['OrderedP0'] = np.zeros((N, self.P)).astype('float64') self.outputs['BatchGate'] = np.zeros((N, 4 * self.D)).astype('float64') self.outputs['BatchHidden'] = np.zeros((N, self.D)).astype('float64') @@ -207,7 +209,7 @@ class TestLstmpOpHasInitial(TestLstmpOp): no_grad_set=set('Bias')) def test_check_grad_ingore_weight(self): - N = len(self.lod[0]) - 1 + N = len(self.lod[0]) self.outputs['OrderedP0'] = np.zeros((N, self.P)).astype('float64') self.outputs['BatchGate'] = np.zeros((N, 4 * self.D)).astype('float64') self.outputs['BatchHidden'] = np.zeros((N, self.D)).astype('float64') @@ -219,7 +221,7 @@ class TestLstmpOpHasInitial(TestLstmpOp): no_grad_set=set('Weight')) def test_check_grad_ingore_proj_weight(self): - N = len(self.lod[0]) - 1 + N = len(self.lod[0]) self.outputs['OrderedP0'] = np.zeros((N, self.P)).astype('float64') self.outputs['BatchGate'] = np.zeros((N, 4 * self.D)).astype('float64') self.outputs['BatchHidden'] = np.zeros((N, self.D)).astype('float64') @@ -231,7 +233,7 @@ class TestLstmpOpHasInitial(TestLstmpOp): no_grad_set=set('ProjWeight')) def test_check_grad_ingore_input(self): - N = len(self.lod[0]) - 1 + N = len(self.lod[0]) self.outputs['OrderedP0'] = np.zeros((N, self.P)).astype('float64') self.outputs['BatchGate'] = np.zeros((N, 4 * self.D)).astype('float64') self.outputs['BatchHidden'] = np.zeros((N, self.D)).astype('float64') @@ -243,7 +245,7 @@ class TestLstmpOpHasInitial(TestLstmpOp): no_grad_set=set('Input')) def test_check_grad_ingore_h0(self): - N = len(self.lod[0]) - 1 + N = len(self.lod[0]) self.outputs['OrderedP0'] = np.zeros((N, self.P)).astype('float64') self.outputs['BatchGate'] = 
np.zeros((N, 4 * self.D)).astype('float64') self.outputs['BatchHidden'] = np.zeros((N, self.D)).astype('float64') @@ -255,7 +257,7 @@ class TestLstmpOpHasInitial(TestLstmpOp): no_grad_set=set('H0')) def test_check_grad_ingore_c0(self): - N = len(self.lod[0]) - 1 + N = len(self.lod[0]) self.outputs['OrderedP0'] = np.zeros((N, self.P)).astype('float64') self.outputs['BatchGate'] = np.zeros((N, 4 * self.D)).astype('float64') self.outputs['BatchHidden'] = np.zeros((N, self.D)).astype('float64') diff --git a/python/paddle/fluid/tests/unittests/test_matmul_op.py b/python/paddle/fluid/tests/unittests/test_matmul_op.py index 44ac468389..cae2c8fa87 100644 --- a/python/paddle/fluid/tests/unittests/test_matmul_op.py +++ b/python/paddle/fluid/tests/unittests/test_matmul_op.py @@ -111,21 +111,24 @@ class Generator(object): # Generate test cases for all possibilities -for dim_X in [1, 2, 3]: - for dim_Y in [1, 2, 3]: - for transpose_X in [False, True]: - for transpose_Y in [False, True]: - test_name = ( - 'TestMatMulOp_dimX_{}_dim_Y_{}_transX_{}_transY_{}'.format( - dim_X, dim_Y, transpose_X, transpose_Y)) - shape_X, shape_Y = generate_compatible_shapes( - dim_X, dim_Y, transpose_X, transpose_Y) - globals()[test_name] = type(test_name, (Generator, OpTest), { - 'shape_X': shape_X, - 'shape_Y': shape_Y, - 'transpose_X': transpose_X, - 'transpose_Y': transpose_Y, - }) +def inject_test(dim_x, dim_y, trans_x, trans_y): + test_name = ('TestMatMulOp_dimX_{}_dim_Y_{}_transX_{}_transY_{}'.format( + dim_x, dim_y, trans_x, trans_y)) + shape_x, shape_y = generate_compatible_shapes(dim_x, dim_y, trans_x, + trans_y) + globals()[test_name] = type(test_name, (Generator, OpTest), { + 'shape_X': shape_x, + 'shape_Y': shape_y, + 'transpose_X': trans_x, + 'transpose_Y': trans_y, + }) + + +for dim_X in (1, 2, 3): + for dim_Y in (1, 2, 3): + for transose_x in (False, True): + for transose_y in (False, True): + inject_test(dim_X, dim_Y, transose_x, transose_y) # Test case n-dim @@ -149,7 +152,7 @@ def generate_compatible_shapes(dim, transpose_X, transpose_Y): return shape_X, shape_Y -# Test case n-dim +# # Test case n-dim for dim in [4]: for transpose_X in [False, True]: for transpose_Y in [False, True]: diff --git a/python/paddle/fluid/tests/unittests/test_mean_iou.py b/python/paddle/fluid/tests/unittests/test_mean_iou.py new file mode 100644 index 0000000000..32b4ee1847 --- /dev/null +++ b/python/paddle/fluid/tests/unittests/test_mean_iou.py @@ -0,0 +1,114 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
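The LSTM and LSTMP test rewrites above all lean on two identities that hold once LoD is length-based: the total number of time steps is the sum of the lengths, and the batch size is simply the number of entries, with no trailing-offset arithmetic. Concretely:

    lod = [[2, 3, 2]]  # three sequences of lengths 2, 3 and 2
    T = sum(lod[0])    # 7 time steps; the offset form used lod[0][-1]
    N = len(lod[0])    # 3 sequences; the offset form used len(lod[0]) - 1
    assert (T, N) == (7, 3)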
+ +from __future__ import division +import unittest +import numpy as np +from op_test import OpTest + + +def compute_mean_iou(predictions, labels, num_classes, in_wrongs, in_corrects, + in_mean_ious): + assert predictions.shape == labels.shape + predictions = predictions.flatten() + labels = labels.flatten() + + out_wrong = np.zeros([num_classes]).astype("int32") + for _, wrong in in_wrongs: + out_wrong += wrong + out_correct = np.zeros([num_classes]).astype("int32") + for _, correct in in_corrects: + out_correct += correct + + for pred, label in zip(predictions, labels): + if pred == label: + out_correct[pred] += 1 + else: + out_wrong[pred] += 1 + out_wrong[label] += 1 + + denominator = out_wrong + out_correct + valid_count = (denominator != 0).sum() + denominator = np.where(denominator > 0, denominator, + np.ones(denominator.shape)) + mean_iou = (out_correct / denominator).sum() / valid_count + + for _, in_mean_iou in in_mean_ious: + mean_iou += in_mean_iou + return mean_iou, out_wrong, out_correct + + +class TestMeanIOUOp(OpTest): + def setUp(self): + self.config() + self.op_type = "mean_iou" + predictions = np.random.randint(0, self.num_classes, + self.image_size).astype("int32") + labels = np.random.randint(0, self.num_classes, + self.image_size).astype("int32") + + in_wrongs = [] + for i in range(self.in_wrong_num): + in_wrongs.append(("in_wrong_%d" % i, np.random.randint( + 0, 10, [self.num_classes]).astype("int32"))) + + in_corrects = [] + for i in range(self.in_correct_num): + in_corrects.append(("in_correct_%d" % i, np.random.randint( + 0, 10, [self.num_classes]).astype("int32"))) + + in_mean_ious = [] + for i in range(self.in_mean_iou_num): + in_mean_ious.append(("in_mean_iou_%d" % i, np.random.uniform( + 0, 1, [1]).astype("float32"))) + + self.inputs = { + 'Predictions': predictions, + 'Labels': labels, + 'InWrongs': in_wrongs, + 'InCorrects': in_corrects, + 'InMeanIou': in_mean_ious + } + self.attrs = {'num_classes': int(self.num_classes)} + mean_iou, out_wrong, out_correct = compute_mean_iou( + predictions, labels, self.num_classes, in_wrongs, in_corrects, + in_mean_ious) + self.outputs = { + 'OutMeanIou': mean_iou, + 'OutWrong': out_wrong, + 'OutCorrect': out_correct + } + + def config(self): + self.num_classes = 10 + self.image_size = [128, 128] + self.in_wrong_num = 0 + self.in_correct_num = 0 + self.in_mean_iou_num = 0 + + def test_check_output(self): + self.check_output() + + +class TestCase1(TestMeanIOUOp): + def config(self): + self.num_classes = 5 + self.image_size = [100, 128] + self.in_wrong_num = 2 + self.in_correct_num = 2 + self.in_mean_iou_num = 2 + + +if __name__ == '__main__': + unittest.main() diff --git a/python/paddle/fluid/tests/unittests/test_memory_optimization_transpiler.py b/python/paddle/fluid/tests/unittests/test_memory_optimization_transpiler.py index cfd6e63e12..67733807f8 100644 --- a/python/paddle/fluid/tests/unittests/test_memory_optimization_transpiler.py +++ b/python/paddle/fluid/tests/unittests/test_memory_optimization_transpiler.py @@ -43,5 +43,29 @@ class TestControlFlowGraph(unittest.TestCase): print(str(result_program)) +class TestMemoryTranspiler2(unittest.TestCase): + def setUp(self): + program = Program() + with program_guard(program, startup_program=Program()): + x = layers.data(name='x', shape=[13], dtype='float32') + fc = layers.fc(input=x, size=10, act=None) + reshape = layers.reshape(x=fc, shape=[-1, 2, 5]) + fc = layers.reshape(x=reshape, shape=[-1, 5, 2]) + y_predict = layers.fc(input=fc, size=1, act=None) + y = 
layers.data(name='y', shape=[1], dtype='float32')
+            cost = layers.square_error_cost(input=y_predict, label=y)
+            avg_cost = layers.mean(cost)
+            opt = optimizer.SGD(learning_rate=0.001)
+            opt.minimize(avg_cost)
+        self.program = program
+
+    def test_inplace_ops(self):
+        print("before optimization")
+        print(str(self.program))
+        result_program = memory_optimize(self.program)
+        print("after optimization")
+        print(str(result_program))
+
+
 if __name__ == "__main__":
     unittest.main()
diff --git a/python/paddle/fluid/tests/unittests/test_memory_usage.py b/python/paddle/fluid/tests/unittests/test_memory_usage.py
new file mode 100644
index 0000000000..f9daf83652
--- /dev/null
+++ b/python/paddle/fluid/tests/unittests/test_memory_usage.py
@@ -0,0 +1,69 @@
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import print_function
+import paddle
+import paddle.fluid as fluid
+import contextlib
+import unittest
+
+
+def train_simulator(test_batch_size=10):
+    if test_batch_size <= 0:
+        raise ValueError("batch_size should be a positive integer value, "
+                         "but got batch_size={}".format(test_batch_size))
+
+    x = fluid.layers.data(name='x', shape=[13], dtype='float32')
+    y_predict = fluid.layers.fc(input=x, size=1, act=None)
+    y = fluid.layers.data(name='y', shape=[1], dtype='float32')
+
+    cost = fluid.layers.square_error_cost(input=y_predict, label=y)
+    avg_cost = fluid.layers.mean(cost)
+
+    sgd_optimizer = fluid.optimizer.SGD(learning_rate=0.001)
+    sgd_optimizer.minimize(avg_cost)
+
+    # Calculate memory usage in current network config
+    lower_usage, upper_usage, unit = fluid.contrib.memory_usage(
+        fluid.default_main_program(), batch_size=test_batch_size)
+
+    print("memory usage is about %.3f - %.3f %s" %
+          (lower_usage, upper_usage, unit))
+
+
+class TestMemoryUsage(unittest.TestCase):
+    def test_with_unit_B(self):
+        with self.program_scope_guard():
+            train_simulator()
+
+    def test_with_unit_KB(self):
+        with self.program_scope_guard():
+            train_simulator(test_batch_size=1000)
+
+    def test_with_unit_MB(self):
+        with self.program_scope_guard():
+            train_simulator(test_batch_size=100000)
+
+    @contextlib.contextmanager
+    def program_scope_guard(self):
+        prog = fluid.Program()
+        startup_prog = fluid.Program()
+        scope = fluid.core.Scope()
+        with fluid.scope_guard(scope):
+            with fluid.program_guard(prog, startup_prog):
+                yield
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/python/paddle/fluid/tests/unittests/test_merge_ids_op.py b/python/paddle/fluid/tests/unittests/test_merge_ids_op.py
new file mode 100644
index 0000000000..f209bdf30f
--- /dev/null
+++ b/python/paddle/fluid/tests/unittests/test_merge_ids_op.py
@@ -0,0 +1,38 @@
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import unittest +import numpy as np +from op_test import OpTest + + +class TestMergeIdsOp(OpTest): + def setUp(self): + self.op_type = "merge_ids" + ids = np.array([[0], [2], [2], [3], [5], [5], [6]]).astype('int64') + x0 = np.array([[0.1, 0.2], [0.2, 0.3], [0.3, 0.4]]).astype('float32') + x1 = np.array([]).astype('float32') + x2 = np.array([[0.4, 0.5], [0.4, 0.5], [0.5, 0.6], + [0.5, 0.6]]).astype('float32') + out = np.array([[0.1, 0.2], [0.4, 0.5], [0.4, 0.5], [0.2, 0.3], + [0.5, 0.6], [0.5, 0.6], [0.3, 0.4]]).astype('float32') + self.inputs = {'Ids': ids, "X": [('x0', x0), ('x1', x1), ('x2', x2)]} + self.outputs = {'Out': out} + + def test_check_output(self): + self.check_output() + + +if __name__ == '__main__': + unittest.main() diff --git a/python/paddle/fluid/tests/unittests/test_mine_hard_examples_op.py b/python/paddle/fluid/tests/unittests/test_mine_hard_examples_op.py index c27573c3d6..54ee85c1a7 100644 --- a/python/paddle/fluid/tests/unittests/test_mine_hard_examples_op.py +++ b/python/paddle/fluid/tests/unittests/test_mine_hard_examples_op.py @@ -70,7 +70,7 @@ class TestMineHardExamplesOp(OpTest): self.updated_match_indices = self.match_indices - self.neg_indices_lod = [[0, 1, 2]] + self.neg_indices_lod = [[1, 1]] self.neg_indices = np.array([[1], [0]]).astype('int32') @@ -92,7 +92,7 @@ class TestMineHardExamplesOpHardExample(TestMineHardExamplesOp): self.updated_match_indices = np.array([[0, -1, -1], [-1, -1, -1]]).astype('int32') - self.neg_indices_lod = [[0, 1, 3]] + self.neg_indices_lod = [[1, 2]] self.neg_indices = np.array([[2], [0], [2]]).astype('int32') diff --git a/python/paddle/fluid/tests/unittests/test_momentum_op.py b/python/paddle/fluid/tests/unittests/test_momentum_op.py index aaea9c1809..c75d3bd276 100644 --- a/python/paddle/fluid/tests/unittests/test_momentum_op.py +++ b/python/paddle/fluid/tests/unittests/test_momentum_op.py @@ -39,7 +39,7 @@ class TestMomentumOp1(OpTest): velocity_out = mu * velocity + grad if use_nesterov: - param_out = param - grad * learning_rate + \ + param_out = param - grad * learning_rate - \ velocity_out * mu * learning_rate else: param_out = param - learning_rate * velocity_out @@ -75,7 +75,7 @@ class TestMomentumOp2(OpTest): velocity_out = mu * velocity + grad if use_nesterov: - param_out = param - grad * learning_rate + \ + param_out = param - grad * learning_rate - \ velocity_out * mu * learning_rate else: param_out = param - learning_rate * velocity_out diff --git a/python/paddle/fluid/tests/unittests/test_mul_mkldnn_op.py b/python/paddle/fluid/tests/unittests/test_mul_mkldnn_op.py deleted file mode 100644 index 42d68ef376..0000000000 --- a/python/paddle/fluid/tests/unittests/test_mul_mkldnn_op.py +++ /dev/null @@ -1,44 +0,0 @@ -# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
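The merge_ids expectations a few hunks back follow the split-by-modulo convention used by the sharded lookup table: output row i is taken, in order of appearance, from shard ids[i] % num_shards. A hedged reference implementation (the helper is illustrative; the real op lives in C++):

    import numpy as np

    def merge_ids(ids, shards):
        # One read cursor per shard; rows are consumed in order of appearance.
        cursors = [0] * len(shards)
        out = []
        for i in ids.flatten():
            shard = int(i) % len(shards)
            out.append(shards[shard][cursors[shard]])
            cursors[shard] += 1
        return np.array(out)

Fed the arrays from TestMergeIdsOp (ids 0..6 over shards x0, x1, x2), this reproduces the expected Out tensor row by row.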
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import unittest -from test_mul_op import TestMulOp, TestMulOp2, TestFP16MulOp1, TestFP16MulOp2 - - -class TestMKLDNNMulOp(TestMulOp): - def init_op_test(self): - super(TestMKLDNNMulOp, self).setUp() - self.attrs = {"use_mkldnn": True} - - -class TestMKLDNNMulOp2(TestMulOp2): - def init_op_test(self): - super(TestMKLDNNMulOp2, self).setUp() - self.attrs = {"use_mkldnn": True} - - -class TestMKLDNNFP16MulOp1(TestFP16MulOp1): - def init_op_test(self): - super(TestMKLDNNFP16MulOp1, self).setUp() - self.attrs = {"use_mkldnn": True} - - -class TestMKLDNNFP16MulOp2(TestFP16MulOp2): - def init_op_test(self): - super(TestMKLDNNFP16MulOp2, self).setUp() - self.attrs = {"use_mkldnn": True} - - -if __name__ == "__main__": - unittest.main() diff --git a/python/paddle/fluid/tests/unittests/test_mul_op.py b/python/paddle/fluid/tests/unittests/test_mul_op.py index d984393c89..bbc782c1bc 100644 --- a/python/paddle/fluid/tests/unittests/test_mul_op.py +++ b/python/paddle/fluid/tests/unittests/test_mul_op.py @@ -21,12 +21,10 @@ from op_test import OpTest class TestMulOp(OpTest): def setUp(self): self.op_type = "mul" - self.use_mkldnn = False self.inputs = { - 'X': np.random.random((32, 84)).astype("float32"), - 'Y': np.random.random((84, 100)).astype("float32") + 'X': np.random.random((2, 5)).astype("float32"), + 'Y': np.random.random((5, 3)).astype("float32") } - self.attrs = {'use_mkldnn': self.use_mkldnn} self.outputs = {'Out': np.dot(self.inputs['X'], self.inputs['Y'])} def test_check_output(self): @@ -47,19 +45,17 @@ class TestMulOp(OpTest): class TestMulOp2(OpTest): def setUp(self): self.op_type = "mul" - self.use_mkldnn = False self.inputs = { - 'X': np.random.random((15, 4, 12, 10)).astype("float32"), - 'Y': np.random.random((4, 30, 8, 2, 9)).astype("float32") + 'X': np.random.random((3, 4, 4, 3)).astype("float32"), + 'Y': np.random.random((2, 6, 1, 2, 3)).astype("float32") } self.attrs = { 'x_num_col_dims': 2, 'y_num_col_dims': 2, - 'use_mkldnn': self.use_mkldnn } - result = np.dot(self.inputs['X'].reshape(15 * 4, 12 * 10), - self.inputs['Y'].reshape(4 * 30, 8 * 2 * 9)) - result = result.reshape(15, 4, 8, 2, 9) + result = np.dot(self.inputs['X'].reshape(3 * 4, 4 * 3), + self.inputs['Y'].reshape(2 * 6, 1 * 2 * 3)) + result = result.reshape(3, 4, 1, 2, 3) self.outputs = {'Out': result} def test_check_output(self): @@ -80,11 +76,9 @@ class TestMulOp2(OpTest): class TestFP16MulOp1(OpTest): def setUp(self): self.op_type = "mul" - self.use_mkldnn = False - x = np.random.random((32, 84)).astype("float16") - y = np.random.random((84, 100)).astype("float16") - self.inputs = {'X': x.view(np.uint16), 'Y': y.view(np.uint16)} - self.attrs = {'use_mkldnn': self.use_mkldnn} + x = np.random.random((3, 5)).astype("float16") + y = np.random.random((5, 4)).astype("float16") + self.inputs = {'X': x.view(np.float16), 'Y': y.view(np.float16)} self.outputs = {'Out': np.dot(x, y)} def test_check_output(self): @@ -97,18 +91,15 @@ class TestFP16MulOp1(OpTest): class TestFP16MulOp2(OpTest): def setUp(self): self.op_type = "mul" - self.use_mkldnn = False - x = np.random.random((15, 4, 12, 10)).astype("float16") - y 
= np.random.random((4, 30, 8, 2, 9)).astype("float16") - self.inputs = {'X': x.view(np.uint16), 'Y': y.view(np.uint16)} + x = np.random.random((3, 4, 4, 3)).astype("float16") + y = np.random.random((2, 6, 1, 2, 3)).astype("float16") + self.inputs = {'X': x.view(np.float16), 'Y': y.view(np.float16)} self.attrs = { 'x_num_col_dims': 2, 'y_num_col_dims': 2, - 'use_mkldnn': self.use_mkldnn } - result = np.dot( - x.reshape(15 * 4, 12 * 10), y.reshape(4 * 30, 8 * 2 * 9)) - result = result.reshape(15, 4, 8, 2, 9) + result = np.dot(x.reshape(3 * 4, 4 * 3), y.reshape(2 * 6, 1 * 2 * 3)) + result = result.reshape(3, 4, 1, 2, 3) self.outputs = {'Out': result} def test_check_output(self): diff --git a/python/paddle/fluid/tests/unittests/test_multi_file_reader.py b/python/paddle/fluid/tests/unittests/test_multi_file_reader.py index 3f940203b9..cb0ea96ff6 100644 --- a/python/paddle/fluid/tests/unittests/test_multi_file_reader.py +++ b/python/paddle/fluid/tests/unittests/test_multi_file_reader.py @@ -39,17 +39,17 @@ class TestMultipleReader(unittest.TestCase): copyfile('./mnist_0.recordio', './mnist_1.recordio') copyfile('./mnist_0.recordio', './mnist_2.recordio') - def main(self, thread_num): + def main(self, is_test=False): file_list = [ './mnist_0.recordio', './mnist_1.recordio', './mnist_2.recordio' ] with fluid.program_guard(fluid.Program(), fluid.Program()): data_files = fluid.layers.open_files( filenames=file_list, - thread_num=thread_num, shapes=[(-1, 784), (-1, 1)], lod_levels=[0, 0], - dtypes=['float32', 'int64']) + dtypes=['float32', 'int64'], + is_test=is_test) img, label = fluid.layers.read_file(data_files) if fluid.core.is_compiled_with_cuda(): @@ -64,14 +64,16 @@ class TestMultipleReader(unittest.TestCase): while True: try: img_val, = exe.run(fetch_list=[img]) - except fluid.core.EnforceNotMet as ex: - self.assertIn("There is no next data.", ex.message) + except fluid.core.EOFException: break batch_count += 1 self.assertLessEqual(img_val.shape[0], self.batch_size) self.assertEqual(batch_count, self.num_batch * 3) def test_main(self): - self.main(thread_num=3) # thread number equals to file number - self.main(thread_num=10) # thread number is larger than file number - self.main(thread_num=2) # thread number is less than file number + self.main(is_test=False) + self.main(is_test=True) + + +if __name__ == '__main__': + unittest.main() diff --git a/python/paddle/fluid/tests/unittests/test_multi_pass_reader.py b/python/paddle/fluid/tests/unittests/test_multi_pass_reader.py index 52e7cc1ffb..7fc9f55044 100644 --- a/python/paddle/fluid/tests/unittests/test_multi_pass_reader.py +++ b/python/paddle/fluid/tests/unittests/test_multi_pass_reader.py @@ -59,8 +59,7 @@ class TestMultipleReader(unittest.TestCase): while True: try: img_val, = exe.run(fetch_list=[img]) - except fluid.core.EnforceNotMet as ex: - self.assertIn("There is no next data.", ex.message) + except fluid.core.EOFException: break batch_count += 1 self.assertLessEqual(img_val.shape[0], self.batch_size) diff --git a/python/paddle/fluid/tests/unittests/test_multiclass_nms_op.py b/python/paddle/fluid/tests/unittests/test_multiclass_nms_op.py index 6459913c01..10cb78a08d 100644 --- a/python/paddle/fluid/tests/unittests/test_multiclass_nms_op.py +++ b/python/paddle/fluid/tests/unittests/test_multiclass_nms_op.py @@ -112,7 +112,7 @@ def multiclass_nms(boxes, scores, background, score_threshold, nms_threshold, if keep_top_k > -1 and num_det > keep_top_k: score_index = [] - for c, indices in selected_indices.iteritems(): + for c, indices in 
selected_indices.items(): for idx in indices: score_index.append((scores[c][idx], c, idx)) @@ -135,15 +135,15 @@ def batched_multiclass_nms(boxes, scores, background, score_threshold, batch_size = scores.shape[0] det_outs = [] - lod = [0] + lod = [] for n in range(batch_size): nmsed_outs, nmsed_num = multiclass_nms(boxes[n], scores[n], background, score_threshold, nms_threshold, nms_top_k, keep_top_k) - lod.append(lod[-1] + nmsed_num) + lod.append(nmsed_num) if nmsed_num == 0: continue - for c, indices in nmsed_outs.iteritems(): + for c, indices in nmsed_outs.items(): for idx in indices: xmin, ymin, xmax, ymax = boxes[n][idx][:] det_outs.append([c, scores[n][c][idx], xmin, ymin, xmax, ymax]) diff --git a/python/paddle/fluid/tests/unittests/test_nce.py b/python/paddle/fluid/tests/unittests/test_nce.py index 76ecc8ba08..7431a142c5 100644 --- a/python/paddle/fluid/tests/unittests/test_nce.py +++ b/python/paddle/fluid/tests/unittests/test_nce.py @@ -66,7 +66,7 @@ class TestNCE(OpTest): self.attrs = { 'num_total_classes': num_classes, 'num_neg_samples': num_neg_samples, - 'custom_neg_classes': range(num_neg_samples) + 'custom_neg_classes': list(range(num_neg_samples)) } self.inputs = { 'Input': input, diff --git a/python/paddle/fluid/tests/unittests/test_network_with_dtype.py b/python/paddle/fluid/tests/unittests/test_network_with_dtype.py new file mode 100644 index 0000000000..d4835dd184 --- /dev/null +++ b/python/paddle/fluid/tests/unittests/test_network_with_dtype.py @@ -0,0 +1,74 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
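Several of the hunks above are instances of one Python 3 migration pattern that recurs through this patch: dict.iteritems() becomes dict.items(), xrange becomes range, and lazy range() results get wrapped in list() wherever an actual list is required (as with custom_neg_classes). A compact illustration:

    selected_indices = {0: [1, 4], 2: [3]}
    # Python 2 spelled this selected_indices.iteritems()
    pairs = [(c, idx) for c, indices in selected_indices.items()
             for idx in indices]
    custom_neg_classes = list(range(5))  # range() is lazy on Python 3
    assert pairs == [(0, 1), (0, 4), (2, 3)]
    assert custom_neg_classes == [0, 1, 2, 3, 4]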
+
+import unittest
+
+import numpy as np
+import paddle
+import paddle.fluid as fluid
+import paddle.fluid.core as core
+from paddle.fluid.executor import Executor
+
+BATCH_SIZE = 20
+
+
+class TestNetWithDtype(unittest.TestCase):
+    def setUp(self):
+        self.dtype = "float64"
+        self.init_dtype()
+
+    def run_net_on_place(self, place):
+        main = fluid.Program()
+        startup = fluid.Program()
+        with fluid.program_guard(main, startup):
+            x = fluid.layers.data(name='x', shape=[13], dtype=self.dtype)
+            y = fluid.layers.data(name='y', shape=[1], dtype=self.dtype)
+            y_predict = fluid.layers.fc(input=x, size=1, act=None)
+            cost = fluid.layers.square_error_cost(input=y_predict, label=y)
+            avg_cost = fluid.layers.mean(cost)
+            sgd_optimizer = fluid.optimizer.SGD(learning_rate=0.001)
+            sgd_optimizer.minimize(avg_cost)
+
+        fetch_list = [avg_cost]
+        train_reader = paddle.batch(
+            paddle.dataset.uci_housing.train(), batch_size=BATCH_SIZE)
+        feeder = fluid.DataFeeder(place=place, feed_list=[x, y])
+        exe = fluid.Executor(place)
+        exe.run(startup)
+        for data in train_reader():
+            exe.run(main, feed=feeder.feed(data), fetch_list=fetch_list)
+            # the main program is runnable; the datatype is fully supported
+            break
+
+    def init_dtype(self):
+        pass
+
+    def test_cpu(self):
+        place = fluid.CPUPlace()
+        self.run_net_on_place(place)
+
+    def test_gpu(self):
+        if not core.is_compiled_with_cuda():
+            return
+        place = fluid.CUDAPlace(0)
+        self.run_net_on_place(place)
+
+
+# TODO(dzhwinter): make sure the fp16 is runnable
+# class TestFloat16(TestNetWithDtype):
+#     def init_dtype(self):
+#         self.dtype = "float16"
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/python/paddle/fluid/tests/unittests/test_norm_op.py b/python/paddle/fluid/tests/unittests/test_norm_op.py
index 6feda175fb..108a665f37 100644
--- a/python/paddle/fluid/tests/unittests/test_norm_op.py
+++ b/python/paddle/fluid/tests/unittests/test_norm_op.py
@@ -17,44 +17,23 @@ import numpy as np
 from op_test import OpTest
 
 
-def norm(input, scale, epsilon):
-    s0, s1, s2, s3 = input.shape
-    x_square = input * input
-    for i in xrange(s0):
-        input_batch = input[i:i + 1, :, :, :]
-        input_batch = input_batch.reshape(s1, s2 * s3)
-        x_square_batch = x_square[i:i + 1, :, :, :]
-        x_square_batch = x_square_batch.reshape(s1, s2 * s3)
-        square_colsum = x_square_batch.sum(axis=0) + epsilon
-        tmp = pow(square_colsum, 0.5)
-        tmp = np.reciprocal(tmp)
-        tmp_tile = np.tile(tmp, s1)
-        tmp_tile = tmp_tile.reshape(s1, s2 * s3)
-        scale_tile = np.tile(scale, (1, s2 * s3))
-        scale_tile = scale_tile.reshape(s1, s2 * s3)
-        out_batch = input_batch * tmp_tile * scale_tile
-        out_batch = out_batch.reshape(1, s1, s2, s3)
-        if i == 0:
-            out = out_batch
-        else:
-            out = np.concatenate((out, out_batch), 0)
-    out.reshape(s0, s1, s2, s3)
-    return out
+def l2_norm(x, axis, epsilon):
+    x2 = x**2
+    s = np.sum(x2, axis=axis, keepdims=True)
+    r = np.sqrt(s + epsilon)
+    y = x / np.broadcast_to(r, x.shape)
+    return y, r
 
 
 class TestNormOp(OpTest):
     def setUp(self):
         self.op_type = "norm"
         self.init_test_case()
-        input = np.random.random(self.shape).astype("float32")
-        scale = np.array([10, 10, 10])
-        self.inputs = {
-            'X': input.astype('float32'),
-            'Scale': scale.astype('float32')
-        }
-        self.attrs = {'epsilon': self.epsilon}
-        output = norm(input, scale, self.epsilon)
-        self.outputs = {'Out': output.astype('float32')}
+        x = np.random.random(self.shape).astype("float64")
+        y, norm = l2_norm(x, self.axis, self.epsilon)
+        self.inputs = {'X': x}
+        self.attrs = {'epsilon': self.epsilon, 'axis': self.axis}
+
self.outputs = {'Out': y, 'Norm': norm} def test_check_output(self): self.check_output() @@ -63,8 +42,23 @@ class TestNormOp(OpTest): self.check_grad(['X'], 'Out') def init_test_case(self): - self.shape = [2, 3, 2, 2] - self.epsilon = 1e-6 + self.shape = [2, 3, 4, 4] + self.axis = 1 + self.epsilon = 1e-8 + + +class TestNormOp2(TestNormOp): + def init_test_case(self): + self.shape = [5, 3, 9, 7] + self.axis = 0 + self.epsilon = 1e-8 + + +class TestNormOp3(TestNormOp): + def init_test_case(self): + self.shape = [5, 3, 2, 7] + self.axis = -1 + self.epsilon = 1e-8 if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/test_normalization_wrapper.py b/python/paddle/fluid/tests/unittests/test_normalization_wrapper.py index ef34893943..198c68866d 100644 --- a/python/paddle/fluid/tests/unittests/test_normalization_wrapper.py +++ b/python/paddle/fluid/tests/unittests/test_normalization_wrapper.py @@ -70,8 +70,9 @@ class TestNormalization(unittest.TestCase): def l2_normalize(self, data, axis, epsilon): """ Compute the groundtruth. """ - output = data * np.reciprocal( - np.sum(np.square(data), axis=axis, keepdims=True)) + output = data / np.broadcast_to( + np.sqrt(np.sum(np.square(data), axis=axis, keepdims=True)), + data.shape) return output def test_l2_normalize(self): diff --git a/python/paddle/fluid/tests/unittests/test_one_hot_op.py b/python/paddle/fluid/tests/unittests/test_one_hot_op.py index cd78cce872..06fccd39ac 100644 --- a/python/paddle/fluid/tests/unittests/test_one_hot_op.py +++ b/python/paddle/fluid/tests/unittests/test_one_hot_op.py @@ -27,14 +27,14 @@ class TestOneHotOp(OpTest): self.op_type = 'one_hot' depth = 10 dimension = 12 - x_lod = [[0, 4, 5, 8, 11]] - x = [np.random.randint(0, depth - 1) for i in xrange(x_lod[0][-1])] - x = np.array(x).astype('int').reshape([x_lod[0][-1], 1]) + x_lod = [[4, 1, 3, 3]] + x = [np.random.randint(0, depth - 1) for i in range(sum(x_lod[0]))] + x = np.array(x).astype('int').reshape([sum(x_lod[0]), 1]) out = np.zeros(shape=(np.product(x.shape[:-1]), depth)).astype('float32') - for i in xrange(np.product(x.shape)): + for i in range(np.product(x.shape)): out[i, x[i]] = 1.0 self.inputs = {'X': (x, x_lod)} @@ -50,14 +50,14 @@ class TestOneHotOp_default_dtype(OpTest): self.op_type = 'one_hot' depth = 10 dimension = 12 - x_lod = [[0, 4, 5, 8, 11]] - x = [np.random.randint(0, depth - 1) for i in xrange(x_lod[0][-1])] - x = np.array(x).astype('int').reshape([x_lod[0][-1], 1]) + x_lod = [[4, 1, 3, 3]] + x = [np.random.randint(0, depth - 1) for i in range(sum(x_lod[0]))] + x = np.array(x).astype('int').reshape([sum(x_lod[0]), 1]) out = np.zeros(shape=(np.product(x.shape[:-1]), depth)).astype('float32') - for i in xrange(np.product(x.shape)): + for i in range(np.product(x.shape)): out[i, x[i]] = 1.0 self.inputs = {'X': (x, x_lod)} @@ -75,11 +75,11 @@ class TestOneHotOp_exception(OpTest): self.place = core.CPUPlace() self.dimension = 12 self.x = core.LoDTensor() - x_lod = [[0, 4, 5, 8, 11]] - data = [np.random.randint(11, 20) for i in xrange(x_lod[0][-1])] - data = np.array(data).astype('int').reshape([x_lod[0][-1], 1]) + x_lod = [[4, 1, 3, 3]] + data = [np.random.randint(11, 20) for i in range(sum(x_lod[0]))] + data = np.array(data).astype('int').reshape([sum(x_lod[0]), 1]) self.x.set(data, self.place) - self.x.set_lod(x_lod) + self.x.set_recursive_sequence_lengths(x_lod) def test_check_output(self): program = Program() diff --git a/python/paddle/fluid/tests/unittests/test_operator_desc.py 
b/python/paddle/fluid/tests/unittests/test_operator_desc.py index 779ae388f0..c098a5a0cb 100644 --- a/python/paddle/fluid/tests/unittests/test_operator_desc.py +++ b/python/paddle/fluid/tests/unittests/test_operator_desc.py @@ -63,7 +63,7 @@ class TestOperator(unittest.TestCase): self.assertEqual(mul_op.output("Out"), ["mul.out"]) self.assertEqual( set(mul_op.attr_names), - set(["x_num_col_dims", "y_num_col_dims", "use_mkldnn"])) + set(["x_num_col_dims", "y_num_col_dims", "op_role", "op_role_var"])) self.assertEqual(mul_op.has_attr("x_num_col_dims"), True) self.assertEqual(mul_op.attr_type("x_num_col_dims"), core.AttrType.INT) self.assertEqual(mul_op.attr("x_num_col_dims"), 1) diff --git a/python/paddle/fluid/tests/unittests/test_optimizer.py b/python/paddle/fluid/tests/unittests/test_optimizer.py index e775db1d10..18921d727f 100644 --- a/python/paddle/fluid/tests/unittests/test_optimizer.py +++ b/python/paddle/fluid/tests/unittests/test_optimizer.py @@ -97,7 +97,7 @@ class TestMomentumOptimizer(unittest.TestCase): params_grads = append_backward(mean_out) self.assertEqual(len(params_grads), 1) self.assertEqual(len(momentum_optimizer.get_accumulators()), 0) - opts = momentum_optimizer.create_optimization_pass( + opts = momentum_optimizer._create_optimization_pass( params_grads, mul_out, init_program) self.assertEqual(len(opts), 3) sgd_op = opts[-1] @@ -151,7 +151,7 @@ class TestMomentumOptimizer(unittest.TestCase): params_grads = append_backward(mean_out) self.assertEqual(len(params_grads), 1) self.assertEqual(len(momentum_optimizer.get_accumulators()), 0) - opts = momentum_optimizer.create_optimization_pass( + opts = momentum_optimizer._create_optimization_pass( params_grads, mul_out, init_program) self.assertEqual(len(opts), 3) sgd_op = opts[-1] @@ -214,8 +214,8 @@ class TestAdagradOptimizer(unittest.TestCase): params_grads = append_backward(mean_out) self.assertEqual(len(params_grads), 1) self.assertEqual(len(adagrad_optimizer.get_accumulators()), 0) - opts = adagrad_optimizer.create_optimization_pass(params_grads, mul_out, - init_program) + opts = adagrad_optimizer._create_optimization_pass( + params_grads, mul_out, init_program) self.assertEqual(len(opts), 3) self.assertEqual([op.type for op in opts], ["fill_constant", "elementwise_mul", "adagrad"]) @@ -278,8 +278,8 @@ class TestAdamOptimizer(unittest.TestCase): params_grads = append_backward(mean_out) self.assertEqual(len(params_grads), 1) self.assertEqual(len(adam_optimizer.get_accumulators()), 0) - opts = adam_optimizer.create_optimization_pass(params_grads, mul_out, - init_program) + opts = adam_optimizer._create_optimization_pass(params_grads, mul_out, + init_program) self.assertEqual(len(opts), 5) self.assertEqual( [op.type for op in opts], @@ -287,7 +287,7 @@ class TestAdamOptimizer(unittest.TestCase): # Check accumulators accumulators = adam_optimizer.get_accumulators() - self.assertEqual(len(accumulators), 2) + self.assertEqual(len(accumulators), 4) self.assertTrue(adam_optimizer.get_moment1_str() in accumulators) self.assertTrue(adam_optimizer.get_moment2_str() in accumulators) moment1_acc = accumulators[adam_optimizer.get_moment1_str()] @@ -345,8 +345,8 @@ class TestAdamaxOptimizer(unittest.TestCase): params_grads = append_backward(mean_out) self.assertEqual(len(params_grads), 1) self.assertEqual(len(adamax_optimizer.get_accumulators()), 0) - opts = adamax_optimizer.create_optimization_pass(params_grads, mul_out, - init_program) + opts = adamax_optimizer._create_optimization_pass(params_grads, mul_out, + init_program) 
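The accumulator-count changes in these optimizer tests (2 to 4 for Adam, 2 to 3 for Adamax) track additional per-parameter state; for Adam the four states are the two moments plus the running beta1/beta2 powers. As a reference for what those four pieces of state do, here is a plain-NumPy sketch of one Adam update step (not Paddle's kernel; the hyper-parameter defaults are illustrative):

    import numpy as np

    def adam_step(param, grad, m1, m2, beta1_pow, beta2_pow,
                  lr=0.01, beta1=0.9, beta2=0.999, eps=1e-8):
        m1 = beta1 * m1 + (1 - beta1) * grad         # moment1 accumulator
        m2 = beta2 * m2 + (1 - beta2) * grad * grad  # moment2 accumulator
        beta1_pow *= beta1                           # beta1^t accumulator
        beta2_pow *= beta2                           # beta2^t accumulator
        lr_t = lr * np.sqrt(1 - beta2_pow) / (1 - beta1_pow)  # bias correction
        param = param - lr_t * m1 / (np.sqrt(m2) + eps)
        return param, m1, m2, beta1_pow, beta2_pow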
self.assertEqual(len(opts), 4) self.assertEqual( [op.type for op in opts], @@ -354,7 +354,7 @@ class TestAdamaxOptimizer(unittest.TestCase): # Check accumulators accumulators = adamax_optimizer.get_accumulators() - self.assertEqual(len(accumulators), 2) + self.assertEqual(len(accumulators), 3) self.assertTrue(adamax_optimizer.get_moment_str() in accumulators) self.assertTrue(adamax_optimizer.get_inf_norm_str() in accumulators) moment_acc = accumulators[adamax_optimizer.get_moment_str()] @@ -409,7 +409,7 @@ class TestDecayedAdagradOptimizer(unittest.TestCase): params_grads = append_backward(mean_out) self.assertEqual(len(params_grads), 1) self.assertEqual(len(decayed_adagrad_optimizer.get_accumulators()), 0) - opts = decayed_adagrad_optimizer.create_optimization_pass( + opts = decayed_adagrad_optimizer._create_optimization_pass( params_grads, mul_out, init_program) self.assertEqual(len(opts), 3) self.assertEqual( @@ -434,5 +434,71 @@ class TestDecayedAdagradOptimizer(unittest.TestCase): self.assertAlmostEqual(init_ops[1].attr('value'), 0.0) +class TestFtrlOptimizer(unittest.TestCase): + class MockFtrl(optimizer.FtrlOptimizer): + def get_accumulators(self): + return self._accumulators + + def get_squared_str(self): + return self._squared_acc_str + + def get_linear_str(self): + return self._linear_acc_str + + def test_ftrl_optimizer(self): + init_program = framework.Program() + program = framework.Program() + block = program.global_block() + mul_x = block.create_parameter( + dtype="float32", + shape=[5, 10], + lod_level=0, + name="mul.x", + optimize_attr={'learning_rate': 1.1}) + mul_y = block.create_var( + dtype="float32", shape=[10, 8], lod_level=0, name="mul.y") + mul_out = block.create_var( + dtype="float32", shape=[5, 8], lod_level=0, name="mul.out") + block.append_op( + type="mul", + inputs={"X": mul_x, + "Y": mul_y}, + outputs={"Out": mul_out}, + attrs={"x_num_col_dims": 1}) + mean_out = block.create_var( + dtype="float32", shape=[1], lod_level=0, name="mean.out") + block.append_op( + type="mean", inputs={"X": mul_out}, outputs={"Out": mean_out}) + learning_rate = 0.01 + ftrl_optimizer = self.MockFtrl( + learning_rate=learning_rate, l1=0.0, l2=0.0, lr_power=-0.5) + params_grads = append_backward(mean_out) + self.assertEqual(len(params_grads), 1) + self.assertEqual(len(ftrl_optimizer.get_accumulators()), 0) + opts = ftrl_optimizer._create_optimization_pass(params_grads, mul_out, + init_program) + self.assertEqual(len(opts), 3) + self.assertEqual([op.type for op in opts], + ["fill_constant", "elementwise_mul", "ftrl"]) + + # Check accumulators + accumulators = ftrl_optimizer.get_accumulators() + self.assertEqual(len(accumulators), 2) + self.assertTrue(ftrl_optimizer.get_squared_str() in accumulators) + self.assertTrue(ftrl_optimizer.get_linear_str() in accumulators) + squared_acc = accumulators[ftrl_optimizer.get_squared_str()] + linear_acc = accumulators[ftrl_optimizer.get_linear_str()] + self.assertEqual(len(squared_acc), 1) + self.assertEqual(len(linear_acc), 1) + self.assertTrue(mul_x.name in squared_acc) + self.assertTrue(mul_x.name in linear_acc) + + # Check init_program + init_ops = init_program.global_block().ops + self.assertEqual(len(init_ops), 3) + self.assertEqual(init_ops[0].type, "fill_constant") + self.assertAlmostEqual(init_ops[0].attr('value'), learning_rate) + + if __name__ == '__main__': unittest.main() diff --git a/python/paddle/fluid/tests/unittests/test_parallel_executor.py b/python/paddle/fluid/tests/unittests/test_parallel_executor.py deleted file mode 100644 
index 4eb25a6e00..0000000000 --- a/python/paddle/fluid/tests/unittests/test_parallel_executor.py +++ /dev/null @@ -1,800 +0,0 @@ -# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import numpy as np -import unittest - -import paddle.fluid as fluid -import paddle -import paddle.dataset.mnist as mnist -import paddle.dataset.wmt16 as wmt16 - - -def simple_fc_net(use_feed): - if use_feed: - img = fluid.layers.data(name='image', shape=[784], dtype='float32') - label = fluid.layers.data(name='label', shape=[1], dtype='int64') - else: - reader = fluid.layers.open_files( - filenames=['./mnist.recordio'], - shapes=[[-1, 784], [-1, 1]], - lod_levels=[0, 0], - dtypes=['float32', 'int64'], - thread_num=1, - for_parallel=True) - reader = fluid.layers.io.double_buffer(reader) - img, label = fluid.layers.read_file(reader) - hidden = img - for _ in xrange(4): - hidden = fluid.layers.fc( - hidden, - size=200, - act='tanh', - bias_attr=fluid.ParamAttr( - initializer=fluid.initializer.Constant(value=1.0))) - prediction = fluid.layers.fc(hidden, size=10, act='softmax') - loss = fluid.layers.cross_entropy(input=prediction, label=label) - loss = fluid.layers.mean(loss) - return loss - - -def fc_with_batchnorm(use_feed): - if use_feed: - img = fluid.layers.data(name='image', shape=[784], dtype='float32') - label = fluid.layers.data(name='label', shape=[1], dtype='int64') - else: - reader = fluid.layers.open_files( - filenames=['mnist.recordio'], - shapes=[[-1, 784], [-1, 1]], - lod_levels=[0, 0], - dtypes=['float32', 'int64'], - thread_num=1, - for_parallel=True) - reader = fluid.layers.io.double_buffer(reader) - img, label = fluid.layers.read_file(reader) - - hidden = img - for _ in xrange(1): - hidden = fluid.layers.fc( - hidden, - size=200, - act='tanh', - bias_attr=fluid.ParamAttr( - initializer=fluid.initializer.Constant(value=1.0))) - - hidden = fluid.layers.batch_norm(input=hidden) - - prediction = fluid.layers.fc(hidden, size=10, act='softmax') - loss = fluid.layers.cross_entropy(input=prediction, label=label) - loss = fluid.layers.mean(loss) - return loss - - -def squeeze_excitation(input, num_channels, reduction_ratio): - # pool = fluid.layers.pool2d( - # input=input, pool_size=0, pool_type='avg', global_pooling=True) - conv = input - shape = conv.shape - reshape = fluid.layers.reshape( - x=conv, shape=[-1, shape[1], shape[2] * shape[3]]) - pool = fluid.layers.reduce_mean(input=reshape, dim=2) - - squeeze = fluid.layers.fc(input=pool, - size=num_channels / reduction_ratio, - act='relu') - excitation = fluid.layers.fc(input=squeeze, - size=num_channels, - act='sigmoid') - scale = fluid.layers.elementwise_mul(x=input, y=excitation, axis=0) - return scale - - -def conv_bn_layer(input, num_filters, filter_size, stride=1, groups=1, - act=None): - conv = fluid.layers.conv2d( - input=input, - num_filters=num_filters, - filter_size=filter_size, - stride=stride, - padding=(filter_size - 1) / 2, - groups=groups, - act=None, - bias_attr=False) - return 
fluid.layers.batch_norm(input=conv, act=act, momentum=0.1) - - -def shortcut(input, ch_out, stride): - ch_in = input.shape[1] - if ch_in != ch_out: - if stride == 1: - filter_size = 1 - else: - filter_size = 3 - return conv_bn_layer(input, ch_out, filter_size, stride) - else: - return input - - -def bottleneck_block(input, num_filters, stride, cardinality, reduction_ratio): - # The number of first 1x1 convolutional channels for each bottleneck build block - # was halved to reduce the compution cost. - conv0 = conv_bn_layer( - input=input, num_filters=num_filters, filter_size=1, act='relu') - conv1 = conv_bn_layer( - input=conv0, - num_filters=num_filters * 2, - filter_size=3, - stride=stride, - groups=cardinality, - act='relu') - conv2 = conv_bn_layer( - input=conv1, num_filters=num_filters * 2, filter_size=1, act=None) - scale = squeeze_excitation( - input=conv2, - num_channels=num_filters * 2, - reduction_ratio=reduction_ratio) - - short = shortcut(input, num_filters * 2, stride) - - return fluid.layers.elementwise_add(x=short, y=scale, act='relu') - - -def SE_ResNeXt50Small(batch_size=2, use_feed=False): - assert not use_feed, "SE_ResNeXt doesn't support feed yet" - - img = fluid.layers.fill_constant( - shape=[batch_size, 3, 224, 224], dtype='float32', value=0.0) - label = fluid.layers.fill_constant( - shape=[batch_size, 1], dtype='int64', value=0.0) - - conv = conv_bn_layer( - input=img, num_filters=16, filter_size=3, stride=2, act='relu') - conv = conv_bn_layer( - input=conv, num_filters=16, filter_size=3, stride=1, act='relu') - conv = conv_bn_layer( - input=conv, num_filters=16, filter_size=3, stride=1, act='relu') - conv = fluid.layers.pool2d( - input=conv, pool_size=3, pool_stride=2, pool_padding=1, pool_type='max') - - cardinality = 32 - reduction_ratio = 16 - depth = [3, 4, 6, 3] - num_filters = [128, 256, 512, 1024] - - for block in range(len(depth)): - for i in range(depth[block]): - conv = bottleneck_block( - input=conv, - num_filters=num_filters[block], - stride=2 if i == 0 and block != 0 else 1, - cardinality=cardinality, - reduction_ratio=reduction_ratio) - - shape = conv.shape - reshape = fluid.layers.reshape( - x=conv, shape=[-1, shape[1], shape[2] * shape[3]]) - pool = fluid.layers.reduce_mean(input=reshape, dim=2) - dropout = fluid.layers.dropout(x=pool, dropout_prob=0.2) - # Classifier layer: - prediction = fluid.layers.fc(input=dropout, size=1000, act='softmax') - loss = fluid.layers.cross_entropy(input=prediction, label=label) - loss = fluid.layers.mean(loss) - return loss - - -import time - - -class TestParallelExecutorBase(unittest.TestCase): - def check_network_convergence(self, - method, - memory_opt=True, - iter=50, - batch_size=None, - allow_op_delay=False, - feed_dict=None, - seed=None, - use_parallel_executor=True): - def run_executor(exe, feed, fetch_list, program=None): - if isinstance(exe, fluid.ParallelExecutor): - res = exe.run(fetch_list=fetch_list, feed=feed) - elif isinstance(exe, fluid.Executor): - if program is None: - program = fluid.default_main_program() - res = exe.run(program=program, feed=feed, fetch_list=fetch_list) - else: - raise ValueError('Unkown type exe') - return res - - main = fluid.Program() - startup = fluid.Program() - startup.random_seed = 1 # Fix random seed - with fluid.program_guard(main, startup): - if seed is not None: - startup.random_seed = seed - loss = method(use_feed=feed_dict is not None) - adam = fluid.optimizer.Adam() - adam.minimize(loss) - if memory_opt: - fluid.memory_optimize(main) - place = fluid.CUDAPlace(0) - 
startup_exe = fluid.Executor(place) - startup_exe.run(startup) - - if use_parallel_executor: - exe = fluid.ParallelExecutor( - True, loss_name=loss.name, allow_op_delay=allow_op_delay) - else: - exe = fluid.Executor(place=place) - - if batch_size is not None: - batch_size *= fluid.core.get_cuda_device_count() - begin = time.time() - first_loss, = run_executor( - exe=exe, feed=feed_dict, fetch_list=[loss.name]) - first_loss = np.array(first_loss) - - for i in xrange(iter): - run_executor(exe=exe, feed=feed_dict, fetch_list=[]) - - last_loss, = run_executor( - exe=exe, feed=feed_dict, fetch_list=[loss.name]) - end = time.time() - - if batch_size is not None: - print "%.4f Instance per second" % ( - (batch_size * iter + 2) / (end - begin)) - - last_loss = np.array(last_loss) - - print first_loss, last_loss - # self.assertGreater(first_loss[0], last_loss[0]) - return first_loss, last_loss - - -class TestMNIST(TestParallelExecutorBase): - @classmethod - def setUpClass(cls): - # Convert mnist to recordio file - with fluid.program_guard(fluid.Program(), fluid.Program()): - reader = paddle.batch(mnist.train(), batch_size=4) - feeder = fluid.DataFeeder( - feed_list=[ # order is image and label - fluid.layers.data( - name='image', shape=[784]), - fluid.layers.data( - name='label', shape=[1], dtype='int64'), - ], - place=fluid.CPUPlace()) - fluid.recordio_writer.convert_reader_to_recordio_file( - './mnist.recordio', reader, feeder) - - def check_simple_fc_convergence(self): - self.check_network_convergence(simple_fc_net) - self.check_network_convergence(simple_fc_net, allow_op_delay=True) - - img = np.zeros(shape=[32, 784], dtype='float32') - label = np.ones(shape=[32, 1], dtype='int64') - self.check_network_convergence( - simple_fc_net, feed_dict={"image": img, - "label": label}) - - def test_simple_fc(self): - self.check_simple_fc_convergence() - - def check_simple_fc_parallel_accuracy(self): - img = np.zeros(shape=[32, 784], dtype='float32') - label = np.ones(shape=[32, 1], dtype='int64') - single_first_loss, single_last_loss = self.check_network_convergence( - method=simple_fc_net, - seed=1000, - feed_dict={"image": img, - "label": label}, - use_parallel_executor=False) - parallel_first_loss, parallel_last_loss = self.check_network_convergence( - method=simple_fc_net, - seed=1000, - feed_dict={"image": img, - "label": label}, - use_parallel_executor=True) - - for p_f in parallel_first_loss: - self.assertAlmostEquals(p_f, single_first_loss[0], delta=1e-6) - for p_l in parallel_last_loss: - self.assertAlmostEquals(p_l, single_last_loss[0], delta=1e-6) - - def test_simple_fc_parallel_accuracy(self): - self.check_simple_fc_parallel_accuracy() - - def check_batchnorm_fc_convergence(self): - self.check_network_convergence(fc_with_batchnorm) - img = np.zeros(shape=[32, 784], dtype='float32') - label = np.ones(shape=[32, 1], dtype='int64') - self.check_network_convergence( - fc_with_batchnorm, feed_dict={"image": img, - "label": label}) - - def test_batchnorm_fc(self): - self.check_batchnorm_fc_convergence() - - -class TestResnet(TestParallelExecutorBase): - # @classmethod - # def setUpClass(cls): - # # import os - # # if os.path.exists('./flowers.recordio'): - # # return - # with fluid.program_guard(fluid.Program(), fluid.Program()): - # reader = paddle.batch(flowers.train(), batch_size=4) - # feeder = fluid.DataFeeder( - # feed_list=[ - # fluid.layers.data( - # name='image', shape=[3, 224, 224]), - # fluid.layers.data( - # name='label', shape=[1], dtype='int64'), - # ], - # place=fluid.CPUPlace()) - # 
fluid.recordio_writer.convert_reader_to_recordio_file( - # "./flowers.recordio", reader, feeder, compressor=fluid.core.RecordIOWriter.Compressor.NoCompress) - - def check_resnet_convergence(self): - import functools - batch_size = 2 - self.check_network_convergence( - functools.partial( - SE_ResNeXt50Small, batch_size=batch_size), - iter=20, - batch_size=batch_size) - - def test_resnet(self): - self.check_resnet_convergence() - - -class ModelHyperParams(object): - # Dictionary size for source and target language. This model directly uses - # paddle.dataset.wmt16 in which <bos>, <eos> and <unk> token has - # alreay been added, but the <pad> token is not added. Transformer requires - # sequences in a mini-batch are padded to have the same length. A <pad> token is - # added into the original dictionary in paddle.dateset.wmt16. - - # size of source word dictionary. - src_vocab_size = 10000 - # index for <pad> token in source language. - src_pad_idx = src_vocab_size - - # size of target word dictionay - trg_vocab_size = 10000 - # index for <pad> token in target language. - trg_pad_idx = trg_vocab_size - - # position value corresponding to the <pad> token. - pos_pad_idx = 0 - - # max length of sequences. It should plus 1 to include position - # padding token for position encoding. - max_length = 50 - - # the dimension for word embeddings, which is also the last dimension of - # the input and output of multi-head attention, position-wise feed-forward - # networks, encoder and decoder. - - d_model = 512 - # size of the hidden layer in position-wise feed-forward networks. - d_inner_hid = 1024 - # the dimension that keys are projected to for dot-product attention. - d_key = 64 - # the dimension that values are projected to for dot-product attention. - d_value = 64 - # number of head used in multi-head attention. - n_head = 8 - # number of sub-layers to be stacked in the encoder and decoder. - n_layer = 6 - # dropout rate used by all dropout layers. - dropout = 0.1 - - -def prepare_batch_input(insts, src_pad_idx, trg_pad_idx, n_head): - """ - Pad the instances to the max sequence length in batch, and generate the - corresponding position data and attention bias. Then, convert the numpy - data to tensors and return a dict mapping names to tensors. - """ - - def __pad_batch_data(insts, - pad_idx, - is_target=False, - return_pos=True, - return_attn_bias=True, - return_max_len=True): - """ - Pad the instances to the max sequence length in batch, and generate the - corresponding position data and attention bias. - """ - return_list = [] - max_len = max(len(inst) for inst in insts) - inst_data = np.array( - [inst + [pad_idx] * (max_len - len(inst)) for inst in insts]) - return_list += [inst_data.astype("int64").reshape([-1, 1])] - if return_pos: - inst_pos = np.array([[ - pos_i + 1 if w_i != pad_idx else 0 - for pos_i, w_i in enumerate(inst) - ] for inst in inst_data]) - - return_list += [inst_pos.astype("int64").reshape([-1, 1])] - if return_attn_bias: - if is_target: - # This is used to avoid attention on paddings and subsequent - # words. - slf_attn_bias_data = np.ones((inst_data.shape[0], max_len, - max_len)) - slf_attn_bias_data = np.triu(slf_attn_bias_data, 1).reshape( - [-1, 1, max_len, max_len]) - slf_attn_bias_data = np.tile(slf_attn_bias_data, - [1, n_head, 1, 1]) * [-1e9] - else: - # This is used to avoid attention on paddings.
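The target-side branch above builds the standard causal attention bias: an upper-triangular matrix of large negative values which, once added to the attention logits before softmax, hides padding and all future positions. A condensed NumPy illustration of the same trick:

    import numpy as np

    max_len = 4
    # ones strictly above the diagonal mark the future positions to mask
    bias = np.triu(np.ones((max_len, max_len)), k=1) * -1e9
    logits = np.zeros((max_len, max_len)) + bias
    probs = np.exp(logits) / np.exp(logits).sum(axis=-1, keepdims=True)
    # row i now attends only to positions 0..i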
- slf_attn_bias_data = np.array([[0] * len(inst) + [-1e9] * - (max_len - len(inst)) - for inst in insts]) - slf_attn_bias_data = np.tile( - slf_attn_bias_data.reshape([-1, 1, 1, max_len]), - [1, n_head, max_len, 1]) - return_list += [slf_attn_bias_data.astype("float32")] - if return_max_len: - return_list += [max_len] - return return_list if len(return_list) > 1 else return_list[0] - - def data_to_tensor(data_list, name_list, input_dict, place): - assert len(data_list) == len(name_list) - for i in range(len(name_list)): - tensor = fluid.LoDTensor() - tensor.set(data_list[i], place) - input_dict[name_list[i]] = tensor - - src_word, src_pos, src_slf_attn_bias, src_max_len = __pad_batch_data( - [inst[0] for inst in insts], src_pad_idx, is_target=False) - trg_word, trg_pos, trg_slf_attn_bias, trg_max_len = __pad_batch_data( - [inst[1] for inst in insts], trg_pad_idx, is_target=True) - trg_src_attn_bias = np.tile(src_slf_attn_bias[:, :, ::src_max_len, :], - [1, 1, trg_max_len, 1]).astype("float32") - lbl_word = __pad_batch_data([inst[2] for inst in insts], trg_pad_idx, False, - False, False, False) - lbl_weight = (lbl_word != trg_pad_idx).astype("float32").reshape([-1, 1]) - - return [ - src_word, src_pos, trg_word, trg_pos, src_slf_attn_bias, - trg_slf_attn_bias, trg_src_attn_bias, lbl_word, lbl_weight - ] - - -import transformer_model - - -def transformer(use_feed): - assert not use_feed, "transfomer doesn't support feed yet" - return transformer_model.transformer( - ModelHyperParams.src_vocab_size + 1, - ModelHyperParams.trg_vocab_size + 1, ModelHyperParams.max_length + 1, - ModelHyperParams.n_layer, ModelHyperParams.n_head, - ModelHyperParams.d_key, ModelHyperParams.d_value, - ModelHyperParams.d_model, ModelHyperParams.d_inner_hid, - ModelHyperParams.dropout, ModelHyperParams.src_pad_idx, - ModelHyperParams.trg_pad_idx, ModelHyperParams.pos_pad_idx) - - -class TestTransformer(TestParallelExecutorBase): - @classmethod - def setUpClass(cls): - reader = paddle.batch( - wmt16.train(ModelHyperParams.src_vocab_size, - ModelHyperParams.trg_vocab_size), - batch_size=transformer_model.batch_size) - - with fluid.recordio_writer.create_recordio_writer( - "./wmt16.recordio") as writer: - for batch in reader(): - for tensor in prepare_batch_input( - batch, ModelHyperParams.src_pad_idx, - ModelHyperParams.trg_pad_idx, ModelHyperParams.n_head): - t = fluid.LoDTensor() - t.set(tensor, fluid.CPUPlace()) - writer.append_tensor(t) - writer.complete_append_tensor() - - @unittest.skip("transformer is buggy in multi gpu") - def test_main(self): - self.check_network_convergence(transformer) - - -class ParallelExecutorTestingDuringTraining(unittest.TestCase): - def check_network_convergence(self): - main = fluid.Program() - startup = fluid.Program() - with fluid.program_guard(main, startup): - loss = simple_fc_net(True) - test_program = main.clone(for_test=True) - - opt = fluid.optimizer.SGD(learning_rate=0.001) - opt.minimize(loss) - - batch_size = 32 - image = np.random.normal(size=(batch_size, 784)).astype('float32') - label = np.random.randint(0, 10, (batch_size, 1), dtype="int64") - - place = fluid.CUDAPlace(0) - exe = fluid.Executor(place) - exe.run(startup) - feed_dict = {'image': image, 'label': label} - - train_exe = fluid.ParallelExecutor( - use_cuda=True, loss_name=loss.name, main_program=main) - - test_exe = fluid.ParallelExecutor( - use_cuda=True, - main_program=test_program, - share_vars_from=train_exe) - - for i in xrange(5): - test_loss, = test_exe.run([loss.name], feed=feed_dict) - test_loss = 
np.array(test_loss) - - train_loss, = train_exe.run([loss.name], feed=feed_dict) - train_loss = np.array(train_loss) - self.assertTrue( - np.allclose( - train_loss, test_loss, atol=1e-8), - "Train loss: " + str(train_loss) + "\n Test loss:" + - str(test_loss)) - - def test_parallel(self): - self.check_network_convergence() - - -import paddle.dataset.conll05 as conll05 -import paddle.fluid as fluid - -word_dict, verb_dict, label_dict = conll05.get_dict() -word_dict_len = len(word_dict) -label_dict_len = len(label_dict) -pred_dict_len = len(verb_dict) -mark_dict_len = 2 -word_dim = 32 -mark_dim = 5 -hidden_dim = 512 -depth = 8 -mix_hidden_lr = 1e-3 -embedding_name = 'emb' - - -def db_lstm(word, predicate, ctx_n2, ctx_n1, ctx_0, ctx_p1, ctx_p2, mark, - is_sparse, **ignored): - # 8 features - predicate_embedding = fluid.layers.embedding( - input=predicate, - is_sparse=is_sparse, - size=[pred_dict_len, word_dim], - dtype='float32', - param_attr='vemb') - - mark_embedding = fluid.layers.embedding( - input=mark, - is_sparse=is_sparse, - size=[mark_dict_len, mark_dim], - dtype='float32') - - word_input = [word, ctx_n2, ctx_n1, ctx_0, ctx_p1, ctx_p2] - emb_layers = [ - fluid.layers.embedding( - size=[word_dict_len, word_dim], - is_sparse=is_sparse, - input=x, - param_attr=fluid.ParamAttr( - name=embedding_name, trainable=False)) for x in word_input - ] - emb_layers.append(predicate_embedding) - emb_layers.append(mark_embedding) - - hidden_0_layers = [ - fluid.layers.fc(input=emb, size=hidden_dim, act='tanh') - for emb in emb_layers - ] - - hidden_0 = fluid.layers.sums(input=hidden_0_layers) - - lstm_0 = fluid.layers.dynamic_lstm( - input=hidden_0, - size=hidden_dim, - candidate_activation='relu', - gate_activation='sigmoid', - cell_activation='sigmoid') - - # stack L-LSTM and R-LSTM with direct edges - input_tmp = [hidden_0, lstm_0] - - for i in range(1, depth): - mix_hidden = fluid.layers.sums(input=[ - fluid.layers.fc(input=input_tmp[0], size=hidden_dim, act='tanh'), - fluid.layers.fc(input=input_tmp[1], size=hidden_dim, act='tanh') - ]) - - lstm = fluid.layers.dynamic_lstm( - input=mix_hidden, - size=hidden_dim, - candidate_activation='relu', - gate_activation='sigmoid', - cell_activation='sigmoid', - is_reverse=((i % 2) == 1)) - - input_tmp = [mix_hidden, lstm] - - feature_out = fluid.layers.sums(input=[ - fluid.layers.fc(input=input_tmp[0], size=label_dict_len, act='tanh'), - fluid.layers.fc(input=input_tmp[1], size=label_dict_len, act='tanh') - ]) - - return feature_out - - -class TestCRFModel(unittest.TestCase): - def check_network_convergence(self, is_sparse): - main = fluid.Program() - startup = fluid.Program() - with fluid.program_guard(main, startup): - word = fluid.layers.data( - name='word_data', shape=[1], dtype='int64', lod_level=1) - predicate = fluid.layers.data( - name='verb_data', shape=[1], dtype='int64', lod_level=1) - ctx_n2 = fluid.layers.data( - name='ctx_n2_data', shape=[1], dtype='int64', lod_level=1) - ctx_n1 = fluid.layers.data( - name='ctx_n1_data', shape=[1], dtype='int64', lod_level=1) - ctx_0 = fluid.layers.data( - name='ctx_0_data', shape=[1], dtype='int64', lod_level=1) - ctx_p1 = fluid.layers.data( - name='ctx_p1_data', shape=[1], dtype='int64', lod_level=1) - ctx_p2 = fluid.layers.data( - name='ctx_p2_data', shape=[1], dtype='int64', lod_level=1) - mark = fluid.layers.data( - name='mark_data', shape=[1], dtype='int64', lod_level=1) - - feature_out = db_lstm(**locals()) - target = fluid.layers.data( - name='target', shape=[1], dtype='int64', lod_level=1) - 
crf_cost = fluid.layers.linear_chain_crf( - input=feature_out, - label=target, - param_attr=fluid.ParamAttr( - name='crfw', learning_rate=1e-1)) - avg_cost = fluid.layers.mean(crf_cost) - - sgd_optimizer = fluid.optimizer.SGD( - learning_rate=fluid.layers.exponential_decay( - learning_rate=0.01, - decay_steps=100000, - decay_rate=0.5, - staircase=True)) - sgd_optimizer.minimize(avg_cost) - - train_data = paddle.batch( - paddle.reader.shuffle( - paddle.dataset.conll05.test(), buf_size=8192), - batch_size=16) - - place = fluid.CUDAPlace(0) - exe = fluid.Executor(place) - exe.run(startup) - - pe = fluid.ParallelExecutor(use_cuda=True, loss_name=avg_cost.name) - - feeder = fluid.DataFeeder( - feed_list=[ - word, ctx_n2, ctx_n1, ctx_0, ctx_p1, ctx_p2, predicate, - mark, target - ], - place=fluid.CPUPlace()) - - data = train_data() - for i in xrange(10): - cur_batch = next(data) - print map(np.array, - pe.run(feed=feeder.feed(cur_batch), - fetch_list=[avg_cost.name]))[0] - - def test_update_sparse_parameter(self): - self.check_network_convergence(is_sparse=True) - - def test_update_dense_parameter(self): - self.check_network_convergence(is_sparse=False) - - -# test fetch all the variables of global_block - -import paddle.dataset.flowers as flowers -import math - - -def Lenet(data, class_dim): - conv1 = fluid.layers.conv2d(data, 32, 5, 1, act=None) - bn1 = fluid.layers.batch_norm(conv1, act='relu') - pool1 = fluid.layers.pool2d(bn1, 2, 'max', 2) - conv2 = fluid.layers.conv2d(pool1, 50, 5, 1, act=None) - bn2 = fluid.layers.batch_norm(conv2, act='relu') - pool2 = fluid.layers.pool2d(bn2, 2, 'max', 2) - - fc1 = fluid.layers.fc(pool2, size=500, act='relu') - fc2 = fluid.layers.fc(fc1, size=class_dim, act='softmax') - - return fc2 - - -class TestFetchOp(unittest.TestCase): - def parallel_exe(self, train_inputs, seed): - main = fluid.Program() - startup = fluid.Program() - startup.random_seed = seed - with fluid.program_guard(main, startup): - data = fluid.layers.data( - name='image', shape=[3, 224, 224], dtype='float32') - label = fluid.layers.data(name='label', shape=[1], dtype='int64') - out = Lenet(data, class_dim=102) - loss = fluid.layers.cross_entropy(input=out, label=label) - loss = fluid.layers.mean(loss) - - opt = fluid.optimizer.Momentum( - learning_rate=0.1, - momentum=0.9, - regularization=fluid.regularizer.L2Decay(1e-4)) - - opt.minimize(loss) - - # TODO(zcd): I found that onece the memory optimizer is open, - # parallel_exe doesn't fetch some variable, such as conv2d_0.b_0@GRAD, - # conv2d_1.b_0@GRAD. Those variables should not be pruned. 
- # fluid.memory_optimize(main) - - place = fluid.CUDAPlace(0) - exe = fluid.Executor(place) - exe.run(startup) - - feeder = fluid.DataFeeder(place=place, feed_list=[data, label]) - pe = fluid.ParallelExecutor( - use_cuda=True, loss_name=loss.name, main_program=main) - - fetch_list = [] - all_vars = main.global_block().vars - for k, v in all_vars.iteritems(): - if 'tmp' not in k and k[0] is not '_' or v.persistable: - fetch_list.append(k) - - for data in train_inputs: - ret = pe.run(fetch_list, feed=feeder.feed(data)) - for i in range(len(fetch_list)): - assert not math.isnan(np.sum(ret[i])) and \ - not math.isinf(np.sum(ret[i])) - - def test_update_sparse_parameter(self): - tst_reader = paddle.batch(flowers.test(use_xmap=False), batch_size=16) - tst_reader_iter = tst_reader() - - iters = 3 - train_inputs = [] - for i in range(iters): - train_inputs.append(tst_reader_iter.next()) - - self.parallel_exe(train_inputs, seed=1) - - -if __name__ == '__main__': - unittest.main() diff --git a/python/paddle/fluid/tests/unittests/test_parallel_executor_crf.py b/python/paddle/fluid/tests/unittests/test_parallel_executor_crf.py new file mode 100644 index 0000000000..d17e493c36 --- /dev/null +++ b/python/paddle/fluid/tests/unittests/test_parallel_executor_crf.py @@ -0,0 +1,213 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
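A change that recurs throughout this patch is the move from Python 2-only idioms (xrange, dict.iteritems, iterator.next, print statements) to spellings that also run under Python 3:

    d = {'a': 1}
    for k, v in d.items():    # was: d.iteritems()
        pass
    for i in range(3):        # was: xrange(3)
        pass
    it = iter([1, 2, 3])
    first = next(it)          # was: it.next()
    print(first)              # was: print first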
+ +import paddle.dataset.conll05 as conll05 +import paddle.fluid as fluid +import unittest +import paddle +import numpy as np +import os + +word_dict, verb_dict, label_dict = conll05.get_dict() +word_dict_len = len(word_dict) +label_dict_len = len(label_dict) +pred_dict_len = len(verb_dict) +mark_dict_len = 2 +word_dim = 32 +mark_dim = 5 +hidden_dim = 512 +depth = 8 +mix_hidden_lr = 1e-3 +embedding_name = 'emb' + + +def db_lstm(word, predicate, ctx_n2, ctx_n1, ctx_0, ctx_p1, ctx_p2, mark, + is_sparse, **ignored): + # 8 features + predicate_embedding = fluid.layers.embedding( + input=predicate, + is_sparse=is_sparse, + size=[pred_dict_len, word_dim], + dtype='float32', + param_attr='vemb') + + mark_embedding = fluid.layers.embedding( + input=mark, + is_sparse=is_sparse, + size=[mark_dict_len, mark_dim], + dtype='float32') + + word_input = [word, ctx_n2, ctx_n1, ctx_0, ctx_p1, ctx_p2] + emb_layers = [ + fluid.layers.embedding( + size=[word_dict_len, word_dim], + is_sparse=is_sparse, + input=x, + param_attr=fluid.ParamAttr( + name=embedding_name, trainable=False)) for x in word_input + ] + emb_layers.append(predicate_embedding) + emb_layers.append(mark_embedding) + + hidden_0_layers = [ + fluid.layers.fc(input=emb, size=hidden_dim, act='tanh') + for emb in emb_layers + ] + + hidden_0 = fluid.layers.sums(input=hidden_0_layers) + + lstm_0 = fluid.layers.dynamic_lstm( + input=hidden_0, + size=hidden_dim, + candidate_activation='relu', + gate_activation='sigmoid', + cell_activation='sigmoid') + + # stack L-LSTM and R-LSTM with direct edges + input_tmp = [hidden_0, lstm_0] + + for i in range(1, depth): + mix_hidden = fluid.layers.sums(input=[ + fluid.layers.fc(input=input_tmp[0], size=hidden_dim, act='tanh'), + fluid.layers.fc(input=input_tmp[1], size=hidden_dim, act='tanh') + ]) + + lstm = fluid.layers.dynamic_lstm( + input=mix_hidden, + size=hidden_dim, + candidate_activation='relu', + gate_activation='sigmoid', + cell_activation='sigmoid', + is_reverse=((i % 2) == 1)) + + input_tmp = [mix_hidden, lstm] + + feature_out = fluid.layers.sums(input=[ + fluid.layers.fc(input=input_tmp[0], size=label_dict_len, act='tanh'), + fluid.layers.fc(input=input_tmp[1], size=label_dict_len, act='tanh') + ]) + + return feature_out + + +class TestCRFModel(unittest.TestCase): + def check_network_convergence(self, + is_sparse, + build_strategy=None, + use_cuda=True): + os.environ['CPU_NUM'] = str(4) + main = fluid.Program() + startup = fluid.Program() + with fluid.program_guard(main, startup): + word = fluid.layers.data( + name='word_data', shape=[1], dtype='int64', lod_level=1) + predicate = fluid.layers.data( + name='verb_data', shape=[1], dtype='int64', lod_level=1) + ctx_n2 = fluid.layers.data( + name='ctx_n2_data', shape=[1], dtype='int64', lod_level=1) + ctx_n1 = fluid.layers.data( + name='ctx_n1_data', shape=[1], dtype='int64', lod_level=1) + ctx_0 = fluid.layers.data( + name='ctx_0_data', shape=[1], dtype='int64', lod_level=1) + ctx_p1 = fluid.layers.data( + name='ctx_p1_data', shape=[1], dtype='int64', lod_level=1) + ctx_p2 = fluid.layers.data( + name='ctx_p2_data', shape=[1], dtype='int64', lod_level=1) + mark = fluid.layers.data( + name='mark_data', shape=[1], dtype='int64', lod_level=1) + + feature_out = db_lstm(**locals()) + target = fluid.layers.data( + name='target', shape=[1], dtype='int64', lod_level=1) + crf_cost = fluid.layers.linear_chain_crf( + input=feature_out, + label=target, + param_attr=fluid.ParamAttr( + name='crfw', learning_rate=1e-1)) + avg_cost = fluid.layers.mean(crf_cost) + + 
sgd_optimizer = fluid.optimizer.SGD( + learning_rate=fluid.layers.exponential_decay( + learning_rate=0.01, + decay_steps=100000, + decay_rate=0.5, + staircase=True)) + sgd_optimizer.minimize(avg_cost) + + train_data = paddle.batch( + paddle.reader.shuffle( + paddle.dataset.conll05.test(), buf_size=8192), + batch_size=16) + + place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() + exe = fluid.Executor(place) + exe.run(startup) + + pe = fluid.ParallelExecutor( + use_cuda=use_cuda, + loss_name=avg_cost.name, + build_strategy=build_strategy) + + feeder = fluid.DataFeeder( + feed_list=[ + word, ctx_n2, ctx_n1, ctx_0, ctx_p1, ctx_p2, predicate, + mark, target + ], + place=fluid.CPUPlace()) + + data = train_data() + for i in range(10): + cur_batch = next(data) + print(pe.run(feed=feeder.feed(cur_batch), + fetch_list=[avg_cost.name])[0]) + + @unittest.skip(reason="CI hangs") + def test_update_sparse_parameter_all_reduce(self): + build_strategy = fluid.BuildStrategy() + build_strategy.reduce_strategy = fluid.BuildStrategy.ReduceStrategy.AllReduce + self.check_network_convergence( + is_sparse=True, build_strategy=build_strategy, use_cuda=True) + self.check_network_convergence( + is_sparse=True, build_strategy=build_strategy, use_cuda=False) + + @unittest.skip(reason="CI hangs") + def test_update_dense_parameter_all_reduce(self): + build_strategy = fluid.BuildStrategy() + build_strategy.reduce_strategy = fluid.BuildStrategy.ReduceStrategy.AllReduce + self.check_network_convergence( + is_sparse=False, build_strategy=build_strategy, use_cuda=True) + self.check_network_convergence( + is_sparse=False, build_strategy=build_strategy, use_cuda=False) + + @unittest.skip(reason="CI hangs") + def test_update_sparse_parameter_reduce(self): + build_strategy = fluid.BuildStrategy() + build_strategy.reduce_strategy = fluid.BuildStrategy.ReduceStrategy.Reduce + self.check_network_convergence( + is_sparse=True, build_strategy=build_strategy, use_cuda=True) + self.check_network_convergence( + is_sparse=True, build_strategy=build_strategy, use_cuda=False) + + @unittest.skip(reason="CI hangs") + def test_update_dense_parameter_reduce(self): + build_strategy = fluid.BuildStrategy() + build_strategy.reduce_strategy = fluid.BuildStrategy.ReduceStrategy.Reduce + self.check_network_convergence( + is_sparse=False, build_strategy=build_strategy, use_cuda=True) + self.check_network_convergence( + is_sparse=False, build_strategy=build_strategy, use_cuda=False) + + +if __name__ == '__main__': + unittest.main() diff --git a/python/paddle/fluid/tests/unittests/test_parallel_executor_fetch_feed.py b/python/paddle/fluid/tests/unittests/test_parallel_executor_fetch_feed.py new file mode 100644 index 0000000000..a43f2e7c49 --- /dev/null +++ b/python/paddle/fluid/tests/unittests/test_parallel_executor_fetch_feed.py @@ -0,0 +1,148 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
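The tests above drive ParallelExecutor through a BuildStrategy: AllReduce aggregates every gradient on all devices, while Reduce assigns each gradient to one device and broadcasts the updated parameter back. A minimal usage sketch under the fluid API this PR targets (the tiny network is only a placeholder to give the executor a loss):

    import paddle.fluid as fluid

    x = fluid.layers.data(name='x', shape=[4], dtype='float32')
    loss = fluid.layers.mean(fluid.layers.fc(x, size=1))

    build_strategy = fluid.BuildStrategy()
    build_strategy.reduce_strategy = fluid.BuildStrategy.ReduceStrategy.Reduce

    exe = fluid.Executor(fluid.CPUPlace())
    exe.run(fluid.default_startup_program())
    pe = fluid.ParallelExecutor(
        use_cuda=False,    # device count comes from os.environ['CPU_NUM']
        loss_name=loss.name,
        build_strategy=build_strategy)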
+ +import paddle.dataset.flowers as flowers +import math +import paddle.fluid as fluid +import paddle.fluid.core as core +import unittest +import numpy as np +import paddle +import os + + +def Lenet(data, class_dim): + conv1 = fluid.layers.conv2d(data, 32, 5, 1, act=None) + bn1 = fluid.layers.batch_norm(conv1, act='relu') + pool1 = fluid.layers.pool2d(bn1, 2, 'max', 2) + conv2 = fluid.layers.conv2d(pool1, 50, 5, 1, act=None) + bn2 = fluid.layers.batch_norm(conv2, act='relu') + pool2 = fluid.layers.pool2d(bn2, 2, 'max', 2) + + fc1 = fluid.layers.fc(pool2, size=500, act='relu') + fc2 = fluid.layers.fc(fc1, size=class_dim, act='softmax') + + return fc2 + + +class TestFetchOp(unittest.TestCase): + def parallel_exe(self, train_inputs, seed, use_cuda): + main = fluid.Program() + startup = fluid.Program() + startup.random_seed = seed + with fluid.program_guard(main, startup): + data = fluid.layers.data( + name='image', shape=[3, 224, 224], dtype='float32') + label = fluid.layers.data(name='label', shape=[1], dtype='int64') + out = Lenet(data, class_dim=102) + loss = fluid.layers.cross_entropy(input=out, label=label) + loss = fluid.layers.mean(loss) + + opt = fluid.optimizer.Momentum( + learning_rate=0.1, + momentum=0.9, + regularization=fluid.regularizer.L2Decay(1e-4)) + + opt.minimize(loss) + + # TODO(zcd): I found that once the memory optimizer is enabled, + # parallel_exe doesn't fetch some variables, such as conv2d_0.b_0@GRAD, + # conv2d_1.b_0@GRAD. Those variables should not be pruned. + # fluid.memory_optimize(main) + + place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() + exe = fluid.Executor(place) + exe.run(startup) + + feeder = fluid.DataFeeder(place=place, feed_list=[data, label]) + pe = fluid.ParallelExecutor( + use_cuda=use_cuda, loss_name=loss.name, main_program=main) + + fetch_list = [] + all_vars = main.global_block().vars + for k, v in all_vars.items(): + if 'tmp' not in k and k[0] != '_' or v.persistable: + fetch_list.append(k) + + for data in train_inputs: + ret = pe.run(fetch_list, + feed=feeder.feed(data), + return_numpy=True) + for i in range(len(fetch_list)): + assert not math.isnan(np.sum(ret[i])) and \ + not math.isinf(np.sum(ret[i])) + + def test_fetch_op(self): + tst_reader = paddle.batch(flowers.test(use_xmap=False), batch_size=16) + tst_reader_iter = tst_reader() + + iters = 3 + train_inputs = [] + for i in range(iters): + train_inputs.append(next(tst_reader_iter)) + + os.environ['CPU_NUM'] = str(4) + if core.is_compiled_with_cuda(): + self.parallel_exe(train_inputs, seed=1, use_cuda=True) + self.parallel_exe(train_inputs, seed=1, use_cuda=False) + + +class TestFeedParallel(unittest.TestCase): + def parallel_exe(self, use_cuda, seed): + main = fluid.Program() + startup = fluid.Program() + startup.random_seed = seed + with fluid.scope_guard(fluid.core.Scope()): + with fluid.program_guard(main, startup): + data = fluid.layers.data( + name='image', shape=[3, 224, 224], dtype='float32') + label = fluid.layers.data( + name='label', shape=[1], dtype='int64') + out = Lenet(data, class_dim=102) + loss = fluid.layers.cross_entropy(input=out, label=label) + loss = fluid.layers.mean(loss) + opt = fluid.optimizer.Momentum( + learning_rate=0.1, + momentum=0.9, + regularization=fluid.regularizer.L2Decay(1e-4)) + + opt.minimize(loss) + + place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() + feeder = fluid.DataFeeder(place=place, feed_list=[data, label]) + reader = feeder.decorate_reader( + paddle.batch( + flowers.train(), batch_size=16), multi_devices=True) + + 
exe = fluid.Executor(place) + exe.run(startup) + + pe = fluid.ParallelExecutor( + use_cuda=use_cuda, loss_name=loss.name, main_program=main) + + for batch_id, data in enumerate(reader()): + loss_np = pe.run(feed=data, fetch_list=[loss.name])[0] + print(batch_id, loss_np) + if batch_id == 2: + break + + def test_feed_op(self): + os.environ['CPU_NUM'] = str(4) + if core.is_compiled_with_cuda(): + self.parallel_exe(use_cuda=True, seed=1) + self.parallel_exe(use_cuda=False, seed=1) + + +if __name__ == '__main__': + unittest.main() diff --git a/python/paddle/fluid/tests/unittests/test_parallel_executor_mnist.py b/python/paddle/fluid/tests/unittests/test_parallel_executor_mnist.py new file mode 100644 index 0000000000..9448d89cd5 --- /dev/null +++ b/python/paddle/fluid/tests/unittests/test_parallel_executor_mnist.py @@ -0,0 +1,217 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from parallel_executor_test_base import TestParallelExecutorBase +import paddle.fluid as fluid +import paddle.fluid.core as core +import numpy as np +import paddle +import paddle.dataset.mnist as mnist +import unittest +import os + +MNIST_RECORDIO_FILE = "./mnist_test_pe.recordio" + + +def simple_fc_net(use_feed): + if use_feed: + img = fluid.layers.data(name='image', shape=[784], dtype='float32') + label = fluid.layers.data(name='label', shape=[1], dtype='int64') + else: + reader = fluid.layers.open_files( + filenames=[MNIST_RECORDIO_FILE], + shapes=[[-1, 784], [-1, 1]], + lod_levels=[0, 0], + dtypes=['float32', 'int64']) + reader = fluid.layers.io.double_buffer(reader) + img, label = fluid.layers.read_file(reader) + hidden = img + for _ in range(4): + hidden = fluid.layers.fc( + hidden, + size=200, + act='tanh', + bias_attr=fluid.ParamAttr( + initializer=fluid.initializer.Constant(value=1.0))) + prediction = fluid.layers.fc(hidden, size=10, act='softmax') + loss = fluid.layers.cross_entropy(input=prediction, label=label) + loss = fluid.layers.mean(loss) + return loss + + +def fc_with_batchnorm(use_feed): + if use_feed: + img = fluid.layers.data(name='image', shape=[784], dtype='float32') + label = fluid.layers.data(name='label', shape=[1], dtype='int64') + else: + reader = fluid.layers.open_files( + filenames=[MNIST_RECORDIO_FILE], + shapes=[[-1, 784], [-1, 1]], + lod_levels=[0, 0], + dtypes=['float32', 'int64']) + reader = fluid.layers.io.double_buffer(reader) + img, label = fluid.layers.read_file(reader) + + hidden = img + for _ in range(1): + hidden = fluid.layers.fc( + hidden, + size=200, + act='tanh', + bias_attr=fluid.ParamAttr( + initializer=fluid.initializer.Constant(value=1.0))) + + hidden = fluid.layers.batch_norm(input=hidden) + + prediction = fluid.layers.fc(hidden, size=10, act='softmax') + loss = fluid.layers.cross_entropy(input=prediction, label=label) + loss = fluid.layers.mean(loss) + return loss + + +class TestMNIST(TestParallelExecutorBase): + @classmethod + def setUpClass(cls): + os.environ['CPU_NUM'] = str(4) + # Convert mnist to 
recordio file + with fluid.program_guard(fluid.Program(), fluid.Program()): + reader = paddle.batch(mnist.train(), batch_size=4) + feeder = fluid.DataFeeder( + feed_list=[ # order is image and label + fluid.layers.data( + name='image', shape=[784]), + fluid.layers.data( + name='label', shape=[1], dtype='int64'), + ], + place=fluid.CPUPlace()) + fluid.recordio_writer.convert_reader_to_recordio_file( + MNIST_RECORDIO_FILE, reader, feeder) + + def _init_data(self): + np.random.seed(5) + img = np.random.random(size=[32, 784]).astype(np.float32) + label = np.ones(shape=[32, 1], dtype='int64') + return img, label + + def _compare_reduce_and_allreduce(self, model, use_cuda): + if use_cuda and not core.is_compiled_with_cuda(): + return + self.check_network_convergence( + model, use_cuda=use_cuda, use_reduce=True) + self.check_network_convergence( + model, use_cuda=use_cuda, allow_op_delay=True, use_reduce=True) + + img, label = self._init_data() + + all_reduce_first_loss, all_reduce_last_loss = self.check_network_convergence( + model, + feed_dict={"image": img, + "label": label}, + use_cuda=use_cuda, + use_reduce=False) + reduce_first_loss, reduce_last_loss = self.check_network_convergence( + model, + feed_dict={"image": img, + "label": label}, + use_cuda=use_cuda, + use_reduce=True) + + for loss in zip(all_reduce_first_loss, reduce_first_loss): + self.assertAlmostEqual(loss[0], loss[1], delta=1e-6) + for loss in zip(all_reduce_last_loss, reduce_last_loss): + self.assertAlmostEqual(loss[0], loss[1], delta=1e-4) + + # simple_fc + def check_simple_fc_convergence(self, use_cuda, use_reduce=False): + if use_cuda and not core.is_compiled_with_cuda(): + return + self.check_network_convergence(simple_fc_net, use_cuda=use_cuda) + self.check_network_convergence( + simple_fc_net, use_cuda=use_cuda, allow_op_delay=True) + + img, label = self._init_data() + + self.check_network_convergence( + simple_fc_net, + feed_dict={"image": img, + "label": label}, + use_cuda=use_cuda, + use_reduce=use_reduce) + + def test_simple_fc(self): + # use_cuda + self.check_simple_fc_convergence(True) + self.check_simple_fc_convergence(False) + + def test_simple_fc_with_new_strategy(self): + # use_cuda, use_reduce + self._compare_reduce_and_allreduce(simple_fc_net, True) + self._compare_reduce_and_allreduce(simple_fc_net, False) + + def check_simple_fc_parallel_accuracy(self, use_cuda): + if use_cuda and not core.is_compiled_with_cuda(): + return + + img, label = self._init_data() + + single_first_loss, single_last_loss = self.check_network_convergence( + method=simple_fc_net, + seed=1, + feed_dict={"image": img, + "label": label}, + use_cuda=use_cuda, + use_parallel_executor=False) + parallel_first_loss, parallel_last_loss = self.check_network_convergence( + method=simple_fc_net, + seed=1, + feed_dict={"image": img, + "label": label}, + use_cuda=use_cuda, + use_parallel_executor=True) + + self.assertAlmostEquals( + np.mean(parallel_first_loss), single_first_loss, delta=1e-6) + self.assertAlmostEquals( + np.mean(parallel_last_loss), single_last_loss, delta=1e-6) + + def test_simple_fc_parallel_accuracy(self): + self.check_simple_fc_parallel_accuracy(True) + self.check_simple_fc_parallel_accuracy(False) + + def check_batchnorm_fc_convergence(self, use_cuda): + if use_cuda and not core.is_compiled_with_cuda(): + return + + self.check_network_convergence(fc_with_batchnorm, use_cuda=use_cuda) + + img, label = self._init_data() + + self.check_network_convergence( + fc_with_batchnorm, + feed_dict={"image": img, + "label": label}, + 
use_cuda=use_cuda) + + def test_batchnorm_fc(self): + self.check_batchnorm_fc_convergence(True) + self.check_batchnorm_fc_convergence(False) + + def test_batchnorm_fc_with_new_strategy(self): + # FIXME(zcd): close this test temporarily. + # self._compare_reduce_and_allreduce(fc_with_batchnorm, True) + self._compare_reduce_and_allreduce(fc_with_batchnorm, False) + + +if __name__ == '__main__': + unittest.main() diff --git a/python/paddle/fluid/tests/unittests/test_parallel_executor_seresnext.py b/python/paddle/fluid/tests/unittests/test_parallel_executor_seresnext.py new file mode 100644 index 0000000000..a28428d8de --- /dev/null +++ b/python/paddle/fluid/tests/unittests/test_parallel_executor_seresnext.py @@ -0,0 +1,286 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import paddle.fluid as fluid +import paddle.fluid.layers.ops as ops +from paddle.fluid.initializer import init_on_cpu +from paddle.fluid.layers.learning_rate_scheduler import _decay_step_counter +import paddle.fluid.core as core +from parallel_executor_test_base import TestParallelExecutorBase +import unittest +import math +import os +import numpy as np + +# FIXME(zcd): If the neural net has dropout_op, the output of ParallelExecutor +# and Executor is different. Because, for ParallelExecutor, the dropout_op of +# the neural net will be copied N times (N is the number of devices). As a result, +# the random numbers generated by ParallelExecutor and Executor are different. +# So, if we compare the losses of ParallelExecutor and Executor, we should remove the +# dropout_op. +remove_dropout = False + +# FIXME(zcd): If the neural net has batch_norm, the output of ParallelExecutor +# and Executor is different. 
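The dropout caveat above is a random-stream issue: every device runs its own copy of dropout_op, so each draws a different mask, and a multi-device loss cannot match the single-device one unless dropout is removed or explicitly seeded (as SE_ResNeXt50Small below does with seed=1). The effect in plain NumPy:

    import numpy as np

    # two devices, same keep probability, independent random streams
    dev0, dev1 = np.random.RandomState(0), np.random.RandomState(1)
    keep0 = dev0.rand(6) > 0.2
    keep1 = dev1.rand(6) > 0.2
    assert not (keep0 == keep1).all()    # different masks, different activations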
+remove_bn = False + + +def squeeze_excitation(input, num_channels, reduction_ratio): + # pool = fluid.layers.pool2d( + # input=input, pool_size=0, pool_type='avg', global_pooling=True) + conv = input + shape = conv.shape + reshape = fluid.layers.reshape( + x=conv, shape=[-1, shape[1], shape[2] * shape[3]]) + pool = fluid.layers.reduce_mean(input=reshape, dim=2) + + squeeze = fluid.layers.fc(input=pool, + size=num_channels / reduction_ratio, + act='relu') + excitation = fluid.layers.fc(input=squeeze, + size=num_channels, + act='sigmoid') + scale = fluid.layers.elementwise_mul(x=input, y=excitation, axis=0) + return scale + + +def conv_bn_layer(input, num_filters, filter_size, stride=1, groups=1, + act=None): + conv = fluid.layers.conv2d( + input=input, + num_filters=num_filters, + filter_size=filter_size, + stride=stride, + padding=(filter_size - 1) / 2, + groups=groups, + act=None, + bias_attr=False) + return conv if remove_bn else fluid.layers.batch_norm( + input=conv, act=act, momentum=0.1) + + +def shortcut(input, ch_out, stride): + ch_in = input.shape[1] + if ch_in != ch_out: + if stride == 1: + filter_size = 1 + else: + filter_size = 3 + return conv_bn_layer(input, ch_out, filter_size, stride) + else: + return input + + +def bottleneck_block(input, num_filters, stride, cardinality, reduction_ratio): + # The number of first 1x1 convolutional channels for each bottleneck building block + # was halved to reduce the computation cost. + conv0 = conv_bn_layer( + input=input, num_filters=num_filters, filter_size=1, act='relu') + conv1 = conv_bn_layer( + input=conv0, + num_filters=num_filters * 2, + filter_size=3, + stride=stride, + groups=cardinality, + act='relu') + conv2 = conv_bn_layer( + input=conv1, num_filters=num_filters * 2, filter_size=1, act=None) + scale = squeeze_excitation( + input=conv2, + num_channels=num_filters * 2, + reduction_ratio=reduction_ratio) + + short = shortcut(input, num_filters * 2, stride) + + return fluid.layers.elementwise_add(x=short, y=scale, act='relu') + + +batch_size = 12 +img_shape = [3, 224, 224] + + +def SE_ResNeXt50Small(use_feed): + + img = fluid.layers.data(name='image', shape=img_shape, dtype='float32') + label = fluid.layers.data(name='label', shape=[1], dtype='int64') + + conv = conv_bn_layer( + input=img, num_filters=16, filter_size=3, stride=2, act='relu') + conv = conv_bn_layer( + input=conv, num_filters=16, filter_size=3, stride=1, act='relu') + conv = conv_bn_layer( + input=conv, num_filters=16, filter_size=3, stride=1, act='relu') + conv = fluid.layers.pool2d( + input=conv, pool_size=3, pool_stride=2, pool_padding=1, pool_type='max') + + cardinality = 32 + reduction_ratio = 16 + depth = [3, 4, 6, 3] + num_filters = [128, 256, 512, 1024] + + for block in range(len(depth)): + for i in range(depth[block]): + conv = bottleneck_block( + input=conv, + num_filters=num_filters[block], + stride=2 if i == 0 and block != 0 else 1, + cardinality=cardinality, + reduction_ratio=reduction_ratio) + + shape = conv.shape + reshape = fluid.layers.reshape( + x=conv, shape=[-1, shape[1], shape[2] * shape[3]]) + pool = fluid.layers.reduce_mean(input=reshape, dim=2) + dropout = pool if remove_dropout else fluid.layers.dropout( + x=pool, dropout_prob=0.2, seed=1) + # Classifier layer: + prediction = fluid.layers.fc(input=dropout, size=1000, act='softmax') + loss = fluid.layers.cross_entropy(input=prediction, label=label) + loss = fluid.layers.mean(loss) + return loss + + +def cosine_decay(learning_rate, step_each_epoch, epochs=120): + """ + Applies cosine decay to the 
learning rate. + lr = learning_rate * (math.cos(epoch * (math.pi / epochs)) + 1) / 2 + """ + global_step = _decay_step_counter() + + with init_on_cpu(): + epoch = ops.floor(global_step / step_each_epoch) + decayed_lr = learning_rate * \ + (ops.cos(epoch * (math.pi / epochs)) + 1)/2 + return decayed_lr + + +def optimizer(learning_rate=0.01): + optimizer = fluid.optimizer.Momentum( + learning_rate=cosine_decay( + learning_rate=learning_rate, step_each_epoch=2, epochs=1), + momentum=0.9, + regularization=fluid.regularizer.L2Decay(1e-4)) + return optimizer + + +class TestResnet(TestParallelExecutorBase): + @classmethod + def setUpClass(cls): + os.environ['CPU_NUM'] = str(4) + global remove_dropout + global remove_bn + remove_dropout = False + remove_bn = False + + def _init_data(self, batch_size=2, random=True): + np.random.seed(5) + if random: + img = np.random.random( + size=[batch_size] + img_shape).astype(np.float32) + else: + img = np.ones(shape=[batch_size] + img_shape, dtype='float32') + label = [np.random.randint(0, 999) for _ in range(batch_size)] + label = np.array(label).astype(np.int64).reshape(-1, 1) + return img, label + + def _compare_reduce_and_allreduce(self, + model, + use_cuda, + iter=20, + delta2=1e-6): + if use_cuda and not core.is_compiled_with_cuda(): + return + + global remove_bn + remove_bn = True + + img, label = self._init_data(batch_size=batch_size) + all_reduce_first_loss, all_reduce_last_loss = self.check_network_convergence( + model, + feed_dict={"image": img, + "label": label}, + iter=iter, + batch_size=batch_size, + use_cuda=use_cuda, + use_reduce=False, + optimizer=optimizer) + reduce_first_loss, reduce_last_loss = self.check_network_convergence( + model, + feed_dict={"image": img, + "label": label}, + iter=iter, + batch_size=batch_size, + use_cuda=use_cuda, + use_reduce=True, + optimizer=optimizer) + + for loss in zip(all_reduce_first_loss, reduce_first_loss): + self.assertAlmostEquals(loss[0], loss[1], delta=1e-6) + for loss in zip(all_reduce_last_loss, reduce_last_loss): + self.assertAlmostEquals(loss[0], loss[1], delta=delta2) + + def _check_resnet_convergence(self, + model, + use_cuda=True, + use_reduce=False, + iter=20, + delta2=1e-6): + if use_cuda and not core.is_compiled_with_cuda(): + return + + global remove_dropout + global remove_bn + remove_dropout = True + remove_bn = True + + img, label = self._init_data(batch_size=batch_size) + single_first_loss, single_last_loss = self.check_network_convergence( + model, + feed_dict={"image": img, + "label": label}, + iter=iter, + batch_size=batch_size, + use_cuda=use_cuda, + use_reduce=use_reduce, + optimizer=optimizer, + use_parallel_executor=False) + parallel_first_loss, parallel_last_loss = self.check_network_convergence( + model, + feed_dict={"image": img, + "label": label}, + iter=iter, + batch_size=batch_size, + use_cuda=use_cuda, + use_reduce=use_reduce, + optimizer=optimizer) + + self.assertAlmostEquals( + np.mean(parallel_first_loss), single_first_loss[0], delta=1e-6) + self.assertAlmostEquals( + np.mean(parallel_last_loss), single_last_loss[0], delta=delta2) + + def test_seresnext_with_learning_rate_decay(self): + self._check_resnet_convergence(model=SE_ResNeXt50Small, use_cuda=True) + self._check_resnet_convergence( + model=SE_ResNeXt50Small, use_cuda=False, iter=2, delta2=1e-3) + + def test_seresnext_with_new_strategy(self): + self._compare_reduce_and_allreduce( + model=SE_ResNeXt50Small, use_cuda=True, delta2=1e-2) + self._compare_reduce_and_allreduce( + model=SE_ResNeXt50Small, use_cuda=False, iter=5) + + +if 
__name__ == '__main__': + unittest.main() diff --git a/python/paddle/fluid/tests/unittests/test_parallel_executor_test_while_train.py b/python/paddle/fluid/tests/unittests/test_parallel_executor_test_while_train.py new file mode 100644 index 0000000000..fcb5947ff0 --- /dev/null +++ b/python/paddle/fluid/tests/unittests/test_parallel_executor_test_while_train.py @@ -0,0 +1,113 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import paddle.fluid as fluid +import paddle.fluid.core as core +import numpy as np +import unittest +import os +import sys +import math + + +def simple_fc_net(): + img = fluid.layers.data(name='image', shape=[784], dtype='float32') + label = fluid.layers.data(name='label', shape=[1], dtype='int64') + hidden = img + for _ in range(4): + hidden = fluid.layers.fc( + hidden, + size=200, + act='tanh', + bias_attr=fluid.ParamAttr( + initializer=fluid.initializer.Constant(value=1.0))) + prediction = fluid.layers.fc(hidden, size=10, act='softmax') + loss = fluid.layers.cross_entropy(input=prediction, label=label) + loss = fluid.layers.mean(loss) + return loss + + +class ParallelExecutorTestingDuringTraining(unittest.TestCase): + def check_network_convergence(self, use_cuda, build_strategy=None): + os.environ['CPU_NUM'] = str(4) + main = fluid.Program() + startup = fluid.Program() + with fluid.program_guard(main, startup): + loss = simple_fc_net() + test_program = main.clone(for_test=True) + + opt = fluid.optimizer.SGD(learning_rate=0.001) + opt.minimize(loss) + + batch_size = 32 + image = np.random.normal(size=(batch_size, 784)).astype('float32') + label = np.random.randint(0, 10, (batch_size, 1), dtype="int64") + + place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() + exe = fluid.Executor(place) + exe.run(startup) + feed_dict = {'image': image, 'label': label} + + train_exe = fluid.ParallelExecutor( + use_cuda=use_cuda, + loss_name=loss.name, + main_program=main, + build_strategy=build_strategy) + + test_exe = fluid.ParallelExecutor( + use_cuda=use_cuda, + main_program=test_program, + share_vars_from=train_exe, + build_strategy=build_strategy) + + for i in range(5): + test_loss, = test_exe.run([loss.name], feed=feed_dict) + + train_loss, = train_exe.run([loss.name], feed=feed_dict) + + avg_test_loss_val = np.array(test_loss).mean() + if math.isnan(float(avg_test_loss_val)): + sys.exit("got NaN loss, testing failed.") + + avg_train_loss_val = np.array(train_loss).mean() + if math.isnan(float(avg_train_loss_val)): + sys.exit("got NaN loss, training failed.") + + self.assertTrue( + np.allclose( + train_loss, test_loss, atol=1e-8), + "Train loss: " + str(train_loss) + "\n Test loss:" + + str(test_loss)) + + def test_parallel_testing(self): + build_strategy = fluid.BuildStrategy() + build_strategy.reduce_strategy = fluid.BuildStrategy.ReduceStrategy.AllReduce + if core.is_compiled_with_cuda(): + self.check_network_convergence( + use_cuda=True, build_strategy=build_strategy) + self.check_network_convergence( 
+ use_cuda=False, build_strategy=build_strategy) + + def test_parallel_testing_with_new_strategy(self): + build_strategy = fluid.BuildStrategy() + build_strategy.reduce_strategy = fluid.BuildStrategy.ReduceStrategy.Reduce + if core.is_compiled_with_cuda(): + self.check_network_convergence( + use_cuda=True, build_strategy=build_strategy) + self.check_network_convergence( + use_cuda=False, build_strategy=build_strategy) + + +if __name__ == '__main__': + unittest.main() diff --git a/python/paddle/fluid/tests/unittests/test_parallel_executor_transformer.py b/python/paddle/fluid/tests/unittests/test_parallel_executor_transformer.py new file mode 100644 index 0000000000..8203d5d1fc --- /dev/null +++ b/python/paddle/fluid/tests/unittests/test_parallel_executor_transformer.py @@ -0,0 +1,176 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import paddle.fluid as fluid +import transformer_model +import numpy as np +from parallel_executor_test_base import TestParallelExecutorBase +import unittest +import paddle +import paddle.dataset.wmt16 as wmt16 +import os + +WMT16_RECORDIO_FILE = "/tmp/wmt16.recordio" + + +class ModelHyperParams(object): + # Dictionary size for source and target language. This model directly uses + # paddle.dataset.wmt16, in which the <bos>, <eos> and <unk> tokens have + # already been added, but the <pad> token has not. Transformer requires + # sequences in a mini-batch to be padded to the same length, so a <pad> + # token is added into the original dictionary of paddle.dataset.wmt16. + + # size of source word dictionary. + src_vocab_size = 10000 + # index for the <pad> token in source language. + src_pad_idx = src_vocab_size + + # size of target word dictionary. + trg_vocab_size = 10000 + # index for the <pad> token in target language. + trg_pad_idx = trg_vocab_size + + # position value corresponding to the <pad> token. + pos_pad_idx = 0 + + # max length of sequences. 1 is added to it later to include the position + # padding token for position encoding. + max_length = 50 + + # the dimension for word embeddings, which is also the last dimension of + # the input and output of multi-head attention, position-wise feed-forward + # networks, encoder and decoder. + + d_model = 512 + # size of the hidden layer in position-wise feed-forward networks. + d_inner_hid = 1024 + # the dimension that keys are projected to for dot-product attention. + d_key = 64 + # the dimension that values are projected to for dot-product attention. + d_value = 64 + # number of heads used in multi-head attention. + n_head = 8 + # number of sub-layers to be stacked in the encoder and decoder. + n_layer = 6 + # dropout rate used by all dropout layers. + dropout = 0.1 + + +def prepare_batch_input(insts, src_pad_idx, trg_pad_idx, n_head): + """ + Pad the instances to the max sequence length in batch, and generate the + corresponding position data and attention bias. Then, convert the numpy + data to tensors and return a dict mapping names to tensors.
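For intuition, the padding and position logic this docstring describes can be reproduced in a few lines of plain numpy. The sketch below is illustrative only and not part of the patch; the sequences and pad index are made up:

import numpy as np

pad_idx = 10000  # mirrors src_vocab_size above: the <pad> id sits one past the vocab
insts = [[3, 7, 42], [5, 9]]  # two toy source sequences

max_len = max(len(inst) for inst in insts)
# pad every instance on the right up to the batch max length
padded = np.array([inst + [pad_idx] * (max_len - len(inst)) for inst in insts])
# positions are 1-based; padding slots get position 0 (pos_pad_idx)
pos = np.array([[i + 1 if w != pad_idx else 0 for i, w in enumerate(inst)]
                for inst in padded])

print(padded)  # [[    3     7    42]  [    5     9 10000]]
print(pos)     # [[1 2 3]  [1 2 0]]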
+ """ + + def __pad_batch_data(insts, + pad_idx, + is_target=False, + return_pos=True, + return_attn_bias=True, + return_max_len=True): + """ + Pad the instances to the max sequence length in batch, and generate the + corresponding position data and attention bias. + """ + return_list = [] + max_len = max(len(inst) for inst in insts) + inst_data = np.array( + [inst + [pad_idx] * (max_len - len(inst)) for inst in insts]) + return_list += [inst_data.astype("int64").reshape([-1, 1])] + if return_pos: + inst_pos = np.array([[ + pos_i + 1 if w_i != pad_idx else 0 + for pos_i, w_i in enumerate(inst) + ] for inst in inst_data]) + + return_list += [inst_pos.astype("int64").reshape([-1, 1])] + if return_attn_bias: + if is_target: + # This is used to avoid attention on paddings and subsequent + # words. + slf_attn_bias_data = np.ones((inst_data.shape[0], max_len, + max_len)) + slf_attn_bias_data = np.triu(slf_attn_bias_data, 1).reshape( + [-1, 1, max_len, max_len]) + slf_attn_bias_data = np.tile(slf_attn_bias_data, + [1, n_head, 1, 1]) * [-1e9] + else: + # This is used to avoid attention on paddings. + slf_attn_bias_data = np.array([[0] * len(inst) + [-1e9] * + (max_len - len(inst)) + for inst in insts]) + slf_attn_bias_data = np.tile( + slf_attn_bias_data.reshape([-1, 1, 1, max_len]), + [1, n_head, max_len, 1]) + return_list += [slf_attn_bias_data.astype("float32")] + if return_max_len: + return_list += [max_len] + return return_list if len(return_list) > 1 else return_list[0] + + src_word, src_pos, src_slf_attn_bias, src_max_len = __pad_batch_data( + [inst[0] for inst in insts], src_pad_idx, is_target=False) + trg_word, trg_pos, trg_slf_attn_bias, trg_max_len = __pad_batch_data( + [inst[1] for inst in insts], trg_pad_idx, is_target=True) + trg_src_attn_bias = np.tile(src_slf_attn_bias[:, :, ::src_max_len, :], + [1, 1, trg_max_len, 1]).astype("float32") + lbl_word = __pad_batch_data([inst[2] for inst in insts], trg_pad_idx, False, + False, False, False) + lbl_weight = (lbl_word != trg_pad_idx).astype("float32").reshape([-1, 1]) + + return [ + src_word, src_pos, trg_word, trg_pos, src_slf_attn_bias, + trg_slf_attn_bias, trg_src_attn_bias, lbl_word, lbl_weight + ] + + +def transformer(use_feed): + assert not use_feed, "transfomer doesn't support feed yet" + return transformer_model.transformer( + ModelHyperParams.src_vocab_size + 1, + ModelHyperParams.trg_vocab_size + 1, ModelHyperParams.max_length + 1, + ModelHyperParams.n_layer, ModelHyperParams.n_head, + ModelHyperParams.d_key, ModelHyperParams.d_value, + ModelHyperParams.d_model, ModelHyperParams.d_inner_hid, + ModelHyperParams.dropout, ModelHyperParams.src_pad_idx, + ModelHyperParams.trg_pad_idx, ModelHyperParams.pos_pad_idx) + + +class TestTransformer(TestParallelExecutorBase): + @classmethod + def setUpClass(cls): + os.environ['CPU_NUM'] = str(4) + reader = paddle.batch( + wmt16.train(ModelHyperParams.src_vocab_size, + ModelHyperParams.trg_vocab_size), + batch_size=transformer_model.batch_size) + + with fluid.recordio_writer.create_recordio_writer( + WMT16_RECORDIO_FILE) as writer: + for batch in reader(): + for tensor in prepare_batch_input( + batch, ModelHyperParams.src_pad_idx, + ModelHyperParams.trg_pad_idx, ModelHyperParams.n_head): + t = fluid.LoDTensor() + t.set(tensor, fluid.CPUPlace()) + writer.append_tensor(t) + writer.complete_append_tensor() + + def test_main(self): + self.check_network_convergence(transformer, use_cuda=True) + self.check_network_convergence(transformer, use_cuda=False, iter=5) + + +if __name__ == '__main__': + 
unittest.main() diff --git a/python/paddle/fluid/tests/unittests/test_parallel_op.py b/python/paddle/fluid/tests/unittests/test_parallel_op.py index 79bea148f9..c9617e3677 100644 --- a/python/paddle/fluid/tests/unittests/test_parallel_op.py +++ b/python/paddle/fluid/tests/unittests/test_parallel_op.py @@ -15,8 +15,10 @@ import unittest import paddle.fluid as fluid +from paddle.fluid.layers.device import get_places import paddle.fluid.profiler as profiler import numpy +import six class BaseParallelForTest(unittest.TestCase): @@ -24,20 +26,20 @@ class BaseParallelForTest(unittest.TestCase): """ Run the unittest for parallel.for Args: - callback(callable): A callable function returns a generator. There - are two yields in the generator function. The first yield - returns the data layers, and the second yield returns the loss. - The modified data variables will be sent back during the first + callback(callable): A callable function returns a generator. There + are two yields in the generator function. The first yield + returns the data layers, and the second yield returns the loss. + The modified data variables will be sent back during the first yield. feed(dict): The executor feeding dictionary. - fetch(list|basestr): The fetch name lists. + fetch(list|basestr): The fetch name lists. Returns: None Raises: - AssertionError when the computation of cpu, parallel.for in cpu, + AssertionError when the computation of cpu, parallel.for in cpu, gpu, parallel.for in gpu are different. """ @@ -94,14 +96,14 @@ class BaseParallelForTest(unittest.TestCase): """ Run a single test, returns the fetch values Args: - place(Place): the computation place. - use_parallel(bool): Whether use parallel.for or not. + place(Place): the computation place. + use_parallel(bool): Whether use parallel.for or not. Returns: Fetched numpy arrays. """ - if isinstance(fetch, basestring): + if isinstance(fetch, six.string_types): fetch = [fetch] main = fluid.Program() startup = fluid.Program() @@ -113,15 +115,17 @@ class BaseParallelForTest(unittest.TestCase): generator = callback() # Automatically insert parallel do if use_parallel = True if use_parallel: - places = fluid.layers.get_places() + thread_num = fluid.core.get_cuda_device_count( + ) if use_gpu else 8 + places = get_places(thread_num) pd = fluid.layers.ParallelDo(places, use_nccl=use_nccl) data = next(generator) - if isinstance(data, fluid.Variable): + if isinstance(data, fluid.framework.Variable): data = [data] with pd.do(): - ins = map(pd.read_input, data) + ins = list(map(pd.read_input, data)) if len(ins) == 1: ins = ins[0] loss = generator.send(ins) # patch input @@ -153,7 +157,7 @@ class BaseParallelForTest(unittest.TestCase): Returns: None - + Raises: AssertionError diff --git a/python/paddle/fluid/tests/unittests/test_polygon_box_transform.py b/python/paddle/fluid/tests/unittests/test_polygon_box_transform.py new file mode 100644 index 0000000000..8aff4e87f6 --- /dev/null +++ b/python/paddle/fluid/tests/unittests/test_polygon_box_transform.py @@ -0,0 +1,68 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import unittest +import numpy as np +from op_test import OpTest + + +def PolygonBoxRestore(input): + shape = input.shape + batch_size = shape[0] + geo_channels = shape[1] + h = shape[2] + w = shape[3] + h_indexes = np.array(list(range(h)) * w).reshape( + [w, h]).transpose()[np.newaxis, :] # [1, h, w] + w_indexes = np.array(list(range(w)) * h).reshape( + [h, w])[np.newaxis, :] # [1, h, w] + indexes = np.concatenate( + (w_indexes, h_indexes))[np.newaxis, :] # [1, 2, h, w] + indexes = indexes.repeat( + [geo_channels / 2], + axis=0)[np.newaxis, :] # [1, geo_channels/2, 2, h, w] + indexes = indexes.repeat( + [batch_size], axis=0) # [batch_size, geo_channels/2, 2, h, w] + return indexes.reshape( + input.shape) - input # [batch_size, geo_channels, h, w] + + +class TestPolygonBoxRestoreOp(OpTest): + def config(self): + self.input_shape = (1, 8, 2, 2) + + def setUp(self): + self.config() + self.op_type = "polygon_box_transform" + input = np.random.random(self.input_shape).astype("float32") + self.inputs = {'Input': input} + output = PolygonBoxRestore(input) + self.outputs = {'Output': output} + + def test_check_output(self): + self.check_output() + + +class TestCase1(TestPolygonBoxRestoreOp): + def config(self): + self.input_shape = (2, 10, 3, 2) + + +class TestCase2(TestPolygonBoxRestoreOp): + def config(self): + self.input_shape = (3, 12, 4, 5) + + +if __name__ == '__main__': + unittest.main() diff --git a/python/paddle/fluid/tests/unittests/test_pool2d_op.py b/python/paddle/fluid/tests/unittests/test_pool2d_op.py index f7e1e85732..1cf70311b4 100644 --- a/python/paddle/fluid/tests/unittests/test_pool2d_op.py +++ b/python/paddle/fluid/tests/unittests/test_pool2d_op.py @@ -35,8 +35,8 @@ def max_pool2D_forward_naive(x, ) / strides[1] + 1 if ceil_mode else (W - ksize[1] + 2 * paddings[1]) / strides[1] + 1 out = np.zeros((N, C, H_out, W_out)) - for i in xrange(H_out): - for j in xrange(W_out): + for i in range(H_out): + for j in range(W_out): r_start = np.max((i * strides[0] - paddings[0], 0)) r_end = np.min((i * strides[0] + ksize[0] - paddings[0], H)) c_start = np.max((j * strides[1] - paddings[1], 0)) @@ -63,8 +63,8 @@ def avg_pool2D_forward_naive(x, ) / strides[1] + 1 if ceil_mode else (W - ksize[1] + 2 * paddings[1]) / strides[1] + 1 out = np.zeros((N, C, H_out, W_out)) - for i in xrange(H_out): - for j in xrange(W_out): + for i in range(H_out): + for j in range(W_out): r_start = np.max((i * strides[0] - paddings[0], 0)) r_end = np.min((i * strides[0] + ksize[0] - paddings[0], H)) c_start = np.max((j * strides[1] - paddings[1], 0)) diff --git a/python/paddle/fluid/tests/unittests/test_pool3d_op.py b/python/paddle/fluid/tests/unittests/test_pool3d_op.py index 142165f29b..92c64b3792 100644 --- a/python/paddle/fluid/tests/unittests/test_pool3d_op.py +++ b/python/paddle/fluid/tests/unittests/test_pool3d_op.py @@ -38,13 +38,13 @@ def max_pool3D_forward_naive(x, ) / strides[2] + 1 if ceil_mode else (W - ksize[2] + 2 * paddings[2]) / strides[2] + 1 out = np.zeros((N, C, D_out, H_out, W_out)) - for k in xrange(D_out): + for k in range(D_out): d_start = np.max((k * strides[0] - paddings[0], 0)) 
d_end = np.min((k * strides[0] + ksize[0] - paddings[0], D)) - for i in xrange(H_out): + for i in range(H_out): h_start = np.max((i * strides[0] - paddings[0], 0)) h_end = np.min((i * strides[0] + ksize[0] - paddings[0], H)) - for j in xrange(W_out): + for j in range(W_out): w_start = np.max((j * strides[1] - paddings[1], 0)) w_end = np.min((j * strides[1] + ksize[1] - paddings[1], W)) x_masked = x[:, :, d_start:d_end, h_start:h_end, w_start:w_end] @@ -72,13 +72,13 @@ def avg_pool3D_forward_naive(x, ) / strides[2] + 1 if ceil_mode else (W - ksize[2] + 2 * paddings[2]) / strides[2] + 1 out = np.zeros((N, C, D_out, H_out, W_out)) - for k in xrange(D_out): + for k in range(D_out): d_start = np.max((k * strides[0] - paddings[0], 0)) d_end = np.min((k * strides[0] + ksize[0] - paddings[0], D)) - for i in xrange(H_out): + for i in range(H_out): h_start = np.max((i * strides[0] - paddings[0], 0)) h_end = np.min((i * strides[0] + ksize[0] - paddings[0], H)) - for j in xrange(W_out): + for j in range(W_out): w_start = np.max((j * strides[1] - paddings[1], 0)) w_end = np.min((j * strides[1] + ksize[1] - paddings[1], W)) x_masked = x[:, :, d_start:d_end, h_start:h_end, w_start:w_end] diff --git a/python/paddle/fluid/tests/unittests/test_pool_max_op.py b/python/paddle/fluid/tests/unittests/test_pool_max_op.py index cf9b763922..e6a9f6f08c 100644 --- a/python/paddle/fluid/tests/unittests/test_pool_max_op.py +++ b/python/paddle/fluid/tests/unittests/test_pool_max_op.py @@ -29,21 +29,21 @@ def max_pool3D_forward_naive(x, ksize, strides, paddings, global_pool=False): W_out = (W - ksize[2] + 2 * paddings[2]) / strides[2] + 1 out = np.zeros((N, C, D_out, H_out, W_out)) mask = np.zeros((N, C, D_out, H_out, W_out)) - for k in xrange(D_out): + for k in range(D_out): d_start = np.max((k * strides[0] - paddings[0], 0)) d_end = np.min((k * strides[0] + ksize[0] - paddings[0], D)) - for i in xrange(H_out): + for i in range(H_out): h_start = np.max((i * strides[0] - paddings[0], 0)) h_end = np.min((i * strides[0] + ksize[0] - paddings[0], H)) - for j in xrange(W_out): + for j in range(W_out): w_start = np.max((j * strides[1] - paddings[1], 0)) w_end = np.min((j * strides[1] + ksize[1] - paddings[1], W)) x_masked = x[:, :, d_start:d_end, h_start:h_end, w_start:w_end] out[:, :, k, i, j] = np.max(x_masked, axis=(2, 3, 4)) - for n in xrange(N): - for c in xrange(C): + for n in range(N): + for c in range(C): arr = x_masked[n, c, :, :, :] index = np.where(arr == np.max(arr)) sub_deep = index[0][0] @@ -67,8 +67,8 @@ def max_pool2D_forward_naive(x, ksize, strides, paddings, global_pool=False): W_out = (W - ksize[1] + 2 * paddings[1]) / strides[1] + 1 out = np.zeros((N, C, H_out, W_out)) mask = np.zeros((N, C, H_out, W_out)) - for i in xrange(H_out): - for j in xrange(W_out): + for i in range(H_out): + for j in range(W_out): r_start = np.max((i * strides[0] - paddings[0], 0)) r_end = np.min((i * strides[0] + ksize[0] - paddings[0], H)) c_start = np.max((j * strides[1] - paddings[1], 0)) @@ -77,8 +77,8 @@ def max_pool2D_forward_naive(x, ksize, strides, paddings, global_pool=False): out[:, :, i, j] = np.max(x_masked, axis=(2, 3)) - for n in xrange(N): - for c in xrange(C): + for n in range(N): + for c in range(C): arr = x_masked[n, c, :, :] index = np.where(arr == np.max(arr)) sub_row = index[0][0] diff --git a/python/paddle/fluid/tests/unittests/test_positive_negative_pair_op.py b/python/paddle/fluid/tests/unittests/test_positive_negative_pair_op.py index 091cfc9c72..8c76393bda 100644 --- 
a/python/paddle/fluid/tests/unittests/test_positive_negative_pair_op.py +++ b/python/paddle/fluid/tests/unittests/test_positive_negative_pair_op.py @@ -32,7 +32,7 @@ def py_pnpair_op(score, label, query, column=-1, weight=None): # accumulate statistics pos, neg, neu = 0, 0, 0 - for _, ranks in predictions.items(): + for _, ranks in list(predictions.items()): for e1, e2 in itertools.combinations(ranks, 2): s1, s2, l1, l2, w1, w2 = e1[0], e2[0], e1[1], e2[1], e1[2], e2[2] w = (w1 + w2) * 0.5 diff --git a/python/paddle/fluid/tests/unittests/test_precision_recall_op.py b/python/paddle/fluid/tests/unittests/test_precision_recall_op.py index 7830ba2958..5ae425fee1 100644 --- a/python/paddle/fluid/tests/unittests/test_precision_recall_op.py +++ b/python/paddle/fluid/tests/unittests/test_precision_recall_op.py @@ -39,19 +39,19 @@ def get_states(idxs, labels, cls_num, weights=None): ins_num = idxs.shape[0] # TP FP TN FN states = np.zeros((cls_num, 4)).astype('float32') - for i in xrange(ins_num): + for i in range(ins_num): w = weights[i] if weights is not None else 1.0 idx = idxs[i][0] label = labels[i][0] if idx == label: states[idx][0] += w - for j in xrange(cls_num): + for j in range(cls_num): states[j][2] += w states[idx][2] -= w else: states[label][3] += w states[idx][1] += w - for j in xrange(cls_num): + for j in range(cls_num): states[j][2] += w states[label][2] -= w states[idx][2] -= w @@ -64,7 +64,7 @@ def compute_metrics(states, cls_num): total_fn_count = 0.0 macro_avg_precision = 0.0 macro_avg_recall = 0.0 - for i in xrange(cls_num): + for i in range(cls_num): total_tp_count += states[i][0] total_fp_count += states[i][1] total_fn_count += states[i][3] @@ -90,9 +90,9 @@ class TestPrecisionRecallOp_0(OpTest): ins_num = 64 cls_num = 10 max_probs = np.random.uniform(0, 1.0, (ins_num, 1)).astype('float32') - idxs = np.random.choice(xrange(cls_num), ins_num).reshape( + idxs = np.random.choice(range(cls_num), ins_num).reshape( (ins_num, 1)).astype('int32') - labels = np.random.choice(xrange(cls_num), ins_num).reshape( + labels = np.random.choice(range(cls_num), ins_num).reshape( (ins_num, 1)).astype('int32') states = get_states(idxs, labels, cls_num) metrics = compute_metrics(states, cls_num) @@ -117,10 +117,10 @@ class TestPrecisionRecallOp_1(OpTest): ins_num = 64 cls_num = 10 max_probs = np.random.uniform(0, 1.0, (ins_num, 1)).astype('float32') - idxs = np.random.choice(xrange(cls_num), ins_num).reshape( + idxs = np.random.choice(range(cls_num), ins_num).reshape( (ins_num, 1)).astype('int32') weights = np.random.uniform(0, 1.0, (ins_num, 1)).astype('float32') - labels = np.random.choice(xrange(cls_num), ins_num).reshape( + labels = np.random.choice(range(cls_num), ins_num).reshape( (ins_num, 1)).astype('int32') states = get_states(idxs, labels, cls_num, weights) @@ -151,10 +151,10 @@ class TestPrecisionRecallOp_2(OpTest): ins_num = 64 cls_num = 10 max_probs = np.random.uniform(0, 1.0, (ins_num, 1)).astype('float32') - idxs = np.random.choice(xrange(cls_num), ins_num).reshape( + idxs = np.random.choice(range(cls_num), ins_num).reshape( (ins_num, 1)).astype('int32') weights = np.random.uniform(0, 1.0, (ins_num, 1)).astype('float32') - labels = np.random.choice(xrange(cls_num), ins_num).reshape( + labels = np.random.choice(range(cls_num), ins_num).reshape( (ins_num, 1)).astype('int32') states = np.random.randint(0, 30, (cls_num, 4)).astype('float32') diff --git a/python/paddle/fluid/tests/unittests/test_preprocessor.py b/python/paddle/fluid/tests/unittests/test_preprocessor.py new file mode 
100644 index 0000000000..cbf1a7e0c5 --- /dev/null +++ b/python/paddle/fluid/tests/unittests/test_preprocessor.py @@ -0,0 +1,93 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import unittest +import numpy as np + +import paddle.fluid as fluid +import paddle.v2 as paddle +import paddle.v2.dataset.mnist as mnist + + +class TestPreprocessor(unittest.TestCase): + def setUp(self): + with fluid.program_guard(fluid.Program(), fluid.Program()): + reader = paddle.batch(mnist.train(), batch_size=32) + feeder = fluid.DataFeeder( + feed_list=[ # order is image and label + fluid.layers.data( + name='image', shape=[784]), + fluid.layers.data( + name='label', shape=[1], dtype='int64'), + ], + place=fluid.CPUPlace()) + self.num_batches = fluid.recordio_writer.convert_reader_to_recordio_file( + './mnist_for_preprocessor_test.recordio', reader, feeder) + + def test_main(self): + N = 10 + + img_expected_res = [] + lbl_expected_res = [] + with fluid.program_guard(fluid.Program(), fluid.Program()): + data_file = fluid.layers.io.open_recordio_file( + './mnist_for_preprocessor_test.recordio', + shapes=[[-1, 784], [-1, 1]], + lod_levels=[0, 0], + dtypes=['float32', 'int64']) + img, lbl = fluid.layers.io.read_file(data_file) + + if fluid.core.is_compiled_with_cuda(): + place = fluid.CUDAPlace(0) + else: + place = fluid.CPUPlace() + exe = fluid.Executor(place) + exe.run(fluid.default_startup_program()) + for _ in range(N): + img_v, lbl_v = exe.run(fetch_list=[img, lbl]) + img_expected_res.append(img_v / 2) + lbl_expected_res.append(lbl_v + 1) + + img_actual_res = [] + lbl_actual_res = [] + with fluid.program_guard(fluid.Program(), fluid.Program()): + data_file = fluid.layers.io.open_recordio_file( + './mnist_for_preprocessor_test.recordio', + shapes=[[-1, 784], [-1, 1]], + lod_levels=[0, 0], + dtypes=['float32', 'int64']) + preprocessor = fluid.layers.io.Preprocessor(reader=data_file) + with preprocessor.block(): + img, lbl = preprocessor.inputs() + img_out = img / 2 + lbl_out = lbl + 1 + preprocessor.outputs(img_out, lbl_out) + + data_file = fluid.layers.io.double_buffer(preprocessor()) + img, lbl = fluid.layers.io.read_file(data_file) + + if fluid.core.is_compiled_with_cuda(): + place = fluid.CUDAPlace(0) + else: + place = fluid.CPUPlace() + exe = fluid.Executor(place) + exe.run(fluid.default_startup_program()) + for _ in range(N): + img_v, lbl_v = exe.run(fetch_list=[img, lbl]) + img_actual_res.append(img_v) + lbl_actual_res.append(lbl_v) + + for idx in range(N): + self.assertTrue(np.allclose(img_expected_res[idx], img_actual_res[idx])) + self.assertTrue(np.allclose(lbl_expected_res[idx], lbl_actual_res[idx])) diff --git a/python/paddle/fluid/tests/unittests/test_print_op.py b/python/paddle/fluid/tests/unittests/test_print_op.py index c75080fbb9..b461c5c940 100644 --- a/python/paddle/fluid/tests/unittests/test_print_op.py +++ b/python/paddle/fluid/tests/unittests/test_print_op.py @@ -28,7 +28,7 @@ class TestPrintOpCPU(unittest.TestCase): self.x_tensor = core.LoDTensor() tensor_np
= np.random.random(size=(2, 3)).astype('float32') self.x_tensor.set(tensor_np, self.place) - self.x_tensor.set_lod([[0, 1, 1]]) + self.x_tensor.set_recursive_sequence_lengths([[1, 1]]) def build_network(self, only_forward, **kargs): x = layers.data('x', shape=[3], dtype='float32', lod_level=1) @@ -56,13 +56,15 @@ class TestPrintOpCPU(unittest.TestCase): return_numpy=False) +@unittest.skipIf(not core.is_compiled_with_cuda(), + "core is not compiled with CUDA") class TestPrintOpGPU(TestPrintOpCPU): def setUp(self): self.place = core.CUDAPlace(0) self.x_tensor = core.LoDTensor() tensor_np = np.random.random(size=(2, 3)).astype('float32') self.x_tensor.set(tensor_np, self.place) - self.x_tensor.set_lod([[0, 1, 1]]) + self.x_tensor.set_recursive_sequence_lengths([[1, 1]]) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/test_prior_box_op.py b/python/paddle/fluid/tests/unittests/test_prior_box_op.py index bcbc02a2ba..e15554737b 100644 --- a/python/paddle/fluid/tests/unittests/test_prior_box_op.py +++ b/python/paddle/fluid/tests/unittests/test_prior_box_op.py @@ -32,6 +32,7 @@ class TestPriorBoxOp(OpTest): 'variances': self.variances, 'flip': self.flip, 'clip': self.clip, + 'min_max_aspect_ratios_order': self.min_max_aspect_ratios_order, 'step_w': self.step_w, 'step_h': self.step_h, 'offset': self.offset @@ -52,6 +53,9 @@ class TestPriorBoxOp(OpTest): max_sizes = [5, 10] self.max_sizes = np.array(max_sizes).astype('float32').tolist() + def set_min_max_aspect_ratios_order(self): + self.min_max_aspect_ratios_order = False + def init_test_params(self): self.layer_w = 32 self.layer_h = 32 @@ -71,6 +75,7 @@ class TestPriorBoxOp(OpTest): self.set_max_sizes() self.aspect_ratios = [2.0, 3.0] self.flip = True + self.set_min_max_aspect_ratios_order() self.real_aspect_ratios = [1, 2.0, 1.0 / 2.0, 3.0, 1.0 / 3.0] self.aspect_ratios = np.array( self.aspect_ratios, dtype=np.float).flatten() @@ -78,7 +83,6 @@ class TestPriorBoxOp(OpTest): self.variances = np.array(self.variances, dtype=np.float).flatten() self.clip = True - self.num_priors = len(self.real_aspect_ratios) * len(self.min_sizes) if len(self.max_sizes) > 0: self.num_priors += len(self.max_sizes) @@ -106,26 +110,60 @@ class TestPriorBoxOp(OpTest): idx = 0 for s in range(len(self.min_sizes)): min_size = self.min_sizes[s] - # rest of priors - for r in range(len(self.real_aspect_ratios)): - ar = self.real_aspect_ratios[r] - c_w = min_size * math.sqrt(ar) / 2 - c_h = (min_size / math.sqrt(ar)) / 2 - out_boxes[h, w, idx, :] = [(c_x - c_w) / self.image_w, - (c_y - c_h) / self.image_h, - (c_x + c_w) / self.image_w, - (c_y + c_h) / self.image_h] - idx += 1 - - if len(self.max_sizes) > 0: - max_size = self.max_sizes[s] - # second prior: aspect_ratio = 1, - c_w = c_h = math.sqrt(min_size * max_size) / 2 + if not self.min_max_aspect_ratios_order: + # rest of priors + for r in range(len(self.real_aspect_ratios)): + ar = self.real_aspect_ratios[r] + c_w = min_size * math.sqrt(ar) / 2 + c_h = (min_size / math.sqrt(ar)) / 2 + out_boxes[h, w, idx, :] = [ + (c_x - c_w) / self.image_w, (c_y - c_h) / + self.image_h, (c_x + c_w) / self.image_w, + (c_y + c_h) / self.image_h + ] + idx += 1 + + if len(self.max_sizes) > 0: + max_size = self.max_sizes[s] + # second prior: aspect_ratio = 1, + c_w = c_h = math.sqrt(min_size * max_size) / 2 + out_boxes[h, w, idx, :] = [ + (c_x - c_w) / self.image_w, (c_y - c_h) / + self.image_h, (c_x + c_w) / self.image_w, + (c_y + c_h) / self.image_h + ] + idx += 1 + else: + c_w = c_h = min_size / 2. 
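As a quick numeric check of the box math used throughout the prior-box hunk: a prior with size m and aspect ratio ar has half-width m*sqrt(ar)/2 and half-height (m/sqrt(ar))/2, normalized by the image size. The sketch below uses made-up values and is not part of the patch:

import math

# illustrative numbers only, not taken from the test
min_size, ar = 4.0, 2.0
image_w = image_h = 100.0
c_x = c_y = 50.0  # box center in pixels

c_w = min_size * math.sqrt(ar) / 2    # half-width grows with sqrt(ar)
c_h = (min_size / math.sqrt(ar)) / 2  # half-height shrinks with sqrt(ar)
box = [(c_x - c_w) / image_w, (c_y - c_h) / image_h,
       (c_x + c_w) / image_w, (c_y + c_h) / image_h]
print(box)  # ~[0.4717, 0.4859, 0.5283, 0.5141]; the pixel area stays min_size**2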
out_boxes[h, w, idx, :] = [(c_x - c_w) / self.image_w, (c_y - c_h) / self.image_h, (c_x + c_w) / self.image_w, (c_y + c_h) / self.image_h] idx += 1 + if len(self.max_sizes) > 0: + max_size = self.max_sizes[s] + # second prior: aspect_ratio = 1, + c_w = c_h = math.sqrt(min_size * max_size) / 2 + out_boxes[h, w, idx, :] = [ + (c_x - c_w) / self.image_w, (c_y - c_h) / + self.image_h, (c_x + c_w) / self.image_w, + (c_y + c_h) / self.image_h + ] + idx += 1 + + # rest of priors + for r in range(len(self.real_aspect_ratios)): + ar = self.real_aspect_ratios[r] + if abs(ar - 1.) < 1e-6: + continue + c_w = min_size * math.sqrt(ar) / 2 + c_h = (min_size / math.sqrt(ar)) / 2 + out_boxes[h, w, idx, :] = [ + (c_x - c_w) / self.image_w, (c_y - c_h) / + self.image_h, (c_x + c_w) / self.image_w, + (c_y + c_h) / self.image_h + ] + idx += 1 # clip the prior's coordidate such that it is within[0, 1] if self.clip: @@ -137,10 +175,15 @@ class TestPriorBoxOp(OpTest): self.out_var = out_var.astype('float32') -class TestPriorBoxOpWithMaxSize(TestPriorBoxOp): +class TestPriorBoxOpWithoutMaxSize(TestPriorBoxOp): def set_max_sizes(self): self.max_sizes = [] +class TestPriorBoxOpWithSpecifiedOutOrder(TestPriorBoxOp): + def set_min_max_aspect_ratios_order(self): + self.min_max_aspect_ratios_order = True + + if __name__ == '__main__': unittest.main() diff --git a/python/paddle/fluid/tests/unittests/test_profiler.py b/python/paddle/fluid/tests/unittests/test_profiler.py index cf6fe14a86..9f8d33f9bb 100644 --- a/python/paddle/fluid/tests/unittests/test_profiler.py +++ b/python/paddle/fluid/tests/unittests/test_profiler.py @@ -79,12 +79,18 @@ class TestProfiler(unittest.TestCase): pass_acc_calculator.add(value=acc, weight=b_size) pass_acc = pass_acc_calculator.eval() + @unittest.skipIf(not core.is_compiled_with_cuda(), + "profiler is enabled only with GPU") def test_cpu_profiler(self): self.net_profiler('CPU') + @unittest.skipIf(not core.is_compiled_with_cuda(), + "profiler is enabled only with GPU") def test_cuda_profiler(self): self.net_profiler('GPU') + @unittest.skipIf(not core.is_compiled_with_cuda(), + "profiler is enabled only with GPU") def test_all_profiler(self): self.net_profiler('All', '/tmp/profile_out') with open('/tmp/profile_out', 'r') as f: diff --git a/python/paddle/fluid/tests/unittests/test_program.py b/python/paddle/fluid/tests/unittests/test_program.py index c51a482393..0997afc97a 100644 --- a/python/paddle/fluid/tests/unittests/test_program.py +++ b/python/paddle/fluid/tests/unittests/test_program.py @@ -17,6 +17,7 @@ import unittest from paddle.fluid.framework import Program, default_main_program, program_guard, grad_var_name import paddle.fluid.layers as layers +import paddle.fluid as fluid main_program = default_main_program() @@ -98,6 +99,39 @@ class TestProgram(unittest.TestCase): new_program = main_program.clone() self.assertNotEqual(0, len(new_program.blocks[0].all_parameters())) + def test_program_inference_optimize(self): + def net(): + reader = fluid.layers.py_reader( + capacity=10, + shapes=[[-1, 10], [-1, 1]], + lod_levels=[0, 0], + dtypes=['float32', 'int64'], + use_double_buffer=True) + in_data, label = fluid.layers.read_file(reader) + predict_label = fluid.layers.fc(in_data, size=2, act='softmax') + loss = fluid.layers.mean( + fluid.layers.cross_entropy( + input=predict_label, label=label)) + + optimizer = fluid.optimizer.Adam() + optimizer.minimize(loss) + + startup_program = fluid.Program() + main_program = fluid.Program() + with fluid.program_guard(main_program, startup_program): + 
net() + no_read_program = main_program.inference_optimize() + keep_read_program = main_program.inference_optimize( + export_for_deployment=False) + no_read_ops = no_read_program.global_block().ops + keep_read_ops = keep_read_program.global_block().ops + self.assertEqual(len(keep_read_ops) - len(no_read_ops), 2) + self.assertEqual(keep_read_ops[0].type, 'create_double_buffer_reader') + self.assertEqual(keep_read_ops[1].type, 'read') + + for i in range(len(no_read_ops)): + self.assertEqual(no_read_ops[i].type, keep_read_ops[i + 2].type) + if __name__ == '__main__': unittest.main() diff --git a/python/paddle/fluid/tests/unittests/test_protobuf_descs.py b/python/paddle/fluid/tests/unittests/test_protobuf_descs.py index 3f9059fb5b..9853fb4e9a 100644 --- a/python/paddle/fluid/tests/unittests/test_protobuf_descs.py +++ b/python/paddle/fluid/tests/unittests/test_protobuf_descs.py @@ -68,7 +68,7 @@ class TestOpDesc(unittest.TestCase): self.assertEqual(8, len(op.attr_names())) op.set_block_attr("block_attr", program_desc.block(0)) - self.assertEqual(0, op.block_attr("block_attr")) + self.assertEqual(0, op.block_attr_id("block_attr")) mul_op = block.append_op() mul_op.set_type("mul") @@ -181,13 +181,13 @@ class TestBlockDesc(unittest.TestCase): self.assertIsNotNone(block) op1 = block.append_op() op2 = block.append_op() - op0 = block.prepend_op() + op0 = block._prepend_op() all_ops = [] - for idx in xrange(0, block.op_size()): + for idx in range(0, block.op_size()): all_ops.append(block.op(idx)) self.assertEqual(all_ops, [op0, op1, op2]) - def test_remove_op(self): + def test__remove_op(self): program = Program() program_desc = program.desc self.assertIsNotNone(program_desc) @@ -201,11 +201,11 @@ class TestBlockDesc(unittest.TestCase): op1.set_type("test") op2.set_type("test") - block.remove_op(1, 2) - program.sync_with_cpp() + block._remove_op(1, 2) + program._sync_with_cpp() all_ops = [] - for idx in xrange(0, block.op_size()): + for idx in range(0, block.op_size()): all_ops.append(block.op(idx)) self.assertEqual(all_ops, [op0, op2]) diff --git a/python/paddle/fluid/tests/unittests/test_py_reader_push_pop.py b/python/paddle/fluid/tests/unittests/test_py_reader_push_pop.py new file mode 100644 index 0000000000..f9bda5e470 --- /dev/null +++ b/python/paddle/fluid/tests/unittests/test_py_reader_push_pop.py @@ -0,0 +1,100 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
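The py_reader test that follows exercises a blocking feed queue that decouples a Python producer thread from the executor. As a minimal mental model (standard-library only; this is a sketch of the idea, not the actual feed-queue implementation):

import queue
import threading

q = queue.Queue(maxsize=10)  # bounded, like capacity=10 in the test

def feed_data(q, inputs):
    # producer: runs in its own thread, mirroring feed_queue.push(in_data)
    for batch in inputs:
        q.put(batch)
    q.put(None)  # sentinel standing in for feed_queue.close()

inputs = [[i] for i in range(20)]
threading.Thread(target=feed_data, args=(q, inputs)).start()

outputs = []
while True:
    batch = q.get()  # consumer: mirrors executor.run() popping one batch
    if batch is None:
        break
    outputs.append(batch)

assert outputs == inputs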
+ +import unittest +import paddle.fluid as fluid +import numpy as np +from threading import Thread + + +def feed_data(feed_queue, inputs): + for in_data in inputs: + feed_queue.push(in_data) + + +class TestPyReader(unittest.TestCase): + def setUp(self): + self.capacity = 10 + self.batch_size_min = 10 + self.batch_size_max = 20 + self.shapes = [(-1, 3, 2, 1), (-1, 1)] + self.lod_levels = [0, 0] + self.dtypes = ['float32', 'int64'] + self.iterations = 20 + + def test_single_thread_main(self): + self.main(use_thread=False) + + def test_multiple_thread_main(self): + self.main(use_thread=True) + + def main(self, use_thread=False): + with fluid.program_guard(fluid.Program(), fluid.Program()): + place = fluid.CUDAPlace(0) if fluid.core.is_compiled_with_cuda( + ) else fluid.CPUPlace() + executor = fluid.Executor(place) + + data_file = fluid.layers.py_reader( + capacity=self.capacity, + dtypes=self.dtypes, + lod_levels=self.lod_levels, + shapes=self.shapes) + feed_queue = data_file.queue + read_out_data = fluid.layers.read_file(data_file) + self.inputs = [] + + for i in range(self.iterations): + in_data = fluid.LoDTensorArray() + batch_size = np.random.random_integers(self.batch_size_min, + self.batch_size_max) + for shape, dtype in zip(self.shapes, self.dtypes): + next_data = np.random.uniform( + low=0, high=1000, + size=(batch_size, ) + shape[1:]).astype(dtype) + in_data.append( + fluid.executor._as_lodtensor(next_data, place)) + + self.inputs.append(in_data) + + executor.run(fluid.default_startup_program()) + self.outputs = [] + if use_thread: + thread = Thread( + target=feed_data, args=(feed_queue, self.inputs)) + thread.start() + for in_data in self.inputs: + self.outputs.append( + executor.run(fetch_list=list(read_out_data))) + else: + for in_data in self.inputs: + feed_queue.push(in_data) + self.outputs.append( + executor.run(fetch_list=list(read_out_data))) + + feed_queue.close() + self.validate() + + def validate(self): + self.assertEqual(len(self.inputs), len(self.outputs)) + for in_data_list, out_data_list in zip(self.inputs, self.outputs): + self.assertEqual(len(in_data_list), len(out_data_list)) + in_data_list_np = [ + np.array(in_lod_tensor) for in_lod_tensor in in_data_list + ] + for in_data, out_data in zip(in_data_list_np, out_data_list): + self.assertTrue((in_data == out_data).all()) + + +if __name__ == '__main__': + unittest.main() diff --git a/python/paddle/fluid/tests/unittests/test_py_reader_using_executor.py b/python/paddle/fluid/tests/unittests/test_py_reader_using_executor.py new file mode 100644 index 0000000000..9a379bdbaa --- /dev/null +++ b/python/paddle/fluid/tests/unittests/test_py_reader_using_executor.py @@ -0,0 +1,226 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
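The feed_data helper defined just below stops either when the reader yields its None sentinel or when push() reports failure because the queue was closed. A self-contained sketch of that termination protocol, with a hypothetical FakeQueue standing in for the real feed queue:

def reader():
    for i in range(3):
        yield i
    yield None  # explicit end-of-data sentinel, as in random_reader below

class FakeQueue(object):
    # hypothetical stand-in: push() reports False once the queue is closed
    def __init__(self):
        self.closed = False
        self.items = []

    def push(self, x):
        if self.closed:
            return False
        self.items.append(x)
        return True

q = FakeQueue()
gen = reader()
while True:
    data = next(gen, None)  # None also covers an exhausted generator
    if data is None or not q.push(data):
        break

assert q.items == [0, 1, 2]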
+ +import unittest +import paddle.fluid as fluid +import paddle.fluid.core as core +import numpy as np +import threading +import multiprocessing +import os + + +def as_tensor(np_array_or_tensor, place=None): + if isinstance(np_array_or_tensor, fluid.LoDTensor): + return np_array_or_tensor + + if place is None: + place = fluid.CPUPlace() + + tensor = fluid.LoDTensor() + tensor.set(np_array_or_tensor, place) + return tensor + + +def as_numpy(tensor_or_numpy): + return tensor_or_numpy if isinstance( + tensor_or_numpy, np.ndarray) else np.array(tensor_or_numpy) + + +def feed_data(feed_queue, reader): + data_generator = reader() + while True: + data = next(data_generator, None) + if data is None or not feed_queue.push(data): + break + + +def simple_fc_net(in_size, + class_num, + hidden_sizes, + batch_size, + queue_capacity, + use_double_buffer=False): + reader = fluid.layers.py_reader( + capacity=queue_capacity, + shapes=[[-1, in_size], [-1, 1]], + lod_levels=[0, 0], + dtypes=['float32', 'int64'], + use_double_buffer=False) + feed_queue = reader.queue + reader = fluid.layers.batch(reader, batch_size=batch_size) + if use_double_buffer: + reader = fluid.layers.double_buffer(reader) + + in_data, label = fluid.layers.read_file(reader) + + hidden = in_data + for hidden_size in hidden_sizes: + hidden = fluid.layers.fc( + hidden, + size=hidden_size, + act='tanh', + bias_attr=fluid.ParamAttr( + initializer=fluid.initializer.Constant(value=1.0))) + + predict_label = fluid.layers.fc(hidden, size=class_num, act='softmax') + loss = fluid.layers.mean( + fluid.layers.cross_entropy( + input=predict_label, label=label)) + + optimizer = fluid.optimizer.Adam() + optimizer.minimize(loss) + return in_data, label, loss, optimizer, feed_queue + + +class TestPyReaderUsingExecutor(unittest.TestCase): + def setUp(self): + self.in_size = 1000 + self.hidden_sizes = [50, 30, 20] + self.class_num = 10 + self.batch_size = 32 + self.iterations = 10 + self.queue_capacity = 50 + + def test(self): + for use_cuda in [False, True]: + for use_parallel_executor in [False, True]: + for use_double_buffer in [False, True]: + print('Test Parameters:'), + print({ + 'use_cuda': use_cuda, + 'use_parallel_executor': use_parallel_executor, + 'use_double_buffer': use_double_buffer + }) + self.main(use_cuda, use_parallel_executor, + use_double_buffer) + + def random_reader(self): + def reader(): + self.inputs = [] + cnt = 0 + while True: + tensors = fluid.LoDTensorArray() + in_data = np.random.uniform( + low=0, high=1, size=(1, self.in_size)).astype('float32') + tensors.append(as_tensor(in_data)) + label = np.random.random_integers( + low=0, high=self.class_num - 1, size=(1, 1)).astype('int64') + tensors.append(as_tensor(label)) + + if cnt < self.iterations * self.batch_size * self.batch_size_times: + if cnt % (self.batch_size * self.batch_size_times) == 0: + self.inputs.append([in_data, label]) + else: + self.inputs[-1][0] = np.concatenate( + (self.inputs[-1][0], in_data), axis=0) + self.inputs[-1][1] = np.concatenate( + (self.inputs[-1][1], label), axis=0) + elif not self.use_double_buffer: + break + + yield tensors + cnt += 1 + + yield None + + return reader + + def main(self, + use_cuda=True, + use_parallel_executor=False, + use_double_buffer=False): + assert not use_cuda or use_cuda and core.is_compiled_with_cuda() + + self.use_cuda = use_cuda + self.use_parallel_executor = use_parallel_executor + self.use_double_buffer = use_double_buffer + + startup_program = fluid.Program() + main_program = fluid.Program() + + with 
fluid.program_guard(main_program, startup_program): + in_data, label, loss, optimizer, feed_queue = simple_fc_net( + in_size=self.in_size, + class_num=self.class_num, + hidden_sizes=self.hidden_sizes, + batch_size=self.batch_size, + queue_capacity=self.queue_capacity, + use_double_buffer=self.use_double_buffer) + + place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace() + + startup_exe = fluid.Executor(place) + startup_exe.run(startup_program) + + if use_parallel_executor: + main_exe = fluid.ParallelExecutor(use_cuda, loss_name=loss.name) + if use_cuda: + self.batch_size_times = core.get_cuda_device_count() + else: + self.batch_size_times = int( + os.environ.get('CPU_NUM', multiprocessing.cpu_count())) + else: + main_exe = startup_exe + self.batch_size_times = 1 + + reader = self.random_reader() + thread = threading.Thread( + target=feed_data, args=(feed_queue, reader)) + thread.start() + + self.outputs = [] + for _ in range(self.iterations): + fetches = main_exe.run(fetch_list=[in_data.name, label.name]) + fetches = [as_numpy(fetch) for fetch in fetches] + self.outputs.append(fetches) + + feed_queue.close() + self.validate() + + def validate(self): + self.assertEqual(len(self.inputs), len(self.outputs)) + for batch_in, batch_out in zip(self.inputs, self.outputs): + self.assertEqual(len(batch_in), len(batch_out)) + if self.use_parallel_executor and not self.use_double_buffer: + self.validate_unordered_batch(batch_in, batch_out) + else: + for in_data, out_data in zip(batch_in, batch_out): + self.assertEqual(in_data.shape, out_data.shape) + if not self.use_parallel_executor: + self.assertTrue((in_data == out_data).all()) + + def validate_unordered_batch(self, batch_in, batch_out): + out_index_left_set = set(range(self.batch_size * self.batch_size_times)) + mapping_num = 0 + for i in range(self.batch_size * self.batch_size_times): + for j in out_index_left_set: + flag = True + for k in range(len(batch_in)): + in_data = batch_in[k][i] + out_data = batch_out[k][j] + if (in_data != out_data).any(): + flag = False + break + + if flag: + out_index_left_set.remove(j) + mapping_num += 1 + break + + self.assertEqual(mapping_num, self.batch_size * self.batch_size_times) + + +if __name__ == '__main__': + unittest.main() diff --git a/python/paddle/fluid/tests/unittests/test_random_crop_op.py b/python/paddle/fluid/tests/unittests/test_random_crop_op.py new file mode 100644 index 0000000000..1c708d0386 --- /dev/null +++ b/python/paddle/fluid/tests/unittests/test_random_crop_op.py @@ -0,0 +1,46 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
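For reference, validate_unordered_batch above is a greedy bipartite matching: every input row must claim exactly one still-unmatched output row it equals, which tolerates device-level reordering. A compact numpy restatement with a single illustrative feature tensor:

import numpy as np

batch_in = np.array([[1.0], [2.0], [3.0]])
batch_out = np.array([[3.0], [1.0], [2.0]])  # same rows, shuffled by the devices

unmatched = set(range(len(batch_out)))
matched = 0
for row in batch_in:
    for j in list(unmatched):
        if (row == batch_out[j]).all():
            unmatched.remove(j)  # each output row may be claimed only once
            matched += 1
            break

assert matched == len(batch_in)  # every input found a distinct output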
+ +import unittest +import numpy as np +import paddle.fluid.core as core +from op_test import OpTest + + +class TestRandomCropOp(OpTest): + def setUp(self): + to_crop = np.array([[[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]]] * + 5).astype("float32") + self.possible_res = [ + np.array([[1, 2, 3], [5, 6, 7]]), np.array([[2, 3, 4], [6, 7, 8]]), + np.array([[5, 6, 7], [9, 10, 11]]), + np.array([[6, 7, 8], [10, 11, 12]]) + ] + self.op_type = "random_crop" + self.inputs = {'X': to_crop, 'Seed': np.array([10])} + self.outputs = {'Out': np.array([]), 'SeedOut': np.array([])} + self.attrs = {'shape': [2, 3]} + + def test_check_output(self): + self.check_output_customized(self.verify_output) + + def verify_output(self, outs): + out = np.array(outs[1]) + for ins in out[:]: + is_equal = [(ins == res).all() for res in self.possible_res] + self.assertIn(True, is_equal) + + +if __name__ == "__main__": + unittest.main() diff --git a/python/paddle/fluid/tests/unittests/test_reader_reset.py b/python/paddle/fluid/tests/unittests/test_reader_reset.py new file mode 100644 index 0000000000..3ad85d5748 --- /dev/null +++ b/python/paddle/fluid/tests/unittests/test_reader_reset.py @@ -0,0 +1,116 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
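The four possible_res arrays in the random-crop test above are exactly the valid 2x3 windows of the 3x4 input; enumerating them makes the membership check obvious. A sketch, not part of the patch:

import numpy as np

x = np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]])
crop_h, crop_w = 2, 3

# every top-left corner that keeps the window inside x: (3-2+1)*(4-3+1) = 4 crops
crops = [x[i:i + crop_h, j:j + crop_w]
         for i in range(x.shape[0] - crop_h + 1)
         for j in range(x.shape[1] - crop_w + 1)]
assert len(crops) == 4

out = x[1:3, 1:4]  # some crop the op might produce
assert any((out == c).all() for c in crops)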
+ +import paddle.fluid as fluid +import paddle.v2 as paddle +import numpy as np +import unittest + + +class TestReaderReset(unittest.TestCase): + def prepare_data(self): + def fake_data_generator(): + for n in range(self.total_ins_num): + yield np.ones(self.ins_shape) * n, n + + # Prepare data + with fluid.program_guard(fluid.Program(), fluid.Program()): + reader = paddle.batch(fake_data_generator, batch_size=1) + feeder = fluid.DataFeeder( + feed_list=[ + fluid.layers.data( + name='data', shape=[3], dtype='float32'), + fluid.layers.data( + name='label', shape=[1], dtype='int64'), + ], + place=fluid.CPUPlace()) + fluid.recordio_writer.convert_reader_to_recordio_file( + self.data_file_name, reader, feeder) + + def setUp(self): + self.use_cuda = fluid.core.is_compiled_with_cuda() + self.data_file_name = './reader_reset_test.recordio' + self.ins_shape = [3] + self.batch_size = 5 + self.total_ins_num = self.batch_size * 20 + self.test_pass_num = 100 + self.prepare_data() + + def main(self, with_double_buffer): + main_prog = fluid.Program() + startup_prog = fluid.Program() + + with fluid.program_guard(main_prog, startup_prog): + data_reader_handle = fluid.layers.io.open_files( + filenames=[self.data_file_name], + shapes=[[-1] + self.ins_shape, [-1, 1]], + lod_levels=[0, 0], + dtypes=['float32', 'int64'], + thread_num=1, + pass_num=1) + data_reader = fluid.layers.io.batch(data_reader_handle, + self.batch_size) + if with_double_buffer: + data_reader = fluid.layers.double_buffer(data_reader) + image, label = fluid.layers.read_file(data_reader) + fetch_list = [image.name, label.name] + + place = fluid.CUDAPlace(0) if self.use_cuda else fluid.CPUPlace() + exe = fluid.Executor(place) + exe.run(startup_prog) + + build_strategy = fluid.BuildStrategy() + if with_double_buffer: + build_strategy.enable_data_balance = True + exec_strategy = fluid.ExecutionStrategy() + parallel_exe = fluid.ParallelExecutor( + use_cuda=self.use_cuda, + main_program=main_prog, + build_strategy=build_strategy, + exec_strategy=exec_strategy) + + data_appeared = [False] * self.total_ins_num + pass_count = 0 + while (True): + try: + data_val, label_val = parallel_exe.run(fetch_list, + return_numpy=True) + ins_num = data_val.shape[0] + broadcasted_label = np.ones((ins_num, ) + tuple( + self.ins_shape)) * label_val.reshape((ins_num, 1)) + self.assertTrue((data_val == broadcasted_label).all()) + for l in label_val: + self.assertFalse(data_appeared[l[0]]) + data_appeared[l[0]] = True + + except fluid.core.EOFException: + pass_count += 1 + if with_double_buffer: + data_appeared = data_appeared[:-parallel_exe.device_count * + self.batch_size] + for i in data_appeared: + self.assertTrue(i) + if pass_count < self.test_pass_num: + data_appeared = [False] * self.total_ins_num + data_reader_handle.reset() + else: + break + + def test_all(self): + self.main(with_double_buffer=False) + self.main(with_double_buffer=True) + + +if __name__ == '__main__': + unittest.main() diff --git a/python/paddle/fluid/tests/unittests/test_recordio_reader.py b/python/paddle/fluid/tests/unittests/test_recordio_reader.py index f32050014d..69a522e273 100644 --- a/python/paddle/fluid/tests/unittests/test_recordio_reader.py +++ b/python/paddle/fluid/tests/unittests/test_recordio_reader.py @@ -68,8 +68,7 @@ class TestRecordIO(unittest.TestCase): while True: try: tmp, = exe.run(fetch_list=[avg_loss]) - except fluid.core.EnforceNotMet as ex: - self.assertIn("There is no next data.", ex.message) + except fluid.core.EOFException: break avg_loss_np.append(tmp) diff
--git a/python/paddle/fluid/tests/unittests/test_recurrent_op.py b/python/paddle/fluid/tests/unittests/test_recurrent_op.py index d6ff18430e..2e22df2beb 100644 --- a/python/paddle/fluid/tests/unittests/test_recurrent_op.py +++ b/python/paddle/fluid/tests/unittests/test_recurrent_op.py @@ -203,12 +203,12 @@ class RecurrentOpTest1(unittest.TestCase): num_grad[idx], ana_grad[idx], rtol=0.1).all()) def check_forward(self): - print 'test recurrent op forward' + print('test recurrent op forward') pd_output = self.forward() py_output = self.py_rnn.forward() - print 'pd_output', pd_output + print('pd_output', pd_output) print - print 'py_output', py_output + print('py_output', py_output) self.assertEqual(pd_output.shape, py_output.shape) self.assertTrue(np.isclose(pd_output, py_output, rtol=0.1).all()) @@ -445,7 +445,7 @@ class RecurrentOpNoMemBootTest(RecurrentOpTest1): self.py_rnn = RecurrentOpNoMemBootTest.PySimpleRNN4(self.input_shape, self.output_shape) self.output = layers.mean(self.create_rnn_op(), **self.p_info) - print self.main_program + print(self.main_program) def create_rnn_op(self): x = layers.data( diff --git a/python/paddle/fluid/tests/unittests/test_reduce_op.py b/python/paddle/fluid/tests/unittests/test_reduce_op.py index 9b0cc3534d..06d116601b 100644 --- a/python/paddle/fluid/tests/unittests/test_reduce_op.py +++ b/python/paddle/fluid/tests/unittests/test_reduce_op.py @@ -34,8 +34,10 @@ class TestMeanOp(OpTest): def setUp(self): self.op_type = "reduce_mean" self.inputs = {'X': np.random.random((5, 6, 2, 10)).astype("float64")} - self.attrs = {'dim': 1} - self.outputs = {'Out': self.inputs['X'].mean(axis=self.attrs['dim'])} + self.attrs = {'dim': [1]} + self.outputs = { + 'Out': self.inputs['X'].mean(axis=tuple(self.attrs['dim'])) + } def test_check_output(self): self.check_output() @@ -50,8 +52,10 @@ class TestMaxOp(OpTest): def setUp(self): self.op_type = "reduce_max" self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")} - self.attrs = {'dim': -1} - self.outputs = {'Out': self.inputs['X'].max(axis=self.attrs['dim'])} + self.attrs = {'dim': [-1]} + self.outputs = { + 'Out': self.inputs['X'].max(axis=tuple(self.attrs['dim'])) + } def test_check_output(self): self.check_output() @@ -63,8 +67,10 @@ class TestMinOp(OpTest): def setUp(self): self.op_type = "reduce_min" self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")} - self.attrs = {'dim': 2} - self.outputs = {'Out': self.inputs['X'].min(axis=self.attrs['dim'])} + self.attrs = {'dim': [2]} + self.outputs = { + 'Out': self.inputs['X'].min(axis=tuple(self.attrs['dim'])) + } def test_check_output(self): self.check_output() @@ -83,14 +89,11 @@ class TestProdOp(OpTest): self.check_grad(['X'], 'Out') -class TestKeepDimReduce(OpTest): +class Test1DReduce(OpTest): def setUp(self): self.op_type = "reduce_sum" - self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")} - self.attrs = {'dim': -2, 'keep_dim': True} - self.outputs = { - 'Out': self.inputs['X'].sum(axis=self.attrs['dim'], keepdims=True) - } + self.inputs = {'X': np.random.random(20).astype("float64")} + self.outputs = {'Out': self.inputs['X'].sum(axis=0)} def test_check_output(self): self.check_output() @@ -99,12 +102,91 @@ class TestKeepDimReduce(OpTest): self.check_grad(['X'], 'Out') -class Test1DReduce(OpTest): +class Test2DReduce0(Test1DReduce): def setUp(self): self.op_type = "reduce_sum" - self.inputs = {'X': np.random.random(20).astype("float64")} + self.attrs = {'dim': [0]} + self.inputs = {'X': np.random.random((20, 
10)).astype("float64")} self.outputs = {'Out': self.inputs['X'].sum(axis=0)} + +class Test2DReduce1(Test1DReduce): + def setUp(self): + self.op_type = "reduce_sum" + self.attrs = {'dim': [1]} + self.inputs = {'X': np.random.random((20, 10)).astype("float64")} + self.outputs = { + 'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim'])) + } + + +class Test3DReduce0(Test1DReduce): + def setUp(self): + self.op_type = "reduce_sum" + self.attrs = {'dim': [1]} + self.inputs = {'X': np.random.random((5, 6, 7)).astype("float64")} + self.outputs = { + 'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim'])) + } + + +class Test3DReduce1(Test1DReduce): + def setUp(self): + self.op_type = "reduce_sum" + self.attrs = {'dim': [2]} + self.inputs = {'X': np.random.random((5, 6, 7)).astype("float64")} + self.outputs = { + 'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim'])) + } + + +class Test3DReduce2(Test1DReduce): + def setUp(self): + self.op_type = "reduce_sum" + self.attrs = {'dim': [-2]} + self.inputs = {'X': np.random.random((5, 6, 7)).astype("float64")} + self.outputs = { + 'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim'])) + } + + +class Test3DReduce3(Test1DReduce): + def setUp(self): + self.op_type = "reduce_sum" + self.attrs = {'dim': [1, 2]} + self.inputs = {'X': np.random.random((5, 6, 7)).astype("float64")} + self.outputs = { + 'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim'])) + } + + +class TestKeepDimReduce(Test1DReduce): + def setUp(self): + self.op_type = "reduce_sum" + self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")} + self.attrs = {'dim': [1], 'keep_dim': True} + self.outputs = { + 'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim']), + keepdims=self.attrs['keep_dim']) + } + + +class TestReduceAll(Test1DReduce): + def setUp(self): + self.op_type = "reduce_sum" + self.inputs = {'X': np.random.random((5, 6, 2, 10)).astype("float64")} + self.attrs = {'reduce_all': True} + self.outputs = {'Out': self.inputs['X'].sum()} + + +## reduction in multi dims +class TestReduceMeanOpMultiAxises(OpTest): + def setUp(self): + self.op_type = "reduce_mean" + self.inputs = {'X': np.random.random((5, 6, 2, 10)).astype("float64")} + self.attrs = {'dim': [1, 2]} + self.outputs = {'Out': self.inputs['X'].mean(axis=(1, 2))} + def test_check_output(self): self.check_output() @@ -112,12 +194,45 @@ class Test1DReduce(OpTest): self.check_grad(['X'], 'Out') -class TestReduceAll(OpTest): +class TestReduceMaxOpMultiAxises(OpTest): + """Remove Max with subgradient from gradient check to confirm the success of CI.""" + + def setUp(self): + self.op_type = "reduce_max" + self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")} + self.attrs = {'dim': [-2, -1]} + self.outputs = { + 'Out': self.inputs['X'].max(axis=tuple(self.attrs['dim'])) + } + + def test_check_output(self): + self.check_output() + + +class TestReduceMinOpMultiAxises(OpTest): + """Remove Min with subgradient from gradient check to confirm the success of CI.""" + + def setUp(self): + self.op_type = "reduce_min" + self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")} + self.attrs = {'dim': [1, 2]} + self.outputs = { + 'Out': self.inputs['X'].min(axis=tuple(self.attrs['dim'])) + } + + def test_check_output(self): + self.check_output() + + +class TestKeepDimReduceSumMultiAxises(OpTest): def setUp(self): self.op_type = "reduce_sum" - self.inputs = {'X': np.random.random((5, 6, 2, 10)).astype("float64")} - self.attrs = {'reduce_all': True} - self.outputs = {'Out': 
self.inputs['X'].sum()} + self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")} + self.attrs = {'dim': [-2, -1], 'keep_dim': True} + self.outputs = { + 'Out': + self.inputs['X'].sum(axis=tuple(self.attrs['dim']), keepdims=True) + } def test_check_output(self): self.check_output() diff --git a/python/paddle/fluid/tests/unittests/test_reorder_lod_tensor.py b/python/paddle/fluid/tests/unittests/test_reorder_lod_tensor.py index 76d0d2f2fe..6e1cd56b3e 100644 --- a/python/paddle/fluid/tests/unittests/test_reorder_lod_tensor.py +++ b/python/paddle/fluid/tests/unittests/test_reorder_lod_tensor.py @@ -15,6 +15,7 @@ import unittest import paddle.fluid as fluid import paddle.fluid.core as core +from paddle.fluid.layers.control_flow import lod_rank_table import numpy @@ -34,7 +35,7 @@ class TestReorderLoDTensor(unittest.TestCase): dat.stop_gradient = False rank_dat = fluid.layers.data( name=cls.data_desc[1][0], shape=cls.data_desc[1][1]) - table = fluid.layers.lod_rank_table(rank_dat) + table = lod_rank_table(rank_dat) new_dat = fluid.layers.reorder_lod_tensor_by_rank( x=dat, rank_table=table) loss = fluid.layers.reduce_sum(new_dat) @@ -70,11 +71,10 @@ class TestReorderLoDTensor(unittest.TestCase): lod_level_i = numpy.random.randint( low=1, high=5, - size=self.num_seq if i == 0 else lod_level_i[-1]) - lod_level_i = [0] + numpy.cumsum(lod_level_i).tolist() + size=self.num_seq if i == 0 else sum(lod_level_i)).tolist() data_lod.append(lod_level_i) data_value = numpy.random.random( - size=[data_lod[-1][-1] if data_lod else self.num_seq + size=[sum(data_lod[-1]) if data_lod else self.num_seq ] + data_shape).astype('float32') self.data[data_name] = (data_value, data_lod) @@ -84,29 +84,36 @@ class TestReorderLoDTensor(unittest.TestCase): tensor = fluid.Tensor() tensor.set(self.data[desc[0]][0], place) if self.data[desc[0]][1]: - tensor.set_lod(self.data[desc[0]][1]) + tensor.set_recursive_sequence_lengths(self.data[desc[0]][1]) self.inputs[desc[0]] = tensor def reorder(self): - level = 0 + def convert_to_offset(lod): + offset_lod = [[0] for i in lod] + for i, level in enumerate(lod): + for seq_len in level: + offset_lod[i].append(offset_lod[i][-1] + seq_len) + return offset_lod + level = 0 # compute the rank_table according to ref_lod ref_lod = self.data[self.data_desc[1][0]][1][level] rank_table = [] # list of (index, length) - for i in range(len(ref_lod) - 1): - rank_table.append((i, ref_lod[i + 1] - ref_lod[i])) + for i in range(len(ref_lod)): + rank_table.append((i, ref_lod[i])) rank_table = sorted(rank_table, lambda x, y: y[1] - x[1]) # compute the input sequence info according to input_lod input_value, input_lod = self.data[self.data_desc[0][0]] + offset_lod = convert_to_offset(input_lod) input_table = [] # list of (offset, length, sub_lod) - if input_lod: - for i in range(len(input_lod[level]) - 1): + if offset_lod: + for i in range(len(offset_lod[level]) - 1): start_idx = i end_idx = i + 1 sub_lod = [] - for lod_level_i in input_lod[level:]: + for lod_level_i in offset_lod[level:]: sub_lod_i = [] for idx in range(start_idx, end_idx): sub_lod_i.append(lod_level_i[idx + 1] - lod_level_i[ @@ -132,10 +139,9 @@ class TestReorderLoDTensor(unittest.TestCase): input_seq_sub_lod = input_table[index][2] if len(output_lod) == 0: - output_lod = [[0] for i in input_seq_sub_lod] - for i, sub_lod_i in enumerate(input_seq_sub_lod): - for idx_sub in sub_lod_i: - output_lod[i].append(output_lod[i][-1] + idx_sub) + output_lod = [[] for i in input_seq_sub_lod] + for i, level in 
enumerate(input_seq_sub_lod): + output_lod[i].extend(level) return output_value, output_lod def test_reorder_lod_tensor(self): @@ -148,7 +154,8 @@ class TestReorderLoDTensor(unittest.TestCase): self.assertTrue( numpy.allclose( numpy.array(actual_output), expect_output, atol=0.001)) - self.assertEqual(expect_output_lod, actual_output.lod()) + self.assertEqual(expect_output_lod, + actual_output.recursive_sequence_lengths()) # check gradient expect_grad = numpy.ones_like(self.data[self.data_desc[0][0]][0]) expect_grad_lod = self.data[self.data_desc[0][0]][1] @@ -156,7 +163,8 @@ class TestReorderLoDTensor(unittest.TestCase): self.assertTrue( numpy.allclose( numpy.array(actual_grad), expect_grad, atol=0.001)) - self.assertEqual(expect_grad_lod, actual_grad.lod()) + self.assertEqual(expect_grad_lod, + actual_grad.recursive_sequence_lengths()) def test_reorder_tensor(self): self.data_desc[0][-1] = 0 # input is tensor @@ -168,7 +176,8 @@ class TestReorderLoDTensor(unittest.TestCase): self.assertTrue( numpy.allclose( numpy.array(actual_output), expect_output, atol=0.001)) - self.assertEqual(expect_output_lod, actual_output.lod()) + self.assertEqual(expect_output_lod, + actual_output.recursive_sequence_lengths()) # check gradient expect_grad = numpy.ones_like(self.data[self.data_desc[0][0]][0]) expect_grad_lod = self.data[self.data_desc[0][0]][1] @@ -176,14 +185,14 @@ class TestReorderLoDTensor(unittest.TestCase): self.assertTrue( numpy.allclose( numpy.array(actual_grad), expect_grad, atol=0.001)) - self.assertEqual(expect_grad_lod, actual_grad.lod()) + self.assertEqual(expect_grad_lod, + actual_grad.recursive_sequence_lengths()) # compare outputs between LodTensors with explicit and implicit lod # use the same data but set the input lod explicitly - input_lod = [[ - i for i in range(len(self.data[self.data_desc[0][0]][0]) + 1) - ]] - self.inputs[self.data_desc[0][0]].set_lod(input_lod) + input_lod = [[1] * len(self.data[self.data_desc[0][0]][0])] + self.inputs[self.data_desc[0][0]].set_recursive_sequence_lengths( + input_lod) # preserve the output of LodTensor with implicit lod to compare expect_output = [ numpy.array(actual_output) for actual_output in self.actual_outputs diff --git a/python/paddle/fluid/tests/unittests/test_reshape_op.py b/python/paddle/fluid/tests/unittests/test_reshape_op.py index f51b5a7e99..2f5558578a 100644 --- a/python/paddle/fluid/tests/unittests/test_reshape_op.py +++ b/python/paddle/fluid/tests/unittests/test_reshape_op.py @@ -25,7 +25,7 @@ class TestReshapeOp(OpTest): self.op_type = "reshape" self.inputs = {"X": np.random.random(ori_shape).astype("float32")} - self.attrs = {"shape": new_shape, "inplace": False} + self.attrs = {"shape": new_shape} self.outputs = {"Out": self.inputs["X"].reshape(new_shape)} def test_check_output(self): @@ -42,7 +42,7 @@ class TestReshapeOpDimInfer1(OpTest): self.op_type = "reshape" self.inputs = {"X": np.random.random(ori_shape).astype("float32")} - self.attrs = {"shape": new_shape, "inplace": False} + self.attrs = {"shape": new_shape} self.outputs = {"Out": self.inputs["X"].reshape(self.attrs["shape"])} def test_check_output(self): @@ -60,7 +60,7 @@ class TestReshapeOpDimInfer2(OpTest): self.op_type = "reshape" self.inputs = {"X": np.random.random(ori_shape).astype("float32")} - self.attrs = {"shape": new_shape, "inplace": False} + self.attrs = {"shape": new_shape} self.outputs = {"Out": self.inputs["X"].reshape(infered_shape)} def test_check_output(self): diff --git a/python/paddle/fluid/tests/unittests/test_reverse_op.py 
b/python/paddle/fluid/tests/unittests/test_reverse_op.py new file mode 100644 index 0000000000..f845575a02 --- /dev/null +++ b/python/paddle/fluid/tests/unittests/test_reverse_op.py @@ -0,0 +1,67 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import unittest +import numpy as np +from op_test import OpTest + + +class TestReverseOp(OpTest): + def initTestCase(self): + self.x = np.random.random((3, 4)).astype('float32') + self.axis = [0] + + def setUp(self): + self.initTestCase() + self.op_type = "reverse" + self.inputs = {"X": self.x} + self.attrs = {'axis': self.axis} + out = self.x + for a in self.axis: + out = np.flip(out, axis=a) + self.outputs = {'Out': out} + + def test_check_output(self): + self.check_output() + + def test_check_grad(self): + self.check_grad(['X'], 'Out') + + +class TestCase0(TestReverseOp): + def initTestCase(self): + self.x = np.random.random((3, 4)).astype('float32') + self.axis = [1] + + +class TestCase1(TestReverseOp): + def initTestCase(self): + self.x = np.random.random((3, 4)).astype('float32') + self.axis = [0, 1] + + +class TestCase2(TestReverseOp): + def initTestCase(self): + self.x = np.random.random((3, 4, 5)).astype('float32') + self.axis = [0, 2] + + +class TestCase3(TestReverseOp): + def initTestCase(self): + self.x = np.random.random((3, 4, 5)).astype('float32') + self.axis = [1, 2] + + +if __name__ == '__main__': + unittest.main() diff --git a/python/paddle/fluid/tests/unittests/test_roi_pool_op.py b/python/paddle/fluid/tests/unittests/test_roi_pool_op.py index 3d754aff3a..df5684ab17 100644 --- a/python/paddle/fluid/tests/unittests/test_roi_pool_op.py +++ b/python/paddle/fluid/tests/unittests/test_roi_pool_op.py @@ -107,7 +107,7 @@ class TestROIPoolOp(OpTest): rois = [] self.rois_lod = [[]] for bno in range(self.batch_size): - self.rois_lod[0].append(len(rois)) + self.rois_lod[0].append(bno + 1) for i in range(bno + 1): x1 = np.random.random_integers( 0, self.width / self.spatial_scale - self.pooled_width) @@ -121,7 +121,6 @@ class TestROIPoolOp(OpTest): roi = [bno, x1, y1, x2, y2] rois.append(roi) - self.rois_lod[0].append(len(rois)) self.rois_num = len(rois) self.rois = np.array(rois).astype("int64") diff --git a/python/paddle/fluid/tests/unittests/test_row_conv_op.py b/python/paddle/fluid/tests/unittests/test_row_conv_op.py index 30f1efbcbc..07dcd10868 100644 --- a/python/paddle/fluid/tests/unittests/test_row_conv_op.py +++ b/python/paddle/fluid/tests/unittests/test_row_conv_op.py @@ -19,8 +19,10 @@ from op_test import OpTest def row_conv_forward(x, lod, wt): out = np.zeros_like(x) - seq_info = lod[0] - num_sequences = len(seq_info) - 1 + num_sequences = len(lod[0]) + seq_info = [0] + for seq_len in lod[0]: + seq_info.append(seq_info[-1] + seq_len) context_length = wt.shape[0] for i in range(num_sequences): # loop over number of sequences @@ -32,7 +34,6 @@ def row_conv_forward(x, lod, wt): cur_timesteps = end - start for j in range(cur_timesteps): # loop over different timesteps 
for k in range(context_length): - if j + k >= cur_timesteps: continue curoutput[j, :] += curinput[j + k, :] * wt[k, :] @@ -44,8 +45,8 @@ class TestRowConvOp1(OpTest): def setUp(self): self.op_type = "row_conv" - lod = [[0, 2, 5, 7]] - T = lod[0][-1] + lod = [[2, 3, 2]] + T = sum(lod[0]) D = 16 context_length = 2 @@ -75,8 +76,8 @@ class TestRowConvOp2(OpTest): def setUp(self): self.op_type = "row_conv" - lod = [[0, 20, 50, 100]] - T = lod[0][-1] + lod = [[20, 30, 50]] + T = sum(lod[0]) D = 35 context_length = 35 diff --git a/python/paddle/fluid/tests/unittests/test_rpn_target_assign_op.py b/python/paddle/fluid/tests/unittests/test_rpn_target_assign_op.py new file mode 100644 index 0000000000..df6e0faaca --- /dev/null +++ b/python/paddle/fluid/tests/unittests/test_rpn_target_assign_op.py @@ -0,0 +1,103 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import unittest +import numpy as np +import paddle.fluid.core as core +from op_test import OpTest + + +def rpn_target_assign(iou, rpn_batch_size_per_im, rpn_positive_overlap, + rpn_negative_overlap, fg_fraction): + iou = np.transpose(iou) + anchor_to_gt_max = iou.max(axis=1) + gt_to_anchor_argmax = iou.argmax(axis=0) + gt_to_anchor_max = iou[gt_to_anchor_argmax, np.arange(iou.shape[1])] + anchors_with_max_overlap = np.where(iou == gt_to_anchor_max)[0] + + tgt_lbl = np.ones((iou.shape[0], ), dtype=np.int32) * -1 + tgt_lbl[anchors_with_max_overlap] = 1 + tgt_lbl[anchor_to_gt_max >= rpn_positive_overlap] = 1 + + num_fg = int(fg_fraction * rpn_batch_size_per_im) + fg_inds = np.where(tgt_lbl == 1)[0] + if len(fg_inds) > num_fg: + disable_inds = np.random.choice( + fg_inds, size=(len(fg_inds) - num_fg), replace=False) + tgt_lbl[disable_inds] = -1 + fg_inds = np.where(tgt_lbl == 1)[0] + + num_bg = rpn_batch_size_per_im - np.sum(tgt_lbl == 1) + bg_inds = np.where(anchor_to_gt_max < rpn_negative_overlap)[0] + if len(bg_inds) > num_bg: + enable_inds = bg_inds[np.random.randint(len(bg_inds), size=num_bg)] + tgt_lbl[enable_inds] = 0 + bg_inds = np.where(tgt_lbl == 0)[0] + + loc_index = fg_inds + score_index = np.hstack((fg_inds, bg_inds)) + tgt_lbl = np.expand_dims(tgt_lbl, axis=1) + return loc_index, score_index, tgt_lbl + + +class TestRpnTargetAssignOp(OpTest): + def setUp(self): + iou = np.random.random((10, 8)).astype("float32") + self.op_type = "rpn_target_assign" + self.inputs = {'DistMat': iou} + self.attrs = { + 'rpn_batch_size_per_im': 256, + 'rpn_positive_overlap': 0.95, + 'rpn_negative_overlap': 0.3, + 'fg_fraction': 0.25, + 'fix_seed': True + } + loc_index, score_index, tgt_lbl = rpn_target_assign(iou, 256, 0.95, 0.3, + 0.25) + self.outputs = { + 'LocationIndex': loc_index, + 'ScoreIndex': score_index, + 'TargetLabel': tgt_lbl, + } + + def test_check_output(self): + self.check_output() + + +class TestRpnTargetAssignOp2(OpTest): + def setUp(self): + iou = np.random.random((10, 20)).astype("float32") + self.op_type = "rpn_target_assign" + self.inputs = {'DistMat': iou} + self.attrs 
= { + 'rpn_batch_size_per_im': 128, + 'rpn_positive_overlap': 0.5, + 'rpn_negative_overlap': 0.5, + 'fg_fraction': 0.5, + 'fix_seed': True + } + loc_index, score_index, tgt_lbl = rpn_target_assign(iou, 128, 0.5, 0.5, + 0.5) + self.outputs = { + 'LocationIndex': loc_index, + 'ScoreIndex': score_index, + 'TargetLabel': tgt_lbl, + } + + def test_check_output(self): + self.check_output() + + +if __name__ == '__main__': + unittest.main() diff --git a/python/paddle/fluid/tests/unittests/test_selected_rows.py b/python/paddle/fluid/tests/unittests/test_selected_rows.py index 3d7b86787f..f504a06fff 100644 --- a/python/paddle/fluid/tests/unittests/test_selected_rows.py +++ b/python/paddle/fluid/tests/unittests/test_selected_rows.py @@ -40,12 +40,12 @@ class TestSelectedRows(unittest.TestCase): # compare tensor self.assertAlmostEqual(2.0, - selected_rows.get_tensor().get_float_element(0)) + selected_rows.get_tensor()._get_float_element(0)) self.assertAlmostEqual(1.0, - selected_rows.get_tensor().get_float_element(1)) + selected_rows.get_tensor()._get_float_element(1)) self.assertAlmostEqual( 4.0, - selected_rows.get_tensor().get_float_element(2 * row_numel + 8)) + selected_rows.get_tensor()._get_float_element(2 * row_numel + 8)) if __name__ == "__main__": diff --git a/python/paddle/fluid/tests/unittests/test_seq_concat_op.py b/python/paddle/fluid/tests/unittests/test_seq_concat_op.py index 10592d127f..11ffa761a6 100644 --- a/python/paddle/fluid/tests/unittests/test_seq_concat_op.py +++ b/python/paddle/fluid/tests/unittests/test_seq_concat_op.py @@ -18,14 +18,19 @@ import sys from op_test import OpTest -def to_abs_lod(lod): - if len(lod) == 0 or len(lod) == 1: - return lod +def to_abs_offset_lod(lod): + offset_lod = [[0] for i in lod] + for i, level in enumerate(lod): + for seq_len in level: + offset_lod[i].append(offset_lod[i][-1] + seq_len) + + if len(offset_lod) == 0 or len(offset_lod) == 1: + return offset_lod import copy - new_lod = copy.deepcopy(lod) - for idx, val in enumerate(lod[0]): - new_lod[0][idx] = lod[1][val] - return new_lod + new_offset_lod = copy.deepcopy(offset_lod) + for idx, val in enumerate(offset_lod[0]): + new_offset_lod[0][idx] = offset_lod[1][val] + return new_offset_lod def seq_concat(inputs, level): @@ -35,11 +40,11 @@ def seq_concat(inputs, level): x1 = inputs['X'][1][1][0] level_idx = len(lod0) - level - 1 outs = [] - for i in range(len(lod0[level_idx]) - 1): - sub_x0 = x0[to_abs_lod(lod0)[level_idx][i]:to_abs_lod(lod0)[level_idx][ - i + 1], :] - sub_x1 = x1[to_abs_lod(lod1)[level_idx][i]:to_abs_lod(lod1)[level_idx][ - i + 1], :] + for i in range(len(lod0[level_idx])): + sub_x0 = x0[to_abs_offset_lod(lod0)[level_idx][i]:to_abs_offset_lod( + lod0)[level_idx][i + 1], :] + sub_x1 = x1[to_abs_offset_lod(lod1)[level_idx][i]:to_abs_offset_lod( + lod1)[level_idx][i + 1], :] outs.append(np.concatenate((sub_x0, sub_x1), axis=0)) return np.concatenate(outs, axis=0) @@ -48,9 +53,9 @@ class TestSeqConcatOp(OpTest): def set_data(self): # two level, batch size is 3 x0 = np.random.random((4, 6, 3)).astype('float32') - lod0 = [[0, 2, 4], [0, 1, 2, 3, 4]] + lod0 = [[2, 2], [1, 1, 1, 1]] x1 = np.random.random((4, 8, 3)).astype('float32') - lod1 = [[0, 2, 4], [0, 1, 2, 3, 4]] + lod1 = [[2, 2], [1, 1, 1, 1]] axis = 1 level = 1 self.inputs = {'X': [('x0', (x0, lod0)), ('x1', (x1, lod1))]} @@ -72,14 +77,14 @@ class TestSeqConcatOpLevelZeroNestedSequence(TestSeqConcatOp): def set_data(self): # two level, batch size is 3 x0 = np.random.random((4, 6, 3)).astype('float32') - lod0 = [[0, 2, 4], [0, 
1, 2, 3, 4]] + lod0 = [[2, 2], [1, 1, 1, 1]] x1 = np.random.random((7, 6, 3)).astype('float32') - lod1 = [[0, 2, 4], [0, 1, 3, 5, 7]] + lod1 = [[2, 2], [1, 2, 2, 2]] axis = 0 level = 0 self.inputs = {'X': [('x0', (x0, lod0)), ('x1', (x1, lod1))]} self.attrs = {'axis': axis, 'level': level} - out_lod = [[0, 2, 4], [0, 2, 5, 8, 11]] + out_lod = [[2, 2], [2, 3, 3, 3]] self.outputs = {'Out': (seq_concat(self.inputs, level), out_lod)} @@ -87,14 +92,14 @@ class TestSeqConcatOplevelOneNestedSequence(TestSeqConcatOp): def set_data(self): # two level, batch size is 3 x0 = np.random.random((4, 6, 3)).astype('float32') - lod0 = [[0, 2, 4], [0, 1, 2, 3, 4]] + lod0 = [[2, 2], [1, 1, 1, 1]] x1 = np.random.random((7, 6, 3)).astype('float32') - lod1 = [[0, 3, 4], [0, 1, 3, 5, 7]] + lod1 = [[3, 1], [1, 2, 2, 2]] axis = 0 level = 1 self.inputs = {'X': [('x0', (x0, lod0)), ('x1', (x1, lod1))]} self.attrs = {'axis': axis, 'level': level} - out_lod = [[0, 5, 8], [0, 1, 2, 3, 5, 7, 8, 9, 11]] + out_lod = [[5, 3], [1, 1, 1, 2, 2, 1, 1, 2]] self.outputs = {'Out': (seq_concat(self.inputs, level), out_lod)} @@ -102,14 +107,14 @@ class TestSeqConcatOpLevelZeroSequence(TestSeqConcatOp): def set_data(self): # two level, batch size is 3 x0 = np.random.random((4, 3, 4)).astype('float32') - lod0 = [[0, 1, 2, 3, 4]] + lod0 = [[1, 1, 1, 1]] x1 = np.random.random((7, 3, 4)).astype('float32') - lod1 = [[0, 1, 3, 5, 7]] + lod1 = [[1, 2, 2, 2]] axis = 0 level = 0 self.inputs = {'X': [('x0', (x0, lod0)), ('x1', (x1, lod1))]} self.attrs = {'axis': axis, 'level': level} - out_lod = [[0, 2, 5, 8, 11]] + out_lod = [[2, 3, 3, 3]] self.outputs = {'Out': (seq_concat(self.inputs, level), out_lod)} diff --git a/python/paddle/fluid/tests/unittests/test_seq_conv.py b/python/paddle/fluid/tests/unittests/test_seq_conv.py index 51dbf1f618..1a6e1aad79 100644 --- a/python/paddle/fluid/tests/unittests/test_seq_conv.py +++ b/python/paddle/fluid/tests/unittests/test_seq_conv.py @@ -26,9 +26,9 @@ class TestSeqProject(OpTest): if self.context_length == 1 \ and self.context_start == 0 \ and self.padding_trainable: - print "If context_start is 0 " \ + print("If context_start is 0 " \ "and context_length is 1," \ - " padding_trainable should be false." 
+ " padding_trainable should be false.") return # one level, batch size @@ -75,35 +75,38 @@ class TestSeqProject(OpTest): pading_data = self.pad_data out = np.zeros((self.input_size[0], self.context_length * self.input_size[1])).astype('float32') - lod = lod[0] + offset = [0] + for seq_len in lod[0]: + offset.append(offset[-1] + seq_len) begin_pad = np.max([0, -self.context_start]) - for i in range(len(lod) - 1): + for i in range(len(offset) - 1): for j in range(self.context_length): - in_begin = lod[i] + self.context_start + j - in_end = lod[i + 1] + self.context_start + j - out_begin = lod[i] - out_end = lod[i + 1] - if in_begin < lod[i]: - pad_size = np.min([lod[i] - in_begin, lod[i + 1] - lod[i]]) + in_begin = offset[i] + self.context_start + j + in_end = offset[i + 1] + self.context_start + j + out_begin = offset[i] + out_end = offset[i + 1] + if in_begin < offset[i]: + pad_size = np.min( + [offset[i] - in_begin, offset[i + 1] - offset[i]]) if self.padding_trainable: sub_w = pading_data[j:j + pad_size, :] - out[lod[i]:lod[i] + pad_size, j * self.input_size[1]:( - j + 1) * self.input_size[1]] = sub_w - out_begin = lod[i] + pad_size - in_begin = lod[i] + out[offset[i]:offset[i] + pad_size, j * self.input_size[ + 1]:(j + 1) * self.input_size[1]] = sub_w + out_begin = offset[i] + pad_size + in_begin = offset[i] - if in_end > lod[i + 1]: + if in_end > offset[i + 1]: pad_size = np.min( - [in_end - lod[i + 1], lod[i + 1] - lod[i]]) + [in_end - offset[i + 1], offset[i + 1] - offset[i]]) if self.padding_trainable: sub_w = pading_data[begin_pad + self.context_start + j - pad_size:begin_pad + self.context_start + j, :] - out[lod[i + 1] - pad_size:lod[i + 1], j * self. + out[offset[i + 1] - pad_size:offset[i + 1], j * self. input_size[1]:(j + 1) * self.input_size[1]] = sub_w - in_end = lod[i + 1] - out_end = lod[i + 1] - pad_size + in_end = offset[i + 1] + out_end = offset[i + 1] - pad_size if in_end <= in_begin: continue @@ -175,7 +178,11 @@ class TestSeqProject(OpTest): self.context_stride = 1 self.input_size = [self.input_row, 23] - self.lod = [[0, 4, 5, 8, self.input_row]] + offset_lod = [[0, 4, 5, 8, self.input_row]] + self.lod = [[]] + # convert from offset-based lod to length-based lod + for i in range(len(offset_lod[0]) - 1): + self.lod[0].append(offset_lod[0][i + 1] - offset_lod[0][i]) self.output_represention = 8 # output feature size @@ -188,7 +195,11 @@ class TestSeqProjectCase1(TestSeqProject): self.context_stride = 1 self.input_size = [self.input_row, 23] - self.lod = [[0, 4, 5, 8, self.input_row]] + offset_lod = [[0, 4, 5, 8, self.input_row]] + self.lod = [[]] + # convert from offset-based lod to length-based lod + for i in range(len(offset_lod[0]) - 1): + self.lod[0].append(offset_lod[0][i + 1] - offset_lod[0][i]) self.output_represention = 8 # output feature size @@ -201,10 +212,14 @@ class TestSeqProjectCase2(TestSeqProject): self.context_stride = 1 self.input_size = [self.input_row, 23] - idx = range(self.input_size[0]) + idx = list(range(self.input_size[0])) del idx[0] - self.lod = [[0] + np.sort(random.sample(idx, 8)).tolist() + - [self.input_size[0]]] + offset_lod = [[0] + np.sort(random.sample(idx, 8)).tolist() + + [self.input_size[0]]] + self.lod = [[]] + # convert from offset-based lod to length-based lod + for i in range(len(offset_lod[0]) - 1): + self.lod[0].append(offset_lod[0][i + 1] - offset_lod[0][i]) self.output_represention = 8 # output feature size diff --git a/python/paddle/fluid/tests/unittests/test_seq_pool.py 
b/python/paddle/fluid/tests/unittests/test_seq_pool.py index 2e48ef0e88..0b3659d7a6 100644 --- a/python/paddle/fluid/tests/unittests/test_seq_pool.py +++ b/python/paddle/fluid/tests/unittests/test_seq_pool.py @@ -18,26 +18,34 @@ from op_test import OpTest class TestSeqAvgPool(OpTest): + def convert_to_offset(self, lod): + offset = [[0] for i in lod] + for i, level in enumerate(lod): + for seq_len in level: + offset[i].append(offset[i][-1] + seq_len) + return offset + def set_data(self): self.op_type = 'sequence_pool' # one level, batch size is 4 x = np.random.uniform(0.1, 1, [11, 23]).astype('float32') - lod = [[0, 4, 5, 8, 11]] + lod = [[4, 1, 3, 3]] self.inputs = {'X': (x, lod)} + offset = self.convert_to_offset(lod) out = np.zeros((4, 23)).astype('float32') self.outputs = {'Out': out} - return x, lod, out + return x, offset, out - def compute(self, x, lod, out): + def compute(self, x, offset, out): self.attrs = {'pooltype': "AVERAGE"} - for i in range(4): - sub_x = x[lod[0][i]:lod[0][i + 1], :] + for i in range(len(offset[0]) - 1): + sub_x = x[offset[0][i]:offset[0][i + 1], :] out[i] = sub_x.mean(axis=0) def setUp(self): - x, lod, out = self.set_data() - self.compute(x, lod, out) + x, offset, out = self.set_data() + self.compute(x, offset, out) def test_check_output(self): self.check_output() @@ -50,10 +58,10 @@ class TestSeqAvgPool(OpTest): class TestSeqSumPool(TestSeqAvgPool): - def compute(self, x, lod, out): + def compute(self, x, offset, out): self.attrs = {'pooltype': "SUM"} - for i in range(4): - sub_x = x[lod[0][i]:lod[0][i + 1], :] + for i in range(len(offset[0]) - 1): + sub_x = x[offset[0][i]:offset[0][i + 1], :] out[i] = sub_x.sum(axis=0) @@ -61,46 +69,47 @@ class TestSeqMaxPool(TestSeqAvgPool): def set_data(self): self.op_type = 'sequence_pool' x = np.random.uniform(0.1, 1, [13, 23]).astype('float32') - lod = [[0, 4, 5, 8, 13]] - for i in range(4): - l = lod[0][i + 1] - lod[0][i] - x[lod[0][i] + np.random.randint(l), :] += 2.0 + lod = [[4, 1, 3, 5]] + offset = self.convert_to_offset(lod) + for i in range(len(offset[0]) - 1): + l = offset[0][i + 1] - offset[0][i] + x[offset[0][i] + np.random.randint(l), :] += 2.0 self.inputs = {'X': (x, lod)} out = np.zeros((4, 23)).astype('float32') self.outputs = {'Out': out} - return x, lod, out + return x, offset, out - def compute(self, x, lod, out): + def compute(self, x, offset, out): self.attrs = {'pooltype': "MAX"} - for i in range(4): - sub_x = x[lod[0][i]:lod[0][i + 1], :] + for i in range(len(offset[0]) - 1): + sub_x = x[offset[0][i]:offset[0][i + 1], :] out[i] = np.amax(sub_x, axis=0) class TestSeqSqrtPool(TestSeqAvgPool): - def compute(self, x, lod, out): + def compute(self, x, offset, out): self.attrs = {'pooltype': "SQRT"} - for i in range(4): - sub_x = x[lod[0][i]:lod[0][i + 1], :] - len = lod[0][i + 1] - lod[0][i] - out[i] = sub_x.sum(axis=0) / np.sqrt(len) + for i in range(len(offset[0]) - 1): + sub_x = x[offset[0][i]:offset[0][i + 1], :] + seq_len = offset[0][i + 1] - offset[0][i] + out[i] = sub_x.sum(axis=0) / np.sqrt(seq_len) class TestSeqLastPool(TestSeqAvgPool): - def compute(self, x, lod, out): + def compute(self, x, offset, out): self.attrs = {'pooltype': "LAST"} - for i in range(4): - sub_x = x[lod[0][i]:lod[0][i + 1], :] + for i in range(len(offset[0]) - 1): + sub_x = x[offset[0][i]:offset[0][i + 1], :] out[i] = sub_x[-1, :] class TestSeqFirstPool(TestSeqAvgPool): - def compute(self, x, lod, out): + def compute(self, x, offset, out): self.attrs = {'pooltype': "FIRST"} - for i in range(4): - sub_x = 
x[lod[0][i]:lod[0][i + 1], :] + for i in range(len(offset[0]) - 1): + sub_x = x[offset[0][i]:offset[0][i + 1], :] out[i] = sub_x[0, :] @@ -109,35 +118,39 @@ class TestSeqAvgPool2D(TestSeqAvgPool): self.op_type = 'sequence_pool' # one level, batch size is 4 x = np.random.uniform(0.1, 1, [13, 3, 17]).astype('float32') - lod = [[0, 4, 5, 8, 13]] + lod = [[4, 1, 3, 5]] self.inputs = {'X': (x, lod)} + offset = self.convert_to_offset(lod) out = np.zeros((4, 3, 17)).astype('float32') self.outputs = {'Out': out} - return x, lod, out + return x, offset, out - def compute(self, x, lod, out): + def compute(self, x, offset, out): self.attrs = {'pooltype': "AVERAGE"} - for i in range(4): - sub_x = np.reshape(x[lod[0][i]:lod[0][i + 1], :], (-1, 3 * 17)) + for i in range(len(offset[0]) - 1): + sub_x = np.reshape(x[offset[0][i]:offset[0][i + 1], :], + (-1, 3 * 17)) out[i] = np.reshape(sub_x.mean(axis=0), (3, 17)) class TestSeqSumPool2D(TestSeqAvgPool2D): - def compute(self, x, lod, out): + def compute(self, x, offset, out): self.attrs = {'pooltype': "SUM"} - for i in range(4): - sub_x = np.reshape(x[lod[0][i]:lod[0][i + 1], :], (-1, 3 * 17)) + for i in range(len(offset[0]) - 1): + sub_x = np.reshape(x[offset[0][i]:offset[0][i + 1], :], + (-1, 3 * 17)) out[i] = np.reshape(sub_x.sum(axis=0), (3, 17)) class TestSeqSqrtPool2D(TestSeqAvgPool2D): - def compute(self, x, lod, out): + def compute(self, x, offset, out): self.attrs = {'pooltype': "SQRT"} - for i in range(4): - sub_x = np.reshape(x[lod[0][i]:lod[0][i + 1], :], (-1, 3 * 17)) - len = lod[0][i + 1] - lod[0][i] - out[i] = np.reshape(sub_x.sum(axis=0) / np.sqrt(len), (3, 17)) + for i in range(len(offset[0]) - 1): + sub_x = np.reshape(x[offset[0][i]:offset[0][i + 1], :], + (-1, 3 * 17)) + seq_len = offset[0][i + 1] - offset[0][i] + out[i] = np.reshape(sub_x.sum(axis=0) / np.sqrt(seq_len), (3, 17)) def test_check_grad(self): # Remove MaxIndex after check_grad is refined. 
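Across these sequence-op test diffs the recurring change is the LoD representation: tests used to pass offset-based LoD such as [[0, 4, 5, 8, 11]] and now pass length-based LoD such as [[4, 1, 3, 3]], converting back to offsets wherever slices into the flat tensor are needed (the convert_to_offset helpers above). A minimal standalone sketch of both directions, assuming plain Python lists of ints; lengths_to_offset and offset_to_lengths are illustrative names, not Paddle APIs:

import numpy as np

def lengths_to_offset(lod):
    # length-based LoD -> offset-based LoD, one list per level,
    # e.g. [[4, 1, 3, 3]] -> [[0, 4, 5, 8, 11]]
    offset = [[0] for _ in lod]
    for i, level in enumerate(lod):
        for seq_len in level:
            offset[i].append(offset[i][-1] + seq_len)
    return offset

def offset_to_lengths(offset_lod):
    # offset-based LoD -> length-based LoD,
    # e.g. [[0, 4, 5, 8, 11]] -> [[4, 1, 3, 3]]
    return [[level[i + 1] - level[i] for i in range(len(level) - 1)]
            for level in offset_lod]

# Slicing a batch of sequences out of the flat tensor still uses offsets.
x = np.random.rand(11, 23).astype('float32')
lod = [[4, 1, 3, 3]]
offset = lengths_to_offset(lod)
seqs = [x[offset[0][i]:offset[0][i + 1]] for i in range(len(lod[0]))]
assert [len(s) for s in seqs] == lod[0]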
@@ -150,36 +163,40 @@ class TestSeqMaxPool2D(TestSeqAvgPool2D): def set_data(self): self.op_type = 'sequence_pool' x = np.random.uniform(0.1, 1, [13, 3, 11]).astype('float32') - lod = [[0, 4, 5, 8, 13]] + lod = [[4, 1, 3, 5]] self.inputs = {'X': (x, lod)} - for i in range(4): - l = lod[0][i + 1] - lod[0][i] - x[lod[0][i] + np.random.randint(l), :] += 1.0 + offset = self.convert_to_offset(lod) + for i in range(len(offset[0]) - 1): + l = offset[0][i + 1] - offset[0][i] + x[offset[0][i] + np.random.randint(l), :] += 1.0 out = np.zeros((4, 3, 11)).astype('float32') self.outputs = {'Out': out} - return x, lod, out + return x, offset, out - def compute(self, x, lod, out): + def compute(self, x, offset, out): self.attrs = {'pooltype': "MAX"} - for i in range(4): - sub_x = np.reshape(x[lod[0][i]:lod[0][i + 1], :], (-1, 3 * 11)) + for i in range(len(offset[0]) - 1): + sub_x = np.reshape(x[offset[0][i]:offset[0][i + 1], :], + (-1, 3 * 11)) out[i] = np.reshape(np.amax(sub_x, axis=0), (3, 11)) class TestSeqLastPool2D(TestSeqAvgPool2D): - def compute(self, x, lod, out): + def compute(self, x, offset, out): self.attrs = {'pooltype': "LAST"} - for i in range(4): - sub_x = np.reshape(x[lod[0][i]:lod[0][i + 1], :], (-1, 3 * 17)) + for i in range(len(offset[0]) - 1): + sub_x = np.reshape(x[offset[0][i]:offset[0][i + 1], :], + (-1, 3 * 17)) out[i] = np.reshape(sub_x[-1, :], (3, 17)) class TestSeqFirstPool2D(TestSeqAvgPool2D): - def compute(self, x, lod, out): + def compute(self, x, offset, out): self.attrs = {'pooltype': "FIRST"} - for i in range(4): - sub_x = np.reshape(x[lod[0][i]:lod[0][i + 1], :], (-1, 3 * 17)) + for i in range(len(offset[0]) - 1): + sub_x = np.reshape(x[offset[0][i]:offset[0][i + 1], :], + (-1, 3 * 17)) out[i] = np.reshape(sub_x[0, :], (3, 17)) diff --git a/python/paddle/fluid/tests/unittests/test_sequence_erase_op.py b/python/paddle/fluid/tests/unittests/test_sequence_erase_op.py index ebab77e804..8f0765277a 100644 --- a/python/paddle/fluid/tests/unittests/test_sequence_erase_op.py +++ b/python/paddle/fluid/tests/unittests/test_sequence_erase_op.py @@ -18,15 +18,17 @@ from op_test import OpTest def sequence_erase(in_seq, lod0, tokens): - new_lod0 = [0] + new_lod0 = [] out_seq = [] - for i in range(0, len(lod0) - 1): + offset = 0 + for i in range(0, len(lod0)): num_out = 0 - for dat in in_seq[lod0[i]:lod0[i + 1]]: + for dat in in_seq[offset:(offset + lod0[i])]: if dat not in tokens: out_seq.append(dat) num_out += 1 - new_lod0.append(new_lod0[-1] + num_out) + offset += lod0[i] + new_lod0.append(num_out) return np.array(out_seq).astype("int32"), new_lod0 @@ -34,7 +36,7 @@ class TestSequenceEraseOpInt32(OpTest): def setUp(self): self.op_type = "sequence_erase" in_seq = np.random.randint(0, 10, (30, 1)).astype("int32") - lod = [[0, 9, 13, 24, 30]] + lod = [[9, 4, 11, 6]] tokens = [2, 3, 5] out_seq, new_lod0 = sequence_erase(in_seq, lod[0], tokens) self.attrs = {'tokens': tokens} @@ -49,7 +51,7 @@ class TestSequenceEraseOpInt64(OpTest): def setUp(self): self.op_type = "sequence_erase" in_seq = np.random.randint(0, 10, (30, 1)).astype("int64") - lod = [[0, 9, 13, 24, 30]] + lod = [[9, 4, 11, 6]] tokens = [2, 3, 5] out_seq, new_lod0 = sequence_erase(in_seq, lod[0], tokens) self.attrs = {'tokens': tokens} @@ -64,7 +66,7 @@ class TestSequenceEraseOpEmpty(OpTest): def setUp(self): self.op_type = "sequence_erase" in_seq = np.random.randint(0, 10, (30, 1)).astype("int32") - lod = [[0, 9, 13, 24, 30]] + lod = [[9, 4, 11, 6]] tokens = [] out_seq, new_lod0 = sequence_erase(in_seq, lod[0], tokens) 
self.attrs = {'tokens': tokens} diff --git a/python/paddle/fluid/tests/unittests/test_sequence_expand.py b/python/paddle/fluid/tests/unittests/test_sequence_expand.py index 4c8ec1426c..5ff0dab23e 100644 --- a/python/paddle/fluid/tests/unittests/test_sequence_expand.py +++ b/python/paddle/fluid/tests/unittests/test_sequence_expand.py @@ -21,7 +21,7 @@ class TestSequenceExpand(OpTest): def set_data(self): x_data = np.random.uniform(0.1, 1, [3, 1]).astype('float32') y_data = np.random.uniform(0.1, 1, [8, 1]).astype('float32') - y_lod = [[0, 1, 4, 8]] + y_lod = [[1, 3, 4]] self.inputs = {'X': x_data, 'Y': (y_data, y_lod)} def compute(self): @@ -37,23 +37,27 @@ class TestSequenceExpand(OpTest): out = np.zeros(shape=((0, ) + x_data.shape[1:]), dtype=x_data.dtype) if x_lod is None: - x_idx = [i for i in xrange(x_data.shape[0] + 1)] + # x_idx = [i for i in xrange(x_data.shape[0] + 1)] + x_idx = [1] * x_data.shape[0] else: x_idx = x_lod[0] - out_lod = [[0]] + out_lod = [[]] + + offset = 0 + for i in range(len(y_lod[ref_level])): + repeat_num = y_lod[ref_level][i] + x_len = x_idx[i] - for i in xrange(1, len(y_lod[ref_level])): - repeat_num = y_lod[ref_level][i] - y_lod[ref_level][i - 1] - x_len = x_idx[i] - x_idx[i - 1] if repeat_num > 0: - x_sub = x_data[x_idx[i - 1]:x_idx[i], :] + x_sub = x_data[offset:(offset + x_len), :] stacked_x_sub = x_sub for r in range(repeat_num - 1): stacked_x_sub = np.vstack((stacked_x_sub, x_sub)) out = np.vstack((out, stacked_x_sub)) if x_lod is not None: - for j in xrange(repeat_num): - out_lod[0].append(out_lod[0][-1] + x_len) + for j in range(repeat_num): + out_lod[0].append(x_len) + offset += x_len if x_lod is None: self.outputs = {'Out': out} @@ -75,9 +79,9 @@ class TestSequenceExpand(OpTest): class TestSequenceExpandCase1(TestSequenceExpand): def set_data(self): x_data = np.random.uniform(0.1, 1, [5, 1]).astype('float32') - x_lod = [[0, 2, 5]] + x_lod = [[2, 3]] y_data = np.random.uniform(0.1, 1, [13, 1]).astype('float32') - y_lod = [[0, 2, 5], [0, 2, 4, 7, 10, 13]] + y_lod = [[2, 3], [2, 2, 3, 3, 3]] self.inputs = {'X': x_data, 'Y': (y_data, y_lod)} self.attrs = {'ref_level': 0} @@ -85,9 +89,9 @@ class TestSequenceExpandCase1(TestSequenceExpand): class TestSequenceExpandCase2(TestSequenceExpand): def set_data(self): x_data = np.random.uniform(0.1, 1, [1, 2, 2]).astype('float32') - x_lod = [[0, 1]] + x_lod = [[1]] y_data = np.random.uniform(0.1, 1, [2, 2, 2]).astype('float32') - y_lod = [[0, 2], [0, 2]] + y_lod = [[2], [1, 1]] self.inputs = {'X': (x_data, x_lod), 'Y': (y_data, y_lod)} self.attrs = {'ref_level': 0} @@ -95,9 +99,9 @@ class TestSequenceExpandCase2(TestSequenceExpand): class TestSequenceExpandCase3(TestSequenceExpand): def set_data(self): x_data = np.random.uniform(0.1, 1, [4, 1]).astype('float32') - x_lod = [[0, 1, 2, 3, 4]] - y_data = np.random.uniform(0.1, 1, [6, 1]).astype('float32') - y_lod = [[0, 2, 4, 4, 6]] + x_lod = [[1, 1, 1, 1]] + y_data = np.random.uniform(0.1, 1, [8, 1]).astype('float32') + y_lod = [[2, 2, 2, 2]] self.inputs = {'X': (x_data, x_lod), 'Y': (y_data, y_lod)} @@ -105,9 +109,9 @@ class TestSequenceExpandCase4(TestSequenceExpand): def set_data(self): data = np.random.uniform(0.1, 1, [5 * 2, 1]) x_data = np.array(data).reshape([5, 2]).astype('float32') - x_lod = [[0, 2, 5]] - y_data = np.random.uniform(0.1, 1, [3, 1]).astype('float32') - y_lod = [[0, 1, 3], [0, 1, 3]] + x_lod = [[2, 3]] + y_data = np.random.uniform(0.1, 1, [5, 1]).astype('float32') + y_lod = [[2], [2, 3]] self.inputs = {'X': (x_data, x_lod), 'Y': (y_data, 
y_lod)} diff --git a/python/paddle/fluid/tests/unittests/test_sequence_reshape.py b/python/paddle/fluid/tests/unittests/test_sequence_reshape.py index efeab56039..39b02ecf6d 100644 --- a/python/paddle/fluid/tests/unittests/test_sequence_reshape.py +++ b/python/paddle/fluid/tests/unittests/test_sequence_reshape.py @@ -22,7 +22,7 @@ class TestSequenceReshape(OpTest): def setUp(self): self.op_type = 'sequence_reshape' dimension = 12 - x_lod = [[0, 4, 5, 8, 11]] + x_lod = [[4, 1, 3, 3]] x = np.random.uniform(0.1, 1, [11, 24]).astype('float32') self.inputs = {'X': (x, x_lod)} @@ -34,13 +34,13 @@ class TestSequenceReshape(OpTest): def compute_output(self, x, x_lod, dimension): x_width = x.shape[1] - out_lod = [[0]] - for i in xrange(len(x_lod[0]) - 1): - seq_len = x_lod[0][i + 1] - x_lod[0][i] + out_lod = [[]] + for i in range(len(x_lod[0])): + seq_len = x_lod[0][i] offset = (seq_len * x_width) / dimension assert int(offset) * dimension == seq_len * x_width - out_lod[0].append(out_lod[0][-1] + int(offset)) - out = np.zeros(shape=(out_lod[0][-1], dimension)).astype('float32') + out_lod[0].append(int(offset)) + out = np.zeros(shape=(sum(out_lod[0]), dimension)).astype('float32') out.ravel()[:] = x.ravel()[:] return out, out_lod @@ -55,7 +55,7 @@ class TestSequenceReshape_reduce(TestSequenceReshape): def setUp(self): self.op_type = 'sequence_reshape' dimension = 24 - x_lod = [[0, 4, 6, 8, 12]] + x_lod = [[4, 2, 2, 4]] x = np.random.uniform(0.1, 1, [12, 12]).astype('float32') self.inputs = {'X': (x, x_lod)} @@ -70,7 +70,7 @@ class TestSequenceReshape_same(TestSequenceReshape): def setUp(self): self.op_type = 'sequence_reshape' dimension = 12 - x_lod = [[0, 4, 6, 8, 12]] + x_lod = [[4, 2, 2, 4]] x = np.random.uniform(0.1, 1, [12, 12]).astype('float32') self.inputs = {'X': (x, x_lod)} diff --git a/python/paddle/fluid/tests/unittests/test_sequence_slice_op.py b/python/paddle/fluid/tests/unittests/test_sequence_slice_op.py index 660b4a171d..313e485d1e 100644 --- a/python/paddle/fluid/tests/unittests/test_sequence_slice_op.py +++ b/python/paddle/fluid/tests/unittests/test_sequence_slice_op.py @@ -29,20 +29,20 @@ class TestSequenceSliceOp(OpTest): self.inputs = {'X': (x, lod), 'Offset': offset, 'Length': length} outs = [] #np.zeros((100, 3, 2)).astype('float32') - out_lod = [[0]] - out_lod_offset = 0 + out_lod = [[]] + lod_offset = 0 for i in range(len(offset)): - sub_x = x[lod[0][i] + offset[i, 0]:lod[0][i] + offset[i, 0] + + sub_x = x[lod_offset + offset[i, 0]:lod_offset + offset[i, 0] + length[i, 0], :] - out_lod_offset = out_lod_offset + len(sub_x) outs.append(sub_x) - out_lod[0].append(out_lod_offset) + out_lod[0].append(len(sub_x)) + lod_offset += lod[0][i] outs = np.concatenate(outs, axis=0) self.outputs = {'Out': (outs, out_lod)} def init_test_case(self): self.x_dim = (100, 3, 2) - self.x_lod = [[0, 20, 40, 60, 80, 100]] + self.x_lod = [[20, 20, 20, 20, 20]] self.offset = [[1], [2], [3], [4], [5]] self.length = [[10], [8], [6], [4], [2]] diff --git a/python/paddle/fluid/tests/unittests/test_sequence_softmax_op.py b/python/paddle/fluid/tests/unittests/test_sequence_softmax_op.py index d6dc99bb31..c4fc8b74cf 100644 --- a/python/paddle/fluid/tests/unittests/test_sequence_softmax_op.py +++ b/python/paddle/fluid/tests/unittests/test_sequence_softmax_op.py @@ -26,15 +26,16 @@ class TestSequenceSoftmaxOp(OpTest): self.init_op_type() x = np.random.uniform(0.1, 1, (11, 1)).astype("float32") - lod = [[0, 4, 5, 8, 11]] + lod = [[4, 1, 3, 3]] out = np.zeros((11, 1)).astype("float32") - for i in range(4): - 
sub_x = x[lod[0][i]:lod[0][i + 1], :] - sub_x = sub_x.reshape(1, lod[0][i + 1] - lod[0][i]) + offset = 0 + for i in range(len(lod[0])): + sub_x = x[offset:offset + lod[0][i], :] + sub_x = sub_x.reshape(1, lod[0][i]) sub_out = stable_softmax(sub_x) - out[lod[0][i]:lod[0][i + 1], :] = sub_out.reshape( - lod[0][i + 1] - lod[0][i], 1) + out[offset:offset + lod[0][i], :] = sub_out.reshape(lod[0][i], 1) + offset += lod[0][i] self.inputs = {"X": (x, lod)} self.outputs = {"Out": out} @@ -60,6 +61,8 @@ class TestSequenceSoftmaxOp(OpTest): # ----------------cudnn Sequencesoftmax---------------- +@unittest.skipIf(not core.is_compiled_with_cuda(), + "core is not compiled with CUDA") class TestSequenceSoftmaxCUDNNOp(TestSequenceSoftmaxOp): def init_op_type(self): self.use_cudnn = True diff --git a/python/paddle/fluid/tests/unittests/test_shape_op.py b/python/paddle/fluid/tests/unittests/test_shape_op.py new file mode 100644 index 0000000000..a62ee05007 --- /dev/null +++ b/python/paddle/fluid/tests/unittests/test_shape_op.py @@ -0,0 +1,47 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import unittest +import numpy as np +from op_test import OpTest + + +class TestShapeOp(OpTest): + def setUp(self): + self.op_type = "shape" + self.config() + self.shape = [2, 3] + input = np.zeros(self.shape) + self.inputs = {'Input': input} + self.outputs = {'Out': np.array(self.shape)} + + def config(self): + self.shape = [2, 3] + + def test_check_output(self): + self.check_output() + + +class case1(TestShapeOp): + def config(self): + self.shape = [2] + + +class case2(TestShapeOp): + def config(self): + self.shape = [1, 2, 3] + + +if __name__ == '__main__': + unittest.main() diff --git a/python/paddle/fluid/tests/unittests/test_shrink_rnn_memory.py b/python/paddle/fluid/tests/unittests/test_shrink_rnn_memory.py index 1d93230e7b..a994bf181a 100644 --- a/python/paddle/fluid/tests/unittests/test_shrink_rnn_memory.py +++ b/python/paddle/fluid/tests/unittests/test_shrink_rnn_memory.py @@ -21,6 +21,9 @@ from paddle.fluid.framework import default_main_program, switch_main_program from paddle.fluid.framework import Program import numpy as np +from paddle.fluid.layers.control_flow import shrink_memory +from paddle.fluid.layers.control_flow import lod_rank_table + class TestShrinkRNNMemoryBase(unittest.TestCase): def setUp(self): @@ -30,23 +33,23 @@ class TestShrinkRNNMemoryBase(unittest.TestCase): x.stop_gradient = False rank_table_tensor = layers.data( 'rank_table_tensor', shape=[1], dtype='float32', lod_level=1) - table = layers.lod_rank_table(x=rank_table_tensor) + table = lod_rank_table(x=rank_table_tensor) i = layers.zeros(dtype='int64', shape=[1]) - self.mem1 = layers.shrink_memory(x=x, i=i, table=table) + self.mem1 = shrink_memory(x=x, i=i, table=table) i = layers.increment(x=i) i.stop_gradient = True - self.mem2 = layers.shrink_memory(x=self.mem1, i=i, table=table) + self.mem2 = shrink_memory(x=self.mem1, i=i, table=table) i = layers.increment(x=i) 
i.stop_gradient = True - self.mem3 = layers.shrink_memory(x=self.mem2, i=i, table=table) + self.mem3 = shrink_memory(x=self.mem2, i=i, table=table) mem3_mean = layers.mean(self.mem3) append_backward(loss=mem3_mean) self.x_grad = self.main_program.global_block().var('x@GRAD') def sum_lodtensor(self, tensor): sum_res = 0.0 - for i in xrange(np.product(tensor.get_dims())): - sum_res += tensor.get_float_element(i) + for i in range(np.product(tensor.shape())): + sum_res += tensor._get_float_element(i) return sum_res @@ -54,12 +57,12 @@ class TestShrinkRNNMemoryReferLoD(TestShrinkRNNMemoryBase): def test_refer_lod(self): cpu = core.CPUPlace() x_tensor = core.LoDTensor() - x_tensor.set_lod([[0, 2, 5, 6]]) + x_tensor.set_recursive_sequence_lengths([[2, 3, 1]]) tensor_np = np.random.random(size=(6, 100)).astype('float32') x_tensor.set(tensor_np, cpu) rank_table_tensor = core.LoDTensor() - rank_table_tensor.set_lod([[0, 1, 3, 6]]) + rank_table_tensor.set_recursive_sequence_lengths([[1, 2, 3]]) rank_table_tensor.set(np.random.random(size=(6, 1)).astype('float32'), cpu) @@ -83,7 +86,7 @@ class TestShrinkRNNMemoryNoLoD(TestShrinkRNNMemoryBase): x_tensor.set(tensor_np, cpu) rank_table_tensor = core.LoDTensor() - rank_table_tensor.set_lod([[0, 1, 3, 6]]) + rank_table_tensor.set_recursive_sequence_lengths([[1, 2, 3]]) rank_table_tensor.set(np.random.random(size=(6, 1)).astype('float32'), cpu) diff --git a/python/paddle/fluid/tests/unittests/test_slice_op.py b/python/paddle/fluid/tests/unittests/test_slice_op.py new file mode 100644 index 0000000000..1a48bce3bb --- /dev/null +++ b/python/paddle/fluid/tests/unittests/test_slice_op.py @@ -0,0 +1,62 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
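The new test_slice_op.py that follows pairs each attrs triple (axes, starts, ends) with the equivalent numpy basic slice; for example axes=[0, 1, 2], starts=[1, 0, 2], ends=[3, 3, 4] on a 4-D input is x[1:3, 0:3, 2:4, :], and the later cases rely on numpy's clamping of out-of-range ends and wrapping of negative bounds. A small reference helper under those assumptions (slice_ref is an illustrative name, not part of the test file):

import numpy as np

def slice_ref(x, axes, starts, ends):
    # Build a full slice per dimension; axes not listed are taken whole.
    # Negative and out-of-range bounds follow numpy basic-slicing rules.
    slices = [slice(None)] * x.ndim
    for axis, start, end in zip(axes, starts, ends):
        slices[axis] = slice(start, end)
    return x[tuple(slices)]

x = np.random.random([3, 4, 5, 6]).astype('float32')
out = slice_ref(x, axes=[0, 1, 2], starts=[1, 0, 2], ends=[3, 3, 4])
assert np.array_equal(out, x[1:3, 0:3, 2:4, :])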
+ +import unittest +import numpy as np +from op_test import OpTest + + +class TestSliceOp(OpTest): + def setUp(self): + self.op_type = "slice" + self.config() + self.inputs = {'Input': self.input} + self.outputs = {'Out': self.out} + self.attrs = { + 'axes': self.axes, + 'starts': self.starts, + 'ends': self.ends + } + + def config(self): + self.input = np.random.random([3, 4, 5, 6]).astype("float32") + self.starts = [1, 0, 2] + self.ends = [3, 3, 4] + self.axes = [0, 1, 2] + self.out = self.input[1:3, 0:3, 2:4, :] + + def test_check_output(self): + self.check_output() + + +class TestCase1(TestSliceOp): + def config(self): + self.input = np.random.random([3, 4, 5, 6]).astype("float32") + self.starts = [-3, 0, 2] + self.ends = [3, 100, -1] + self.axes = [0, 1, 2] + self.out = self.input[-3:3, 0:100, 2:-1, :] + + +class TestCase2(TestSliceOp): + def config(self): + self.input = np.random.random([3, 4, 5, 6]).astype("float32") + self.starts = [-3, 0, 2] + self.ends = [3, 100, -1] + self.axes = [0, 1, 3] + self.out = self.input[-3:3, 0:100, :, 2:-1] + + +if __name__ == '__main__': + unittest.main() diff --git a/python/paddle/fluid/tests/unittests/test_split_var.py b/python/paddle/fluid/tests/unittests/test_slice_var.py similarity index 71% rename from python/paddle/fluid/tests/unittests/test_split_var.py rename to python/paddle/fluid/tests/unittests/test_slice_var.py index 79d387f006..82305b23a1 100644 --- a/python/paddle/fluid/tests/unittests/test_split_var.py +++ b/python/paddle/fluid/tests/unittests/test_slice_var.py @@ -14,22 +14,14 @@ import math import unittest -from paddle.fluid.transpiler.distribute_transpiler import split_dense_variable +from paddle.fluid.transpiler.distribute_transpiler import slice_variable import paddle.fluid as fluid import paddle.fluid.core as core import random -class TestSplitVar(unittest.TestCase): - def test_check_output(self): - # split below shapes to 10 servers - shapes = [[3, 5], [1024], [28, 784], [8, 1020], [800, 10]] - expected_sizes = [ - [15], [1024], - [2352, 2352, 2352, 2352, 2352, 2352, 2352, 2352, 2352, 784], - [2040, 2040, 2040, 2040], - [1150, 1150, 1150, 1150, 1150, 1150, 1100] - ] +class TestSliceVar(unittest.TestCase): + def check_slice_output(self, shapes, expected_sizes, min_size): var_list = [] program = fluid.Program() for shape in shapes: @@ -39,7 +31,7 @@ class TestSplitVar(unittest.TestCase): # dtype=core.VarDesc.VarType.LOD_TENSOR, shape=shape) var_list.append(var) - blocks = split_dense_variable(var_list, 10) + blocks = slice_variable(var_list, 10, min_size) all_sizes = [] for s in expected_sizes: for s2 in s: @@ -48,6 +40,25 @@ class TestSplitVar(unittest.TestCase): varname, block_id, size = block_str.split(":") self.assertEqual(int(size), all_sizes[i]) + def test_1k(self): + shapes = [[3, 5], [1024], [28, 784], [8, 1020], [800, 10]] + expected_sizes = [ + [15], [1024], + [2352, 2352, 2352, 2352, 2352, 2352, 2352, 2352, 2352, 784], + [2040, 2040, 2040, 2040], + [1150, 1150, 1150, 1150, 1150, 1150, 1100] + ] + + self.check_slice_output(shapes, expected_sizes, 1024) + + def test_check_output_8k(self): + shapes = [[3, 5], [1024], [28, 784], [8, 1020], [800, 10], + [6, 33, 33, 33]] + expected_sizes = [[15], [1024], [10976, 10976], [8160], [8000], + [35937, 35937, 35937, 35937, 35937, 35937]] + + self.check_slice_output(shapes, expected_sizes, 8192) + if __name__ == '__main__': unittest.main() diff --git a/python/paddle/fluid/tests/unittests/test_softmax_op.py b/python/paddle/fluid/tests/unittests/test_softmax_op.py index 
279f3073f7..70ad05597c 100644 --- a/python/paddle/fluid/tests/unittests/test_softmax_op.py +++ b/python/paddle/fluid/tests/unittests/test_softmax_op.py @@ -26,15 +26,22 @@ def stable_softmax(x): class TestSoftmaxOp(OpTest): + def get_x_shape(self): + return [10, 10] + def setUp(self): self.op_type = "softmax" self.use_cudnn = False self.use_mkldnn = False self.dtype = np.float32 self.init_kernel_type() + self.shape = self.get_x_shape() + + x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype) + out = np.apply_along_axis(stable_softmax, 1, + x.reshape([-1, self.shape[-1]])) + out = out.reshape(self.shape) - x = np.random.uniform(0.1, 1, [10, 10]).astype(self.dtype) - out = np.apply_along_axis(stable_softmax, 1, x) self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)} self.outputs = {'Out': out} self.attrs = { @@ -63,11 +70,27 @@ class TestSoftmaxOp(OpTest): self.check_grad(["X"], "Out", max_relative_error=0.01) +class TestSoftmaxOp2(TestSoftmaxOp): + def get_x_shape(self): + return [2, 3, 4, 5] + + +@unittest.skipIf(not core.is_compiled_with_cuda(), + "core is not compiled with CUDA") class TestSoftmaxCUDNNOp(TestSoftmaxOp): def init_kernel_type(self): self.use_cudnn = True +@unittest.skipIf(not core.is_compiled_with_cuda(), + "core is not compiled with CUDA") +class TestSoftmaxCUDNNOp2(TestSoftmaxCUDNNOp): + def get_x_shape(self): + return [2, 3, 4, 5] + + +@unittest.skipIf(not core.is_compiled_with_cuda(), + "core is not compiled with CUDA") class TestSoftmaxFP16Op(TestSoftmaxOp): def init_kernel_type(self): self.dtype = np.float16 @@ -79,6 +102,15 @@ class TestSoftmaxFP16Op(TestSoftmaxOp): self.check_output_with_place(place, atol=1e-3) +@unittest.skipIf(not core.is_compiled_with_cuda(), + "core is not compiled with CUDA") +class TestSoftmaxFP16Op2(TestSoftmaxFP16Op): + def get_x_shape(self): + return [2, 3, 4, 5] + + +@unittest.skipIf(not core.is_compiled_with_cuda(), + "core is not compiled with CUDA") class TestSoftmaxFP16CUDNNOp(TestSoftmaxOp): def init_kernel_type(self): self.use_cudnn = True @@ -91,10 +123,22 @@ class TestSoftmaxFP16CUDNNOp(TestSoftmaxOp): self.check_output_with_place(place, atol=1e-3) +@unittest.skipIf(not core.is_compiled_with_cuda(), + "core is not compiled with CUDA") +class TestSoftmaxFP16CUDNNOp2(TestSoftmaxFP16CUDNNOp): + def get_x_shape(self): + return [2, 3, 4, 5] + + class TestSoftmaxMKLDNNOp(TestSoftmaxOp): def init_kernel_type(self): self.use_mkldnn = True +class TestSoftmaxMKLDNNOp2(TestSoftmaxMKLDNNOp): + def get_x_shape(self): + return [2, 3, 4, 5] + + if __name__ == "__main__": unittest.main() diff --git a/python/paddle/fluid/tests/unittests/test_split_and_merge_lod_tensor_op.py b/python/paddle/fluid/tests/unittests/test_split_and_merge_lod_tensor_op.py index 02cc7da849..ea1146166d 100644 --- a/python/paddle/fluid/tests/unittests/test_split_and_merge_lod_tensor_op.py +++ b/python/paddle/fluid/tests/unittests/test_split_and_merge_lod_tensor_op.py @@ -19,6 +19,8 @@ import paddle.fluid.layers as layers from paddle.fluid.framework import Program, program_guard from paddle.fluid.executor import Executor from paddle.fluid.backward import append_backward +from paddle.fluid.layers.control_flow import split_lod_tensor +from paddle.fluid.layers.control_flow import merge_lod_tensor class TestCPULoDTensorArrayOps(unittest.TestCase): @@ -56,7 +58,7 @@ class TestCPULoDTensorArrayOps(unittest.TestCase): def test_split_and_merge_lod_tensor_level_0(self): tensor = core.LoDTensor() tensor.set(np.arange(10).reshape(10, 1).astype('int32'), self.place()) - 
tensor.set_lod([[0, 3, 9, 10]]) + tensor.set_recursive_sequence_lengths([[3, 6, 1]]) mask_np = np.array([0, 1, 0]).astype('bool') mask_np = np.expand_dims(mask_np, axis=1) @@ -68,15 +70,15 @@ class TestCPULoDTensorArrayOps(unittest.TestCase): expect_true_tensor = np.expand_dims(expect_true_tensor, axis=1) expect_true = core.LoDTensor() expect_true.set(expect_true_tensor, self.place()) - expect_true.set_lod([[0, 6]]) + expect_true.set_recursive_sequence_lengths([[6]]) expect_false_tensor = np.array([0, 1, 2, 9]).astype('int32') expect_false_tensor = np.expand_dims(expect_false_tensor, axis=1) - expect_false_lod = [[0, 3, 4]] + expect_false_lod = [[3, 1]] expect_false = core.LoDTensor() expect_false.set(expect_false_tensor, self.place()) - expect_false.set_lod(expect_false_lod) + expect_false.set_recursive_sequence_lengths(expect_false_lod) self.main( tensor=tensor, @@ -96,12 +98,11 @@ class TestCPULoDTensorArrayOps(unittest.TestCase): y = layers.data(name='y', shape=[1]) y.persistable = True - out_true, out_false = layers.split_lod_tensor( - input=x, mask=y, level=level) + out_true, out_false = split_lod_tensor(input=x, mask=y, level=level) out_true.persistable = True out_false.persistable = True - out = layers.merge_lod_tensor( + out = merge_lod_tensor( in_true=out_true, in_false=out_false, mask=y, x=x, level=level) out.persistable = True @@ -126,7 +127,8 @@ class TestCPULoDTensorArrayOps(unittest.TestCase): def check_tensor_same(self, actual, expect): self.assertTrue(np.allclose(np.array(actual), np.array(expect))) - self.assertEqual(actual.lod(), expect.lod()) + self.assertEqual(actual.recursive_sequence_lengths(), + expect.recursive_sequence_lengths()) class TestCPUSplitMergeLoDTensorGrad(unittest.TestCase): @@ -141,9 +143,8 @@ class TestCPUSplitMergeLoDTensorGrad(unittest.TestCase): level = 0 - out_true, out_false = layers.split_lod_tensor( - input=x, mask=y, level=level) - out = layers.merge_lod_tensor( + out_true, out_false = split_lod_tensor(input=x, mask=y, level=level) + out = merge_lod_tensor( in_true=out_true, in_false=out_false, mask=y, x=x, level=level) mean = layers.mean(out) @@ -151,7 +152,7 @@ class TestCPUSplitMergeLoDTensorGrad(unittest.TestCase): tensor = core.LoDTensor() tensor.set(np.arange(10).reshape(10, 1).astype('float32'), place) - tensor.set_lod([[0, 3, 9, 10]]) + tensor.set_recursive_sequence_lengths([[3, 6, 1]]) mask_np = np.array([0, 1, 0]).astype('bool') mask_np = np.expand_dims(mask_np, axis=1) diff --git a/python/paddle/fluid/tests/unittests/test_split_ids_op.py b/python/paddle/fluid/tests/unittests/test_split_ids_op.py index e9f0a06a56..ca78613098 100644 --- a/python/paddle/fluid/tests/unittests/test_split_ids_op.py +++ b/python/paddle/fluid/tests/unittests/test_split_ids_op.py @@ -15,6 +15,8 @@ import unittest import numpy as np from op_test import OpTest +import paddle.fluid.core as core +from paddle.fluid.op import Operator class TestSplitIdsOp(OpTest): @@ -31,5 +33,55 @@ class TestSplitIdsOp(OpTest): self.check_output() +class TestSpliteIds(unittest.TestCase): + def get_places(self): + places = [core.CPUPlace()] + return places + + def test_check_output(self): + for place in self.get_places(): + self.check_with_place(place) + + def check_with_place(self, place): + scope = core.Scope() + rows = [0, 5, 7, 4, 9] + height = 20 + row_numel = 2 + + # initialize input variable X + x = scope.var('X').get_selected_rows() + x.set_rows(rows) + x.set_height(height) + np_array = np.ones((len(rows), row_numel)).astype("float32") + for i in range(len(rows)): + for j 
in range(row_numel): + np_array[i, j] = rows[i] + j + x_tensor = x.get_tensor() + x_tensor.set(np_array, place) + + outs_name = ["out%d" % i for i in xrange(3)] + outs = [ + scope.var(var_name).get_selected_rows() for var_name in outs_name + ] + + # expected output selected rows + expected_out_rows = [[0, 9], [7, 4], [5]] + + op = Operator("split_ids", Ids="X", Out=outs_name) + + for _ in range(3): + op.run(scope, place) + + for i in range(len(outs)): + expected_rows = expected_out_rows[i] + self.assertEqual(outs[i].rows(), expected_rows) + for j in range(len(expected_rows)): + row = expected_rows[j] + self.assertAlmostEqual( + float(row), np.array(outs[i].get_tensor())[j, 0]) + self.assertAlmostEqual( + float(row + 1), np.array(outs[i].get_tensor())[j, 1]) + + if __name__ == '__main__': unittest.main() diff --git a/python/paddle/fluid/tests/unittests/test_split_op.py b/python/paddle/fluid/tests/unittests/test_split_op.py index eb49a53e54..6b67a52e81 100644 --- a/python/paddle/fluid/tests/unittests/test_split_op.py +++ b/python/paddle/fluid/tests/unittests/test_split_op.py @@ -26,7 +26,7 @@ class TestSplitOp(OpTest): self.inputs = {'X': x} self.attrs = {'axis': axis, 'sections': [2, 1, 2]} self.outputs = {'Out': [('out%d' % i, out[i]) \ - for i in xrange(len(out))]} + for i in range(len(out))]} def _set_op_type(self): self.op_type = "split" diff --git a/python/paddle/fluid/tests/unittests/test_split_selected_rows_op.py b/python/paddle/fluid/tests/unittests/test_split_selected_rows_op.py index 61040a39ce..2b261820e0 100644 --- a/python/paddle/fluid/tests/unittests/test_split_selected_rows_op.py +++ b/python/paddle/fluid/tests/unittests/test_split_selected_rows_op.py @@ -53,7 +53,7 @@ class TestSpliteSelectedRows(unittest.TestCase): height_sections = [5, 5, 5, 5, 3] # initialize output variables [out0, out1] - outs_name = ["out%d" % i for i in xrange(len(height_sections))] + outs_name = ["out%d" % i for i in range(len(height_sections))] outs = [ scope.var(var_name).get_selected_rows() for var_name in outs_name ] diff --git a/python/paddle/fluid/tests/unittests/test_spp_op.py b/python/paddle/fluid/tests/unittests/test_spp_op.py index f0ab5909df..3cbfc2a703 100644 --- a/python/paddle/fluid/tests/unittests/test_spp_op.py +++ b/python/paddle/fluid/tests/unittests/test_spp_op.py @@ -26,7 +26,7 @@ class TestSppOp(OpTest): input = np.random.random(self.shape).astype("float32") nsize, csize, hsize, wsize = input.shape out_level_flatten = [] - for i in xrange(self.pyramid_height): + for i in range(self.pyramid_height): bins = np.power(2, i) kernel_size = [0, 0] padding = [0, 0] diff --git a/python/paddle/fluid/tests/unittests/test_squeeze_op.py b/python/paddle/fluid/tests/unittests/test_squeeze_op.py new file mode 100644 index 0000000000..bca6af2fd5 --- /dev/null +++ b/python/paddle/fluid/tests/unittests/test_squeeze_op.py @@ -0,0 +1,114 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
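The new test_squeeze_op.py that follows removes size-1 dimensions: an empty axes tuple means drop every size-1 axis, otherwise only the listed axes are dropped, with negative values counted from the end. A hedged sketch of the expected output-shape computation (squeeze_shape is an illustrative helper, not part of the test file):

def squeeze_shape(shape, axes):
    # axes == () means drop every size-1 dimension;
    # otherwise drop only the listed axes (negative values wrap).
    if not axes:
        return tuple(d for d in shape if d != 1)
    drop = {a % len(shape) for a in axes}
    return tuple(d for i, d in enumerate(shape) if i not in drop)

assert squeeze_shape((1, 3, 1, 5), ()) == (3, 5)
assert squeeze_shape((1, 3, 1, 5), (0, -2)) == (3, 5)
assert squeeze_shape((3, 1, 5, 1, 4, 1), (1, -1)) == (3, 5, 1, 4)

One caveat worth flagging in the tests below: TestSqueezeOpInplace2 spells its override inti_test_case, and since setUp calls init_test_case, that case silently inherits the base configuration.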
+ +import unittest +import numpy as np + +from op_test import OpTest + + +# Correct: General. +class TestSqueezeOp(OpTest): + def setUp(self): + self.op_type = "squeeze" + self.init_test_case() + self.inputs = {"X": np.random.random(self.ori_shape).astype("float32")} + self.init_attrs() + self.outputs = {"Out": self.inputs["X"].reshape(self.new_shape)} + + def test_check_output(self): + self.check_output() + + def test_check_grad(self): + self.check_grad(["X"], "Out") + + def init_test_case(self): + self.ori_shape = (1, 3, 1, 5) + self.axes = (0, 2) + self.new_shape = (3, 5) + + def init_attrs(self): + self.attrs = {"axes": self.axes, "inplace": False} + + +# Correct: There is a negative axis. +class TestSqueezeOp1(TestSqueezeOp): + def init_test_case(self): + self.ori_shape = (1, 3, 1, 5) + self.axes = (0, -2) + self.new_shape = (3, 5) + + +# Correct: No axes input. +class TestSqueezeOp2(TestSqueezeOp): + def init_test_case(self): + self.ori_shape = (1, 3, 1, 5) + self.axes = () + self.new_shape = (3, 5) + + +# Correct: Only a subset of the axes is squeezed. +class TestSqueezeOp3(TestSqueezeOp): + def init_test_case(self): + self.ori_shape = (3, 1, 5, 1, 4, 1) + self.axes = (1, -1) + self.new_shape = (3, 5, 1, 4) + + +# Correct: Inplace. +class TestSqueezeOpInplace1(TestSqueezeOp): + def init_test_case(self): + self.ori_shape = (1, 3, 1, 5) + self.axes = (0, 2) + self.new_shape = (3, 5) + + def init_attrs(self): + self.attrs = {"axes": self.axes, "inplace": True} + + +# Correct: Inplace. There is a negative axis. +class TestSqueezeOpInplace2(TestSqueezeOp): + def init_test_case(self): + self.ori_shape = (1, 3, 1, 5) + self.axes = (0, -2) + self.new_shape = (3, 5) + + def init_attrs(self): + self.attrs = {"axes": self.axes, "inplace": True} + + +# Correct: Inplace. No axes input. +class TestSqueezeOpInplace3(TestSqueezeOp): + def init_test_case(self): + self.ori_shape = (1, 3, 1, 5) + self.axes = () + self.new_shape = (3, 5) + + def init_attrs(self): + self.attrs = {"axes": self.axes, "inplace": True} + + +# Correct: Inplace. Only a subset of the axes is squeezed. +class TestSqueezeOpInplace4(TestSqueezeOp): + def init_test_case(self): + self.ori_shape = (3, 1, 5, 1, 4, 1) + self.axes = (1, -1) + self.new_shape = (3, 5, 1, 4) + + def init_attrs(self): + self.attrs = {"axes": self.axes, "inplace": True} + + +if __name__ == "__main__": + unittest.main() diff --git a/python/paddle/fluid/tests/unittests/test_sum_mkldnn_op.py b/python/paddle/fluid/tests/unittests/test_sum_mkldnn_op.py new file mode 100644 index 0000000000..7956897d68 --- /dev/null +++ b/python/paddle/fluid/tests/unittests/test_sum_mkldnn_op.py @@ -0,0 +1,26 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License.
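The squeeze cases above all reduce to a reshape that drops the listed size-1 dimensions, or every size-1 dimension when axes is empty. numpy's squeeze follows the same rule, which gives a quick cross-check of the expected shapes (a sketch only, not the Fluid operator):

    import numpy as np

    x = np.random.random((1, 3, 1, 5)).astype("float32")
    # axes=(0, 2): drop exactly those size-1 dims, as in TestSqueezeOp.
    assert np.squeeze(x, axis=(0, 2)).shape == (3, 5)
    # Empty axes: drop every size-1 dim, as in TestSqueezeOp2.
    assert np.squeeze(x).shape == (3, 5)

    y = np.random.random((3, 1, 5, 1, 4, 1)).astype("float32")
    # axes=(1, -1): only the listed dims go, as in TestSqueezeOp3.
    assert np.squeeze(y, axis=(1, -1)).shape == (3, 5, 1, 4)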
+ +import unittest + +from test_sum_op import TestSumOp + + +class TestMKLDNN(TestSumOp): + def init_kernel_type(self): + self.use_mkldnn = True + + +if __name__ == '__main__': + unittest.main() diff --git a/python/paddle/fluid/tests/unittests/test_sum_op.py b/python/paddle/fluid/tests/unittests/test_sum_op.py index 2faf5b1064..1d90414e13 100644 --- a/python/paddle/fluid/tests/unittests/test_sum_op.py +++ b/python/paddle/fluid/tests/unittests/test_sum_op.py @@ -20,12 +20,15 @@ from op_test import OpTest class TestSumOp(OpTest): def setUp(self): self.op_type = "sum" + self.use_mkldnn = False + self.init_kernel_type() x0 = np.random.random((3, 4)).astype('float32') x1 = np.random.random((3, 4)).astype('float32') x2 = np.random.random((3, 4)).astype('float32') self.inputs = {"X": [("x0", x0), ("x1", x1), ("x2", x2)]} y = x0 + x1 + x2 self.outputs = {'Out': y} + self.attrs = {'use_mkldnn': self.use_mkldnn} def test_check_output(self): self.check_output() @@ -33,6 +36,9 @@ class TestSumOp(OpTest): def test_check_grad(self): self.check_grad(['x0'], 'Out') + def init_kernel_type(self): + pass + if __name__ == "__main__": unittest.main() diff --git a/python/paddle/fluid/tests/unittests/test_target_assign_op.py b/python/paddle/fluid/tests/unittests/test_target_assign_op.py index ccb41e56c5..bd20889752 100644 --- a/python/paddle/fluid/tests/unittests/test_target_assign_op.py +++ b/python/paddle/fluid/tests/unittests/test_target_assign_op.py @@ -22,22 +22,23 @@ def gen_match_and_neg_indices(num_prior, gt_lod, neg_lod): if len(gt_lod) != len(neg_lod): raise AssertionError("The input arguments are illegal.") - batch_size = len(gt_lod) - 1 + batch_size = len(gt_lod) match_indices = -1 * np.ones((batch_size, num_prior)).astype('int32') - neg_indices = np.zeros((neg_lod[-1], 1)).astype('int32') + neg_indices = np.zeros((sum(neg_lod), 1)).astype('int32') + offset = 0 for n in range(batch_size): - gt_num = gt_lod[n + 1] - gt_lod[n] + gt_num = gt_lod[n] ids = random.sample([i for i in range(num_prior)], gt_num) match_indices[n, ids] = [i for i in range(gt_num)] ret_ids = set([i for i in range(num_prior)]) - set(ids) - s = neg_lod[n] - e = neg_lod[n + 1] - l = e - s + l = neg_lod[n] neg_ids = random.sample(ret_ids, l) - neg_indices[s:e, :] = np.array(neg_ids).astype('int32').reshape(l, 1) + neg_indices[offset:offset + neg_lod[n], :] = np.array(neg_ids).astype( + 'int32').reshape(l, 1) + offset += neg_lod[n] return match_indices, neg_indices @@ -56,24 +57,28 @@ def target_assign(encoded_box, gt_label, match_indices, neg_indices, gt_lod, # init weight for target label trg_label_wt = np.zeros((batch_size, num_prior, 1)).astype('float32') + gt_offset = 0 + neg_offset = 0 for i in range(batch_size): cur_indices = match_indices[i] col_ids = np.where(cur_indices > -1) col_val = cur_indices[col_ids] - gt_start = gt_lod[i] # target bbox - for v, c in zip(col_val + gt_start, col_ids[0].tolist()): + for v, c in zip(col_val + gt_offset, col_ids[0].tolist()): trg_box[i][c][:] = encoded_box[v][c][:] # weight for target bbox trg_box_wt[i][col_ids] = 1.0 - trg_label[i][col_ids] = gt_label[col_val + gt_start] + trg_label[i][col_ids] = gt_label[col_val + gt_offset] trg_label_wt[i][col_ids] = 1.0 # set target label weight to 1.0 for the negative samples if neg_indices is not None: - neg_ids = neg_indices[neg_lod[i]:neg_lod[i + 1]] + neg_ids = neg_indices[neg_offset:neg_offset + neg_lod[i]] trg_label_wt[i][neg_ids] = 1.0 + # update offset + gt_offset += gt_lod[i] + neg_offset += neg_lod[i] return trg_box, trg_box_wt, trg_label, 
trg_label_wt @@ -83,11 +88,11 @@ class TestTargetAssginFloatType(OpTest): self.op_type = "target_assign" num_prior = 120 num_class = 21 - gt_lod = [0, 5, 11, 23] - neg_lod = [0, 4, 7, 13] + gt_lod = [5, 6, 12] + neg_lod = [4, 3, 6] mismatch_value = 0 - batch_size = len(gt_lod) - 1 - num_gt = gt_lod[-1] + batch_size = len(gt_lod) + num_gt = sum(gt_lod) encoded_box = np.random.random((num_gt, num_prior, 4)).astype('float32') gt_label = np.random.randint( @@ -121,11 +126,11 @@ class TestTargetAssginIntType(OpTest): self.op_type = "target_assign" num_prior = 120 num_class = 21 - gt_lod = [0, 5, 11, 23] - neg_lod = [0, 4, 7, 13] + gt_lod = [5, 6, 12] + neg_lod = [4, 3, 6] mismatch_value = 0 - batch_size = len(gt_lod) - 1 - num_gt = gt_lod[-1] + batch_size = len(gt_lod) + num_gt = sum(gt_lod) encoded_box = np.random.random((num_gt, num_prior, 4)).astype('float32') gt_label = np.random.randint( diff --git a/python/paddle/fluid/tests/unittests/test_tensor.py b/python/paddle/fluid/tests/unittests/test_tensor.py index 379081c328..5ccc876ae8 100644 --- a/python/paddle/fluid/tests/unittests/test_tensor.py +++ b/python/paddle/fluid/tests/unittests/test_tensor.py @@ -25,8 +25,8 @@ class TestTensor(unittest.TestCase): tensor = var.get_tensor() - tensor.set_dims([1000, 784]) - tensor.alloc_int(place) + tensor._set_dims([1000, 784]) + tensor._alloc_int(place) tensor_array = numpy.array(tensor) self.assertEqual((1000, 784), tensor_array.shape) tensor_array[3, 9] = 1 @@ -44,8 +44,8 @@ class TestTensor(unittest.TestCase): tensor = var.get_tensor() - tensor.set_dims([1000, 784]) - tensor.alloc_float(place) + tensor._set_dims([1000, 784]) + tensor._alloc_float(place) tensor_array = numpy.array(tensor) self.assertEqual((1000, 784), tensor_array.shape) @@ -63,21 +63,20 @@ class TestTensor(unittest.TestCase): var_lod = scope.var("test_lod_tensor") lod_tensor = var_lod.get_tensor() - lod_tensor.set_dims([4, 4, 6]) - lod_tensor.alloc_int(place) + lod_tensor._set_dims([4, 4, 6]) + lod_tensor._alloc_int(place) array = numpy.array(lod_tensor) array[0, 0, 0] = 3 array[3, 3, 5] = 10 lod_tensor.set(array, place) - lod_tensor.set_lod([[0, 2, 4]]) + lod_tensor.set_recursive_sequence_lengths([[2, 2]]) lod_v = numpy.array(lod_tensor) self.assertTrue(numpy.alltrue(array == lod_v)) - lod = lod_tensor.lod() - self.assertEqual(0, lod[0][0]) + lod = lod_tensor.recursive_sequence_lengths() + self.assertEqual(2, lod[0][0]) self.assertEqual(2, lod[0][1]) - self.assertEqual(4, lod[0][2]) def test_float_lod_tensor(self): place = core.CPUPlace() @@ -85,8 +84,8 @@ class TestTensor(unittest.TestCase): var_lod = scope.var("test_lod_tensor") lod_tensor = var_lod.get_tensor() - lod_tensor.set_dims([5, 2, 3, 4]) - lod_tensor.alloc_float(place) + lod_tensor._set_dims([5, 2, 3, 4]) + lod_tensor._alloc_float(place) tensor_array = numpy.array(lod_tensor) self.assertEqual((5, 2, 3, 4), tensor_array.shape) @@ -97,22 +96,21 @@ class TestTensor(unittest.TestCase): lod_v = numpy.array(lod_tensor) self.assertAlmostEqual(1.0, lod_v[0, 0, 0, 0]) self.assertAlmostEqual(2.0, lod_v[0, 0, 0, 1]) - self.assertEqual(len(lod_tensor.lod()), 0) + self.assertEqual(len(lod_tensor.recursive_sequence_lengths()), 0) - lod_py = [[0, 2, 5], [0, 2, 4, 5]] - lod_tensor.set_lod(lod_py) - lod = lod_tensor.lod() + lod_py = [[2, 1], [1, 2, 2]] + lod_tensor.set_recursive_sequence_lengths(lod_py) + lod = lod_tensor.recursive_sequence_lengths() self.assertListEqual(lod_py, lod) def test_lod_tensor_init(self): - scope = core.Scope() place = core.CPUPlace() - lod_py = [[0, 2, 5], 
[0, 2, 4, 5]] + lod_py = [[2, 1], [1, 2, 2]] lod_tensor = core.LoDTensor() - lod_tensor.set_dims([5, 2, 3, 4]) - lod_tensor.set_lod(lod_py) - lod_tensor.alloc_float(place) + lod_tensor._set_dims([5, 2, 3, 4]) + lod_tensor.set_recursive_sequence_lengths(lod_py) + lod_tensor._alloc_float(place) tensor_array = numpy.array(lod_tensor) tensor_array[0, 0, 0, 0] = 1.0 tensor_array[0, 0, 0, 1] = 2.0 @@ -121,18 +119,18 @@ class TestTensor(unittest.TestCase): lod_v = numpy.array(lod_tensor) self.assertAlmostEqual(1.0, lod_v[0, 0, 0, 0]) self.assertAlmostEqual(2.0, lod_v[0, 0, 0, 1]) - self.assertListEqual(lod_py, lod_tensor.lod()) + self.assertListEqual(lod_py, lod_tensor.recursive_sequence_lengths()) def test_lod_tensor_gpu_init(self): if not core.is_compiled_with_cuda(): return place = core.CUDAPlace(0) - lod_py = [[0, 2, 5], [0, 2, 4, 5]] + lod_py = [[2, 1], [1, 2, 2]] lod_tensor = core.LoDTensor() - lod_tensor.set_dims([5, 2, 3, 4]) - lod_tensor.set_lod(lod_py) - lod_tensor.alloc_float(place) + lod_tensor._set_dims([5, 2, 3, 4]) + lod_tensor.set_recursive_sequence_lengths(lod_py) + lod_tensor._alloc_float(place) tensor_array = numpy.array(lod_tensor) tensor_array[0, 0, 0, 0] = 1.0 tensor_array[0, 0, 0, 1] = 2.0 @@ -141,7 +139,7 @@ class TestTensor(unittest.TestCase): lod_v = numpy.array(lod_tensor) self.assertAlmostEqual(1.0, lod_v[0, 0, 0, 0]) self.assertAlmostEqual(2.0, lod_v[0, 0, 0, 1]) - self.assertListEqual(lod_py, lod_tensor.lod()) + self.assertListEqual(lod_py, lod_tensor.recursive_sequence_lengths()) def test_empty_tensor(self): place = core.CPUPlace() @@ -150,15 +148,15 @@ class TestTensor(unittest.TestCase): tensor = var.get_tensor() - tensor.set_dims([0, 1]) - tensor.alloc_float(place) + tensor._set_dims([0, 1]) + tensor._alloc_float(place) tensor_array = numpy.array(tensor) self.assertEqual((0, 1), tensor_array.shape) if core.is_compiled_with_cuda(): gpu_place = core.CUDAPlace(0) - tensor.alloc_float(gpu_place) + tensor._alloc_float(gpu_place) tensor_array = numpy.array(tensor) self.assertEqual((0, 1), tensor_array.shape) diff --git a/python/paddle/fluid/tests/unittests/test_top_k_op.py b/python/paddle/fluid/tests/unittests/test_top_k_op.py index cc2fcc5ec0..cbc3da5503 100644 --- a/python/paddle/fluid/tests/unittests/test_top_k_op.py +++ b/python/paddle/fluid/tests/unittests/test_top_k_op.py @@ -28,7 +28,7 @@ class TestTopkOp(OpTest): self.inputs = {'X': input} self.attrs = {'k': k} - for rowid in xrange(32): + for rowid in range(32): row = input[rowid] output[rowid] = np.sort(row)[-k:] indices[rowid] = row.argsort()[-k:] @@ -52,7 +52,7 @@ class TestTopkOp3d(OpTest): self.inputs = {'X': input_flat_2d} self.attrs = {'k': k} - for rowid in xrange(64): + for rowid in range(64): row = input_flat_2d[rowid] output[rowid] = np.sort(row)[-k:] indices[rowid] = row.argsort()[-k:] diff --git a/python/paddle/fluid/tests/unittests/test_unpool_op.py b/python/paddle/fluid/tests/unittests/test_unpool_op.py index a97d6dfdda..ecce4cdde2 100644 --- a/python/paddle/fluid/tests/unittests/test_unpool_op.py +++ b/python/paddle/fluid/tests/unittests/test_unpool_op.py @@ -22,10 +22,10 @@ def unpool2dmax_forward_naive(input, indices, ksize, strides, paddings): out_hsize = (s2 - 1) * strides[0] - 2 * paddings[0] + ksize[0] out_wsize = (s2 - 1) * strides[1] - 2 * paddings[1] + ksize[1] out = np.zeros((s0, s1, out_hsize, out_wsize)) - for nidx in xrange(s0): - for cidx in xrange(s1): - for h in xrange(s2): - for w in xrange(s3): + for nidx in range(s0): + for cidx in range(s1): + for h in range(s2): + for w in 
range(s3): index = indices[nidx, cidx, h, w] hidx = (index - index % out_wsize) / out_wsize widx = index % out_wsize @@ -47,16 +47,16 @@ class TestUnpoolOp(OpTest): self.strides[1] + 1 input = np.zeros((nsize, csize, hsize_out, wsize_out)) indices = np.zeros((nsize, csize, hsize_out, wsize_out)) - for i in xrange(hsize_out): - for j in xrange(wsize_out): + for i in range(hsize_out): + for j in range(wsize_out): r_start = np.max((i * self.strides[0] - self.paddings[0], 0)) r_end = np.min((i * self.strides[0] + self.ksize[0] - \ self.paddings[0], hsize)) c_start = np.max((j * self.strides[1] - self.paddings[1], 0)) c_end = np.min((j * self.strides[1] + self.ksize[1] - \ self.paddings[1], wsize)) - for nidx in xrange(nsize): - for cidx in xrange(csize): + for nidx in range(nsize): + for cidx in range(csize): x_masked = pre_input[nidx, cidx, r_start:r_end, \ c_start:c_end] input[nidx, cidx, i, j] = x_masked.max() diff --git a/python/paddle/fluid/tests/unittests/test_unsqueeze_op.py b/python/paddle/fluid/tests/unittests/test_unsqueeze_op.py new file mode 100644 index 0000000000..7a4aa0a40b --- /dev/null +++ b/python/paddle/fluid/tests/unittests/test_unsqueeze_op.py @@ -0,0 +1,111 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import unittest +import numpy as np + +from op_test import OpTest + + +# Correct: General. +class TestUnsqueezeOp(OpTest): + def setUp(self): + self.init_test_case() + self.op_type = "unsqueeze" + self.inputs = {"X": np.random.random(self.ori_shape).astype("float32")} + self.init_attrs() + self.outputs = {"Out": self.inputs["X"].reshape(self.new_shape)} + + def test_check_output(self): + self.check_output() + + def test_check_grad(self): + self.check_grad(["X"], "Out") + + def init_test_case(self): + self.ori_shape = (3, 5) + self.axes = (1, 2) + self.new_shape = (3, 1, 1, 5) + + def init_attrs(self): + self.attrs = {"axes": self.axes, "inplace": False} + + +# Correct: Single input index. +class TestUnsqueezeOp1(TestUnsqueezeOp): + def init_test_case(self): + self.ori_shape = (3, 5) + self.axes = (-1, ) + self.new_shape = (3, 5, 1) + + +# Correct: Mixed input axis. +class TestUnsqueezeOp2(TestUnsqueezeOp): + def init_test_case(self): + self.ori_shape = (3, 5) + self.axes = (0, -1) + self.new_shape = (1, 3, 5, 1) + + +# Correct: There is duplicated axis. +class TestUnsqueezeOp3(TestUnsqueezeOp): + def init_test_case(self): + self.ori_shape = (3, 2, 5) + self.axes = (0, 3, 3) + self.new_shape = (1, 3, 2, 1, 1, 5) + + +# Correct: Reversed axes. +class TestUnsqueezeOp4(TestUnsqueezeOp): + def init_test_case(self): + self.ori_shape = (3, 2, 5) + self.axes = (3, 1, 1) + self.new_shape = (3, 1, 1, 2, 5, 1) + + +# Correct: Inplace. +class TestUnsqueezeOpInplace1(TestUnsqueezeOp): + def init_test_case(self): + self.ori_shape = (3, 5) + self.axes = (0, 2) + self.new_shape = (1, 3, 1, 5) + + def init_attrs(self): + self.attrs = {"axes": self.axes, "inplace": True} + + +# Correct: Inplace. There is mins index. 
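+# (i.e. the axes tuple holds a negative index, counted from the end of the output shape.)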
+class TestUnsqueezeOpInplace2(TestUnsqueezeOp): + def init_test_case(self): + self.ori_shape = (3, 5) + self.axes = (0, -2) + self.new_shape = (1, 3, 1, 5) + + def init_attrs(self): + self.attrs = {"axes": self.axes, "inplace": True} + + +# Correct: Inplace. There is duplicated axis. +class TestUnsqueezeOpInplace3(TestUnsqueezeOp): + def init_test_case(self): + self.ori_shape = (3, 2, 5) + self.axes = (0, 3, 3) + self.new_shape = (1, 3, 2, 1, 1, 5) + + def init_attrs(self): + self.attrs = {"axes": self.axes, "inplace": True} + + +if __name__ == "__main__": + unittest.main() diff --git a/python/paddle/fluid/tests/unittests/test_version.py b/python/paddle/fluid/tests/unittests/test_version.py new file mode 100644 index 0000000000..a09c8a759b --- /dev/null +++ b/python/paddle/fluid/tests/unittests/test_version.py @@ -0,0 +1,48 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import unittest +import re + +import paddle.version as fluid_version + + +class VersionTest(unittest.TestCase): + def setUp(self): + self._major_regex = "[0-9]+" + self._minor_regex = "[0-9]+" + self._patch_regex = "[0-9]+(\\.(a|b|rc)\\.[0-9]+)?" + self._rc_regex = "[0-9]+" + self._version_regex = "[0-9]+\\.[0-9]+\\.[0-9]+(\\.(a|b|rc)\\.[0-9]+)?" 
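+        # For example, "1.4.0" and "1.4.0.rc.0" (illustrative values) both
+        # match _version_regex.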
+ self._commit_regex = "[0-9a-f]{5,49}" + + def test_check_output(self): + # check commit format + self.assertTrue(re.match(self._commit_regex, fluid_version.commit)) + self.assertTrue(isinstance(fluid_version.istaged, bool)) + + # check version format + if fluid_version.istaged: + self.assertEqual(fluid_version.major, 0) + self.assertEqual(fluid_version.minor, 0) + self.assertEqual(fluid_version.patch, "0") + self.assertEqual(fluid_version.rc, 0) + self.assertEqual(fluid_version.full_version, "0.0.0") + else: + self.assertTrue(re.match(self._major_regex, fluid_version.major)) + self.assertTrue(re.match(self._minor_regex, fluid_version.minor)) + self.assertTrue(re.match(self._patch_regex, fluid_version.patch)) + self.assertTrue(re.match(self._rc_regex, fluid_version.rc)) + self.assertTrue( + re.match(self._version_regex, fluid_version.full_version)) diff --git a/python/paddle/fluid/tests/unittests/test_warpctc_op.py b/python/paddle/fluid/tests/unittests/test_warpctc_op.py index ac638f7836..9f1aaee472 100644 --- a/python/paddle/fluid/tests/unittests/test_warpctc_op.py +++ b/python/paddle/fluid/tests/unittests/test_warpctc_op.py @@ -34,8 +34,8 @@ class CTCForward(object): self.level = 0 self.num_classes = softmax.shape[1] - self.batch_size = len(softmax_lod[self.level]) - 1 - assert self.batch_size == len(labels_lod[self.level]) - 1 + self.batch_size = len(softmax_lod[self.level]) + assert self.batch_size == len(labels_lod[self.level]) self.loss = np.zeros([self.batch_size, 1], dtype="float32") self.gradient = np.zeros(self.softmax.shape, dtype="float32") @@ -156,16 +156,20 @@ class CTCForward(object): return -log_prob def forward(self): + softmax_offset = 0 + labels_offset = 0 for i in range(self.batch_size): - softmax_start_i = self.softmax_lod[self.level][i] - softmax_end_i = self.softmax_lod[self.level][i + 1] - labels_start_i = self.labels_lod[self.level][i] - labels_end_i = self.labels_lod[self.level][i + 1] + softmax_start_i = softmax_offset + softmax_end_i = softmax_offset + self.softmax_lod[self.level][i] + labels_start_i = labels_offset + labels_end_i = labels_offset + self.labels_lod[self.level][i] softmax_a_sequence = self.softmax[softmax_start_i:softmax_end_i, :] labels_a_sequence = self.labels[labels_start_i:labels_end_i, :] self.loss[i] = self.forward_a_sequence(softmax_a_sequence, labels_a_sequence) + softmax_offset += self.softmax_lod[self.level][i] + labels_offset += self.labels_lod[self.level][i] return self.loss @@ -173,8 +177,8 @@ class TestWarpCTCOp(OpTest): def config(self): self.batch_size = 4 self.num_classes = 8 - self.logits_lod = [[0, 4, 5, 8, 11]] - self.labels_lod = [[0, 3, 4, 8, 12]] + self.logits_lod = [[4, 1, 3, 3]] + self.labels_lod = [[3, 1, 4, 4]] self.blank = self.num_classes - 1 self.norm_by_times = False @@ -184,11 +188,13 @@ class TestWarpCTCOp(OpTest): logits = np.random.uniform( 0.1, 1.0, - [self.logits_lod[0][-1], self.num_classes]).astype("float32") + [sum(self.logits_lod[0]), self.num_classes]).astype("float32") softmax = np.apply_along_axis(stable_softmax, 1, logits) # labels should not be blank labels = np.random.randint( - 0, self.num_classes - 1, [self.labels_lod[0][-1], 1], dtype="int32") + 0, + self.num_classes - 1, [sum(self.labels_lod[0]), 1], + dtype="int32") ctc = CTCForward(softmax, self.logits_lod, labels, self.labels_lod, self.blank, self.norm_by_times) @@ -196,9 +202,8 @@ class TestWarpCTCOp(OpTest): max_sequence_length = 0 for i in range(self.batch_size): - max_sequence_length = max( - max_sequence_length, - self.logits_lod[0][i + 
1] - self.logits_lod[0][i]) + max_sequence_length = max(max_sequence_length, + self.logits_lod[0][i]) self.gradient = np.zeros( [max_sequence_length, self.batch_size, self.num_classes], dtype="float32") @@ -222,8 +227,8 @@ class TestWarpCTCOpCase1(TestWarpCTCOp): def config(self): self.batch_size = 4 self.num_classes = CUDA_BLOCK_SIZE + 2 - self.logits_lod = [[0, 4, 5, 8, 11]] - self.labels_lod = [[0, 3, 4, 8, 12]] + self.logits_lod = [[4, 1, 3, 3]] + self.labels_lod = [[3, 1, 4, 4]] self.blank = 0 self.norm_by_times = False diff --git a/python/paddle/fluid/tests/unittests/test_weight_normalization.py b/python/paddle/fluid/tests/unittests/test_weight_normalization.py index 2adf917bc5..436f9b9f86 100644 --- a/python/paddle/fluid/tests/unittests/test_weight_normalization.py +++ b/python/paddle/fluid/tests/unittests/test_weight_normalization.py @@ -76,11 +76,11 @@ class TestWeightNormalization(unittest.TestCase): lod_level_i = numpy.random.randint( low=1, high=5, - size=self.batch_size if i == 0 else lod_level_i[-1]) - lod_level_i = [0] + numpy.cumsum(lod_level_i).tolist() + size=self.batch_size + if i == 0 else sum(lod_level_i)).tolist() data_lod.append(lod_level_i) data_value = numpy.random.random( - size=[data_lod[-1][-1] if data_lod else self.batch_size + size=[sum(data_lod[-1]) if data_lod else self.batch_size ] + data_shape).astype('float32') self.data[data_name] = (data_value, data_lod) @@ -90,7 +90,7 @@ class TestWeightNormalization(unittest.TestCase): tensor = fluid.Tensor() tensor.set(self.data[desc[0]][0], place) if self.data[desc[0]][1]: - tensor.set_lod(self.data[desc[0]][1]) + tensor.set_recursive_sequence_lengths(self.data[desc[0]][1]) self.inputs[desc[0]] = tensor def weight_normalize(self): diff --git a/python/paddle/fluid/tests/unittests/test_while_op.py b/python/paddle/fluid/tests/unittests/test_while_op.py index fe8808bc04..790e6afe5f 100644 --- a/python/paddle/fluid/tests/unittests/test_while_op.py +++ b/python/paddle/fluid/tests/unittests/test_while_op.py @@ -66,7 +66,7 @@ class TestWhileOp(unittest.TestCase): exe = Executor(cpu) d = [] - for i in xrange(3): + for i in range(3): d.append(numpy.random.random(size=[10]).astype('float32')) outs = exe.run(feed={'d0': d[0], diff --git a/python/paddle/fluid/tests/unittests/testsuite.py b/python/paddle/fluid/tests/unittests/testsuite.py new file mode 100644 index 0000000000..c6e176ca31 --- /dev/null +++ b/python/paddle/fluid/tests/unittests/testsuite.py @@ -0,0 +1,190 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
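Every LoD change in this patch is the same mechanical rewrite: an offset-style LoD such as [[0, 3, 9, 10]] becomes the per-sequence lengths [[3, 6, 1]] that set_recursive_sequence_lengths expects. A minimal sketch of the conversion in both directions (helper names are illustrative, not part of the codebase):

    def offsets_to_lengths(level):
        # [0, 3, 9, 10] -> [3, 6, 1]
        return [level[i + 1] - level[i] for i in range(len(level) - 1)]

    def lengths_to_offsets(level):
        # [3, 6, 1] -> [0, 3, 9, 10]
        offsets = [0]
        for length in level:
            offsets.append(offsets[-1] + length)
        return offsets

    assert offsets_to_lengths([0, 3, 9, 10]) == [3, 6, 1]
    assert lengths_to_offsets([3, 6, 1]) == [0, 3, 9, 10]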
+ +import numpy as np + +import paddle.fluid.core as core +from paddle.fluid.op import Operator + + +def create_op(scope, op_type, inputs, outputs, attrs): + kwargs = dict() + + op_maker = core.op_proto_and_checker_maker + op_role_attr_name = op_maker.kOpRoleAttrName() + + if op_role_attr_name not in attrs: + attrs[op_role_attr_name] = int(op_maker.OpRole.Forward) + + def __create_var__(name, var_name): + scope.var(var_name).get_tensor() + kwargs[name].append(var_name) + + for in_name, in_dup in Operator.get_op_inputs(op_type): + if in_name in inputs: + kwargs[in_name] = [] + if in_dup: + sub_in = inputs[in_name] + for item in sub_in: + sub_in_name, _ = item[0], item[1] + __create_var__(in_name, sub_in_name) + else: + __create_var__(in_name, in_name) + + for out_name, out_dup in Operator.get_op_outputs(op_type): + if out_name in outputs: + kwargs[out_name] = [] + if out_dup: + sub_out = outputs[out_name] + for item in sub_out: + sub_out_name, _ = item[0], item[1] + __create_var__(out_name, sub_out_name) + else: + __create_var__(out_name, out_name) + + for attr_name in Operator.get_op_attr_names(op_type): + if attr_name in attrs: + kwargs[attr_name] = attrs[attr_name] + + return Operator(op_type, **kwargs) + + +def set_input(scope, op, inputs, place): + def np_value_to_fluid_value(input): + if input.dtype == np.float16: + input = input.view(np.uint16) + return input + + def __set_input__(var_name, var): + if isinstance(var, tuple) or isinstance(var, np.ndarray): + tensor = scope.find_var(var_name).get_tensor() + if isinstance(var, tuple): + tensor.set_recursive_sequence_lengths(var[1]) + var = var[0] + tensor._set_dims(var.shape) + tensor.set(np_value_to_fluid_value(var), place) + elif isinstance(var, float): + scope.find_var(var_name).set_float(var) + elif isinstance(var, int): + scope.find_var(var_name).set_int(var) + + for in_name, in_dup in Operator.get_op_inputs(op.type()): + if in_name in inputs: + if in_dup: + sub_in = inputs[in_name] + for item in sub_in: + sub_in_name, sub_in_val = item[0], item[1] + __set_input__(sub_in_name, sub_in_val) + else: + __set_input__(in_name, inputs[in_name]) + + +def append_input_output(block, op_proto, np_list, is_input, dtype): + '''Insert VarDesc and generate Python variable instance''' + proto_list = op_proto.inputs if is_input else op_proto.outputs + + def create_var(block, name, np_list, var_proto): + dtype = None + shape = None + lod_level = None + if name not in np_list: + assert var_proto.intermediate, "{} not found".format(name) + else: + # inferece the dtype from numpy value. + np_value = np_list[name] + if isinstance(np_value, tuple): + dtype = np_value[0].dtype + # output shape, lod should be infered from input. + if is_input: + shape = list(np_value[0].shape) + lod_level = len(np_value[1]) + else: + dtype = np_value.dtype + if is_input: + shape = list(np_value.shape) + lod_level = 0 + # NOTE(dzhwinter): type hacking + # numpy float16 is binded to paddle::platform::float16 + # in tensor_py.h via the help of uint16 datatype. Because + # the internal memory representation of float16 is + # actually uint16_t in paddle. So we use np.uint16 in numpy for + # raw memory, it can pass through the pybind. So in the testcase, + # we feed data use data.view(uint16), but the dtype is float16 in fact. 
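+            # For example (illustrative value, not used by any test):
+            #   np.array([1.0], np.float16).view(np.uint16) -> array([15360], dtype=uint16),
+            # i.e. the same two raw bytes, merely retyped.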
+ # The data.view(uint16) means do not cast the data type, but process data as the uint16 + if dtype == np.uint16: + dtype = np.float16 + return block.create_var( + dtype=dtype, shape=shape, lod_level=lod_level, name=name) + + var_dict = {} + for var_proto in proto_list: + var_name = str(var_proto.name) + if is_input: + if (var_name not in np_list) and var_proto.dispensable: + continue + assert (var_name in np_list) or (var_proto.dispensable), \ + "Missing {} as input".format(var_name) + if var_proto.duplicable: + assert isinstance(np_list[var_name], list), \ + "Duplicable {} should be set as list".format(var_name) + var_list = [] + for (name, np_value) in np_list[var_name]: + var_list.append( + create_var(block, name, {name: np_value}, var_proto)) + var_dict[var_name] = var_list + else: + var_dict[var_name] = create_var(block, var_name, np_list, var_proto) + + return var_dict + + +def append_loss_ops(block, output_names): + mean_inputs = list(map(block.var, output_names)) + # for item in mean_inputs: + # print(item) + # print("Item", item.dtype) + + if len(mean_inputs) == 1: + loss = block.create_var(dtype=mean_inputs[0].dtype, shape=[1]) + op = block.append_op( + inputs={"X": mean_inputs}, outputs={"Out": loss}, type='mean') + op.desc.infer_var_type(block.desc) + op.desc.infer_shape(block.desc) + else: + avg_sum = [] + for cur_loss in mean_inputs: + cur_avg_loss = block.create_var(dtype=cur_loss.dtype, shape=[1]) + op = block.append_op( + inputs={"X": [cur_loss]}, + outputs={"Out": [cur_avg_loss]}, + type="mean") + op.desc.infer_var_type(block.desc) + op.desc.infer_shape(block.desc) + avg_sum.append(cur_avg_loss) + + loss_sum = block.create_var(dtype=avg_sum[0].dtype, shape=[1]) + op_sum = block.append_op( + inputs={"X": avg_sum}, outputs={"Out": loss_sum}, type='sum') + op_sum.desc.infer_var_type(block.desc) + op_sum.desc.infer_shape(block.desc) + + loss = block.create_var(dtype=loss_sum.dtype, shape=[1]) + op_loss = block.append_op( + inputs={"X": loss_sum}, + outputs={"Out": loss}, + type='scale', + attrs={'scale': 1.0 / float(len(avg_sum))}) + op_loss.desc.infer_var_type(block.desc) + op_loss.desc.infer_shape(block.desc) + return loss diff --git a/python/paddle/fluid/tests/unittests/transformer_model.py b/python/paddle/fluid/tests/unittests/transformer_model.py index c62792face..868a0248be 100644 --- a/python/paddle/fluid/tests/unittests/transformer_model.py +++ b/python/paddle/fluid/tests/unittests/transformer_model.py @@ -22,7 +22,7 @@ pos_enc_param_names = ( "src_pos_enc_table", "trg_pos_enc_table", ) -batch_size = 64 +batch_size = 2 def position_encoding_init(n_position, d_pos_vec): @@ -118,8 +118,9 @@ def multi_head_attention(queries, # FIXME(guosheng): Decouple the program desc with batch_size. 
return layers.reshape( x=trans_x, - shape=map(int, - [batch_size, -1, trans_x.shape[2] * trans_x.shape[3]])) + shape=list( + map(int, [batch_size, -1, trans_x.shape[2] * trans_x.shape[3] + ]))) def scaled_dot_product_attention(q, k, v, attn_bias, d_model, dropout_rate): """ @@ -403,7 +404,7 @@ def transformer( trg_pad_idx, pos_pad_idx, ): file_obj = fluid.layers.open_recordio_file( - filename='./wmt16.recordio', + filename='/tmp/wmt16.recordio', shapes=[ [batch_size * max_length, 1], [batch_size * max_length, 1], diff --git a/python/paddle/fluid/trainer.py b/python/paddle/fluid/trainer.py index d44cb16bfb..eed9b49ef4 100644 --- a/python/paddle/fluid/trainer.py +++ b/python/paddle/fluid/trainer.py @@ -12,79 +12,264 @@ # See the License for the specific language governing permissions and # limitations under the License. -import os -import core -import framework -import executor -import data_feeder import contextlib -import io -import transpiler +import os +import errno +import shutil +import time +from . import core +from . import data_feeder +from . import executor +from . import framework +from . import io # optimizer is same as the parameter of Trainer.__init__. Rename it to opt_module -import optimizer as opt_module -from transpiler import distribute_transpiler +from . import optimizer as opt_module +from . import parallel_executor +from .transpiler import distribute_transpiler __all__ = [ - 'Trainer', - 'BeginEpochEvent', - 'EndEpochEvent', - 'BeginStepEvent', - 'EndStepEvent', + 'Trainer', 'BeginEpochEvent', 'EndEpochEvent', 'BeginStepEvent', + 'EndStepEvent', 'CheckpointConfig' ] class BeginEpochEvent(object): + """ + The begin of a training epoch. + + Args: + epoch_id(int): The current epoch ID. + """ + def __init__(self, epoch_id): self.epoch = epoch_id class EndEpochEvent(object): + """ + The end of a training epoch. + + Args: + epoch_id(int): The current epoch ID. + """ + def __init__(self, epoch_id): self.epoch = epoch_id class BeginStepEvent(object): + """ + The begin of a training epoch. + + Args: + epoch_id(int): The current epoch ID. + step_id(int): The current step ID. + """ + def __init__(self, epoch_id, step_id): self.epoch = epoch_id self.step = step_id + self.fetch_metrics = True + """ + If fetch_metrics is true, the metrics will be fetched at the + EndStepEvent. Default is True. + """ class EndStepEvent(object): - def __init__(self, epoch_id, step_id): + """ + The end of a training step. + + Args: + epoch_id(int): The current epoch ID. + step_id(int): The current step ID. + metrics(list): A list of fetched tensor. The order of this list is same + as the :code:`train_func` returns. + """ + + def __init__(self, epoch_id, step_id, metrics): self.epoch = epoch_id self.step = step_id + self.metrics = metrics + + +class CheckpointConfig(object): + """ + Parameter object for :code:`save_checkpoint` and + :code:`fluid.Trainer`. Used to configuration how to save checkpoint. + + Args: + checkpoint_dir(str): Directory path to save check point. Default is the + current directory. + + max_num_checkpoints(int): The max number of local check points. + epoch_interval(int): Every number of epoch to save check point. + step_interval(int): Every number of step to save check point. + + Examples: + >>> config = fluid.CheckpointConfig("./checkpoints") + >>> trainer = fluid.Trainer(train_func=train_program, + >>> place=place, + >>> optimizer_func=optimizer_func, + >>> checkpoint_config=config) + >>> trainer.train(...) 
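+        >>> # With the defaults above (epoch_interval=1, step_interval=10,
+        >>> # max_num_checkpoints=3), a checkpoint is written at every step
+        >>> # whose id is a multiple of 10, and only the newest three
+        >>> # checkpoints are kept on disk.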
+ """ + + def __init__(self, + checkpoint_dir=None, + max_num_checkpoints=3, + epoch_interval=1, + step_interval=10): + + assert epoch_interval >= 1 + assert step_interval >= 1 + + self.checkpoint_dir = checkpoint_dir \ + if checkpoint_dir is not None else os.getcwd() + self.max_num_checkpoints = max_num_checkpoints + self.epoch_interval = epoch_interval + self.step_interval = step_interval + self.epoch_id = 0 + self.step_id = 0 + self.load_serial = None + self.pserver_id = None + self.lookup_table_name = None + + +def check_and_get_place(place): + """ + Check the type of place or get the default place + Args: + place(None|core.CUDAPlace|core.CPUPlace): the place that trainer will be executed on. + + Raises: + TypeError if the type mismatched. + + Returns: + the original place if it is not None. + if fluid is compiled with CUDA, returns CUDAPlace(0) by default. + Otherwise returns CPUPlace by default. + """ + if place is None: + if core.is_compiled_with_cuda(): + return core.CUDAPlace(0) + else: + return core.CPUPlace() + else: + if not isinstance(place, core.CUDAPlace) and not isinstance( + place, core.CPUPlace): + raise TypeError("Place should be either CUDAPlace or CPUPlace") + return place class Trainer(object): """ + A trainer wraps MultiGPU/MultiNode training loops and can be used to train a + simple neural network easily. + + This API takes a :code:`train_func`. A :code:`train_func` is a function that + return loss as it first return value. The reset value can be fetched by + EndStepEvent.metrics + + This API also takes a :code:`optimizer_func` that will return an optimizer + instance. + + For example, to train a MLP for MNIST dataset, the sample program is + + >>> import paddle.fluid as fluid + >>> + >>> def mlp(image, layer_sizes=[200, 100], activation="relu", num_classes=10): + >>> hidden = image + >>> for layer_size in layer_sizes: + >>> hidden = fluid.layers.fc(input=hidden, size=layer_size, act=activation) + >>> return fluid.layers.fc(input=hidden, size=num_classes, act="softmax") + >>> + >>> def train_mnist_mlp(): + >>> img = fluid.layers.data(name='image', shape=[784]) + >>> label = fluid.layers.data(name='label', shape=[1], dtype='int64') + >>> prediction = mlp(img) + >>> return fluid.layers.mean(fluid.layers.cross_entropy(prediction, label)) + >>> + >>> def optimizer(): + >>> return fluid.optimizer.Adam() + >>> + >>> trainer = Trainer(train_func=train_mnist_mlp, + >>> optimizer_func=optimizer, + >>> place=fluid.CUDAPlace(0), + >>> parallel=True) + >>> + >>> def train_callback(event): + >>> if isinstance(event, fluid.EndStepEvent): + >>> print "Epoch ID", event.epoch, "Step ID",\ + >>> event.step, "AvgLoss", event.metrics[0] + >>> elif isinstance(event, fluid.EndEpochEvent): + >>> trainer.save_params("./model_{0}".format(event.epoch)) + >>> + >>> trainer.train(num_epochs=100, event_handler=train_callback) + + For more example, please see :ref:`api_guide_high_level_api`. + Args: - program_func(callable): A function which will return loss. The loss must be a scaler. - optimizer(optimizer.Optimizer): The optimizer should be an instance of Optimizer - place: The device place of this trainer. + train_func(callable): A function which will return loss. The loss must be + a scalar tensor. + optimizer_func(callable): A function that returns an Optimizer object. + place(CUDAPlace|CPUPlace): The device place of this trainer. If + :code:`parallel=True,` all CUDA Places will be used if :code:`place` + is a :code:`CUDAPlace`. + parallel(bool): True if use multiple devices. 
+ checkpoint_config(CheckpointConfig): Configuration about how to save + checkpoints. """ - def __init__(self, program_func, optimizer, param_path=None, place=None): + def __init__(self, + train_func, + optimizer_func, + param_path=None, + place=None, + parallel=False, + checkpoint_config=None): + self.__stop = False + self.parallel = parallel + + # config for checkpoint + # only chief worker will save variables + self.trainer_id = 0 + self.checkpoint_cfg = checkpoint_config + if self.checkpoint_cfg: + assert isinstance(self.checkpoint_cfg, CheckpointConfig) + serial = _get_latest_checkpoint_serial( + self.checkpoint_cfg.checkpoint_dir) + self.checkpoint_cfg.load_serial = serial if serial >= 0 else None + + self.scope = core.Scope() + # 1. we need to generate a framework.Program by calling # program_func. Reference: fluid.program_guard in # test_word2vec.py - self.scope = core.Scope() self.startup_program = framework.Program() self.train_program = framework.Program() with framework.program_guard(self.train_program, self.startup_program): - loss = program_func() + program_func_outs = train_func() + self.train_func_outputs = program_func_outs if isinstance( + program_func_outs, list) else [program_func_outs] + self.test_program = self.train_program.clone(for_test=True) + + # The first element of program_func_outs is loss. + loss = self.train_func_outputs[0] + + optimizer = optimizer_func() if not isinstance(optimizer, opt_module.Optimizer): raise TypeError( "The optimizer should be an instance of Optimizer") - optimize_ops, params_grads = optimizer.minimize(loss) - self.place = Trainer._check_and_get_place(place) + self.place = check_and_get_place(place) - self.dist_transpile_if_necessary(optimize_ops, params_grads) + self._dist_transpile_if_necessary(optimize_ops, params_grads) # 2. 
move the default_main_program to self.program and run the # default_startup program on an empty core.Scope() @@ -93,11 +278,50 @@ class Trainer(object): exe = executor.Executor(place) exe.run(self.startup_program) - if param_path: + if self.checkpoint_cfg and self.checkpoint_cfg.load_serial is not None: + self._load_checkpoint() + + if param_path and os.path.isdir(param_path): # load params from param_path into scope - io.load_persistables(exe, dirname=param_path) + io.load_persistables( + executor=exe, + dirname=param_path, + main_program=self.startup_program) + + def _transpile_nccl2_dist(self): + # PADDLE_TRAINER_IPS + if "PADDLE_TRAINER_IPS" not in os.environ: + self.nccl_id_var = None + else: + self.trainer_id = int(os.getenv("PADDLE_TRAINER_ID")) + port = os.getenv("PADDLE_PSERVER_PORT") + worker_ips = os.getenv("PADDLE_TRAINER_IPS") + worker_endpoints = [] + for ip in worker_ips.split(","): + worker_endpoints.append(':'.join([ip, port])) + self.num_trainers = len(worker_endpoints) + current_endpoint = os.getenv("PADDLE_CURRENT_IP") + ":" + port + worker_endpoints.remove(current_endpoint) + # TODO(wuyi): use self.nccl_id_var, self.num_trainers and self.trainer_id + # in ParallelExecutor to start + # distributed training using NCCL2 + self.nccl_id_var = self.startup_program.global_block().create_var( + name="NCCLID", persistable=True, type=core.VarDesc.VarType.RAW) + self.startup_program.global_block().append_op( + type="gen_nccl_id", + inputs={}, + outputs={"NCCLID": self.nccl_id_var}, + attrs={ + "endpoint": current_endpoint, + "endpoint_list": worker_endpoints, + "trainer_id": self.trainer_id + }) + + def _dist_transpile_if_necessary(self, optimize_ops, params_grads): + self._transpile_nccl2_dist() + if self.nccl_id_var != None: + return - def dist_transpile_if_necessary(self, optimize_ops, params_grads): if "PADDLE_TRAINING_ROLE" not in os.environ: return @@ -117,14 +341,21 @@ class Trainer(object): current_endpoint = os.getenv("PADDLE_CURRENT_IP", "") + ":" + port # the unique trainer id, starting from 0, needed by trainer # only - trainer_id = int(os.getenv("PADDLE_TRAINER_ID", "0")) + self.trainer_id = int(os.getenv("PADDLE_TRAINER_ID", "0")) + # the role, should be either PSERVER or TRAINER training_role = os.getenv("PADDLE_TRAINING_ROLE") with self._prog_and_scope_guard(): t = distribute_transpiler.DistributeTranspiler() t.transpile( - trainer_id, pservers=pserver_endpoints, trainers=trainers) + self.trainer_id, pservers=pserver_endpoints, trainers=trainers) if training_role == "PSERVER": + if self.checkpoint_cfg: + pserver_id = eplist.index(current_endpoint) + self.checkpoint_cfg.pserver_id = pserver_id + if t.has_distributed_lookup_table: + self.checkpoint_cfg.lookup_table_name = t.table_name + self.train_program = t.get_pserver_program(current_endpoint) self.startup_program = t.get_startup_program(current_endpoint, self.train_program) @@ -135,73 +366,66 @@ class Trainer(object): 'TRAINING_ROLE environment variable must be either TRAINER or PSERVER' ) - def train(self, - num_epochs, - event_handler, - reader=None, - parallel=False, - feed_order=None): + def stop(self): + """ + stop training + """ + self.__stop = True + + def train(self, num_epochs, event_handler, reader=None, feed_order=None): """ - Train the model. + Start the train loop to train the model. Args: - num_epochs: The number of epoch. An epoch will process all data in reader - event_handler: The event handler. 
A function with type (ev:Event)->void - reader: - parallel: True if use multi-CPUs or multi-GPUs - feed_order: Feeding order of reader. None will following the defining + num_epochs(int): The number of epoch. An epoch will process all data in reader + event_handler(callable): The event handler. A function with type (ev:Event)->void + reader(callable): A reader creator object. See also + :ref:`api_guide_python_reader` . + feed_order(list): Feeding order of reader. None will following the defining order in program Returns: - + None """ - if parallel: - raise NotImplementedError( - "Parallel Executor version of trainer is not implemented") - training_role = os.getenv("PADDLE_TRAINING_ROLE", "") if training_role == "PSERVER": with self._prog_and_scope_guard(): exe = executor.Executor(self.place) exe.run() return + if self.parallel: + self._train_by_parallel_executor(num_epochs, event_handler, reader, + feed_order) + else: + self._train_by_executor(num_epochs, event_handler, reader, + feed_order) - self._train_by_executor(num_epochs, event_handler, reader, feed_order) + def test(self, reader, feed_order): + """ + Test the model on given test data - def test(self, reader): - pass + Args: + reader(callable): The reader that yields test data. + feed_order(list): Feeding order of reader. None will following the + defining order in program + """ - def save_params(self, param_path): - # reference: save_persistables in io.py - with self._prog_and_scope_guard(): - exe = executor.Executor(self.place) - io.save_persistables(exe, dirname=param_path) + return self._test_by_executor(reader, feed_order, + self.train_func_outputs) - @staticmethod - def _check_and_get_place(place): + def save_params(self, param_path): """ - Check the type of place or get the default place - Args: - place(None|core.CUDAPlace|core.CPUPlace): the place that trainer will be executed on. + Save all parameters into :code:`param_path`. - Raises: - TypeError if the type mismatched. + Args: + param_path(str): The path to save parameters. Returns: - the original place if it is not None. - if fluid is compiled with CUDA, returns CUDAPlace(0) by default. - Otherwise returns CPUPlace by default. 
+ None """ - if place is None: - if core.is_compiled_with_cuda(): - return core.CUDAPlace(0) - else: - return core.CPUPlace() - else: - if not isinstance(place, core.CUDAPlace) and not isinstance( - place, core.CPUPlace): - raise TypeError("Place should be either CUDAPlace or CPUPlace") - return place + with self._prog_and_scope_guard(): + exe = executor.Executor(self.place) + io.save_persistables(exe, dirname=param_path) @contextlib.contextmanager def _prog_and_scope_guard(self): @@ -225,26 +449,784 @@ class Trainer(object): """ with self._prog_and_scope_guard(): + feed_var_list = build_feed_var_list(self.train_program, feed_order) + feeder = data_feeder.DataFeeder( + feed_list=feed_var_list, place=self.place) exe = executor.Executor(self.place) - if feed_order is None: - feed_var_list = [ - var - for var in self.train_program.global_block( - ).vars.itervalues() - if hasattr(var, 'is_data') and var.is_data - ] - else: - feed_var_list = [ - self.train_program.global_block().var(var_name) - for var_name in feed_order - ] + reader = feeder.decorate_reader(reader, multi_devices=False) + self._train_by_any_executor(event_handler, exe, num_epochs, reader) + def _train_by_any_executor(self, event_handler, exe, num_epochs, reader): + if self.checkpoint_cfg: + epochs = [ + epoch_id for epoch_id in range(num_epochs) + if epoch_id >= self.checkpoint_cfg.epoch_id + ] + else: + epochs = [epoch_id for epoch_id in range(num_epochs)] + + for epoch_id in epochs: + event_handler(BeginEpochEvent(epoch_id)) + for step_id, data in enumerate(reader()): + if self.__stop: + if self.checkpoint_cfg: + self._clean_checkpoint() + return + + if self.checkpoint_cfg and self.checkpoint_cfg.load_serial \ + and self.checkpoint_cfg.step_id >= step_id and self.checkpoint_cfg.epoch_id == epoch_id: + continue + + begin_event = BeginStepEvent(epoch_id, step_id) + event_handler(begin_event) + if begin_event.fetch_metrics: + metrics = exe.run(feed=data, + fetch_list=[ + var.name + for var in self.train_func_outputs + ]) + else: + metrics = exe.run(feed=data, fetch_list=[]) + + if self.checkpoint_cfg: + self._save_checkpoint(epoch_id, step_id) + event_handler(EndStepEvent(epoch_id, step_id, metrics)) + event_handler(EndEpochEvent(epoch_id)) + if self.checkpoint_cfg: + self._clean_checkpoint() + + def _test_by_executor(self, reader, feed_order, fetch_list): + with executor.scope_guard(self.scope): + feed_var_list = build_feed_var_list(self.test_program, feed_order) + feeder = data_feeder.DataFeeder( + feed_list=feed_var_list, place=self.place) + exe = executor.Executor(self.place) + accumulated = len(fetch_list) * [0] + count = 0 + for data in reader(): + outs = exe.run(program=self.test_program, + feed=feeder.feed(data), + fetch_list=fetch_list) + accumulated = [x[0] + x[1][0] for x in zip(accumulated, outs)] + count += 1 + + return [x / count for x in accumulated] + + def _train_by_parallel_executor(self, num_epochs, event_handler, reader, + feed_order): + with self._prog_and_scope_guard(): + pe = self._get_or_create_parallel_executor() + feed_var_list = build_feed_var_list(self.train_program, feed_order) feeder = data_feeder.DataFeeder( feed_list=feed_var_list, place=self.place) - for epoch_id in range(num_epochs): - event_handler(BeginEpochEvent(epoch_id)) - for step_id, data in enumerate(reader()): - event_handler(BeginStepEvent(epoch_id, step_id)) - exe.run(feed=feeder.feed(data), fetch_list=[]) - event_handler(EndStepEvent(epoch_id, step_id)) - event_handler(EndEpochEvent(epoch_id)) + reader = 
feeder.decorate_reader(reader, multi_devices=True) + self._train_by_any_executor(event_handler, pe, num_epochs, reader) + + def _get_parallel_executor(self): + return getattr(self, 'parallel_executor', None) + + def _get_or_create_parallel_executor(self): + if self._get_parallel_executor() is None: + self.parallel_executor = parallel_executor.ParallelExecutor( + use_cuda=isinstance(self.place, core.CUDAPlace), + loss_name=self.train_func_outputs[0].name) + return self._get_parallel_executor() + + def _clean_checkpoint(self): + assert self.checkpoint_cfg + clean_checkpoint(checkpoint_dir=self.checkpoint_cfg.checkpoint_dir) + + def _get_checkpoint_load_args(self): + """ + epoch_id and step_id are runtime arguments, they are not variables, will load them independently. + """ + return ["epoch_id", "step_id"] + + def _get_checkpoint_save_args(self, epoch_id, step_id): + """ + epoch_id and step_id are runtime arguments, they are not variables, will save them independently. + """ + trainer_args = {} + trainer_args["epoch_id"] = epoch_id + trainer_args["step_id"] = step_id + return trainer_args + + def _save_checkpoint(self, epoch_id, step_id): + assert self.checkpoint_cfg + + if epoch_id % self.checkpoint_cfg.epoch_interval == 0 \ + and step_id % self.checkpoint_cfg.step_interval == 0: + exe = executor.Executor(self.place) + save_checkpoint( + executor=exe, + checkpoint_dir=self.checkpoint_cfg.checkpoint_dir, + trainer_id=self.trainer_id, + trainer_args=self._get_checkpoint_save_args(epoch_id, step_id), + main_program=self.train_program, + max_num_checkpoints=self.checkpoint_cfg.max_num_checkpoints) + + def _load_checkpoint(self): + with self._prog_and_scope_guard(): + exe = executor.Executor(self.place) + load_checkpoint( + executor=exe, + checkpoint_dir=self.checkpoint_cfg.checkpoint_dir, + main_program=self.startup_program) + + if not self.checkpoint_cfg.pserver_id: + load_trainer_args = self._get_checkpoint_load_args() + trainer_args = load_checkpoint( + executor=exe, + checkpoint_dir=self.checkpoint_cfg.checkpoint_dir, + main_program=self.startup_program, + role_id=self.trainer_id, + is_trainer=True, + load_trainer_args=load_trainer_args) + + if len(trainer_args) != 2: + raise ValueError( + "the return trainer_args length do not equal _get_checkpoint_load_args" + ) + self.checkpoint_cfg.epoch_id = int(trainer_args[0]) + self.checkpoint_cfg.step_id = int(trainer_args[1]) + else: + if self.checkpoint_cfg.lookup_table_name: + load_checkpoint( + executor=exe, + checkpoint_dir=self.checkpoint_cfg.checkpoint_dir, + main_program=self.startup_program, + role_id=self.checkpoint_cfg.pserver_id, + is_trainer=False, + load_trainer_args=None, + load_lookup_table=self.checkpoint_cfg.lookup_table_name) + + +def build_feed_var_list(program, feed_order): + if not isinstance(program, framework.Program): + raise TypeError("The 'program' should be an object of Program") + + if isinstance(feed_order, list): + feed_var_list = [ + program.global_block().var(var_name) for var_name in feed_order + ] + else: + if not isinstance(feed_order, dict): + raise TypeError( + "The 'feed_order' should be either None, list or dict.") + if not sorted(feed_order.values()) == list(range(len(feed_order))): + raise ValueError( + "The values of 'feed_order' should be a permutation of [0, len(feed_order))" + ) + sorted_pair_list = sorted( + list(feed_order.items()), key=lambda item: item[1]) + feed_var_list = [ + program.global_block().var(pair[0]) for pair in sorted_pair_list + ] + return feed_var_list + + +# move Checkpoint APIs 
from io.py to trainer.py, make all of them private. +SUCCESS_MARK_FILENAME = "_SUCCESS" +CHECKPOINT_PREFIX = "checkpoint" +MODEL_DIR = "__model__" +LOOKUP_TABLE_DIR = "__lookup_table__" +TRAINER_PREFIX = "trainer" +CHECKPOINT_SEPARATOR = "_" + + +def save_checkpoint(executor, + checkpoint_dir, + trainer_id, + main_program, + trainer_args=None, + max_num_checkpoints=3, + lookup_table=None, + pserver_endpoints=None): + """ + This function filters out all checkpoint variables from the given + main_program and then saves these variables to the `checkpoint_dir` + directory. + + In the training process, we generally save a checkpoint in each + iteration. So there might be a lot of checkpoints in the + `checkpoint_dir`. To avoid them taking too much disk space, + `max_num_checkpoints` is introduced to limit the total number of + checkpoints. If the number of existing checkpoints is greater than + `max_num_checkpoints`, the oldest ones will be deleted as new ones + are saved. + + A variable is a checkpoint variable and will be saved if it meets + all of the following conditions: + 1. It's persistable. + 2. Its type is not FEED_MINIBATCH nor FETCH_LIST nor RAW. + 3. Its name contains no "@GRAD" nor ".trainer_" nor ".block". + + Args: + executor(Executor): The executor to run for saving the checkpoint. + checkpoint_dir(str): The folder where to save checkpoints. + trainer_id(int): current trainer id; if the id is equal to 0, the + trainer is the chief. + trainer_args(dict|None): Current training arguments, such as 'epoch_id' + and 'step_id'. + Default: None + main_program(Program): The program whose checkpoint variables will + be saved. + max_num_checkpoints(int): The max number of existing checkpoints to + keep. + Default: 3 + lookup_table(string|None): the lookup table name; when using a + distributed lookup table, it can be obtained from + DistributeTranspiler.table_name + pserver_endpoints(list|None): the parameter server ip:port list; + when using a distributed lookup table, it can be obtained from the + distribute arguments. + + Returns: + None + + Raises: + ValueError: If `checkpoint_dir` is None. + AssertionError: If `trainer_args` is not a dict. + + Examples: + ..
code-block:: python + + exe = fluid.Executor(fluid.CPUPlace()) + path = "./checkpoints" + prog = fluid.default_main_program() + trainer_args = {"epoch_id": 200, + "step_id": 20} # just an example + table_name = "share_w" + ps_endpoints = ["127.0.0.1:6000","127.0.0.1:6001"] + + save_checkpoint(executor=exe, + checkpoint_dir=path, + trainer_id=0, + trainer_args=trainer_args, + main_program=prog, + max_num_checkpoints=3, + lookup_table=table_name, + pserver_endpoints = ps_endpoints) + """ + if checkpoint_dir is None: + raise ValueError("'checkpoint_dir' should not be None") + + if main_program is None: + raise ValueError('main_program should not be None.') + + if trainer_args: + assert isinstance(trainer_args, dict) + + is_chief = trainer_id == 0 + + _make_chekcpoint_dirs(checkpoint_dir) + serial = _get_latest_checkpoint_serial(checkpoint_dir) + 1 + cur_dir = _get_serial_dir(checkpoint_dir, serial) + + _save_trainer_args(cur_dir, trainer_id, trainer_args) + + if is_chief: + _save_persist_vars_without_grad(executor, cur_dir, main_program) + + if is_chief and lookup_table and pserver_endpoints: + _save_pserver_vars_by_notify(executor, cur_dir, lookup_table, + pserver_endpoints) + + _scroll_delete(checkpoint_dir, max_num_checkpoints) + + +def load_checkpoint(executor, + checkpoint_dir, + main_program, + role_id=0, + is_trainer=True, + load_trainer_args=None, + load_lookup_table=None): + """ + This function filters out all checkpoint variables from the given + main_program and then tries to load these variables from the + `checkpoint_dir` directory. + + In the training process, we generally save a checkpoint in each + iteration. So there may be more than one checkpoint in the + `checkpoint_dir` (each checkpoint has its own sub folder); the + checkpoint with the latest serial under `checkpoint_dir` is the one + that gets loaded. + + A variable is a checkpoint variable and will be loaded if it meets + all of the following conditions: + 1. It's persistable. + 2. Its type is not FEED_MINIBATCH nor FETCH_LIST nor RAW. + 3. Its name contains no "@GRAD" nor ".trainer_" nor ".block". + + Args: + executor(Executor): The executor to run for loading the checkpoint. + checkpoint_dir(str): The folder where all checkpoints are. + main_program(Program): The program whose checkpoint variables will + be loaded. + role_id(int): the trainer id or the parameter server id. + is_trainer(bool): True for a trainer and False for a parameter server. + load_trainer_args(list|None): the list of trainer args to load. + load_lookup_table(str|None): the lookup table name + + Returns: + None + + Raises: + ValueError: If `checkpoint_dir` is None. + ValueError: If `main_program` is None. + + Examples: + .. code-block:: python + + exe = fluid.Executor(fluid.CPUPlace()) + path = "./checkpoints" + prog = fluid.default_main_program() + load_checkpoint(executor=exe, checkpoint_dir=path, + main_program=prog) + + # In this example, `load_checkpoint` function + # will first filter out all checkpoint variables in the default + # main program, and then try to load these variables from the + # folder "./checkpoints/checkpoint_9/__model__" (assuming the + # latest serial found under "./checkpoints" is 9).
+ """ + + if checkpoint_dir is None: + raise ValueError("'checkpoint_dir' should not be None") + + serial = _get_latest_checkpoint_serial(checkpoint_dir) + + # there are nothing need to be loaded + if serial is None or serial < 0: + return + + if main_program is None: + raise ValueError('main_program should not be None.') + + if is_trainer and load_trainer_args is None: + cur_dir = _get_serial_dir(checkpoint_dir, serial) + _load_persist_vars_without_grad(executor, cur_dir, main_program, True) + return + + if is_trainer and load_trainer_args: + return _load_trainer_args(checkpoint_dir, serial, role_id, + load_trainer_args) + + if not is_trainer and load_lookup_table: + _load_lookup_table_vars(executor, checkpoint_dir, main_program, role_id, + load_lookup_table) + + +def clean_checkpoint(checkpoint_dir, delete_dir=False): + """ + clean the checkpoint dir, when the train exits normally, + the trainer will call clean_checkpoint to delete checkpoint directory saved before. + delete_dir only works when the directory is empty, otherwise, OSError is raised. + + : param checkpoint_dir + : param delete_dir + """ + + if checkpoint_dir is None: + raise ValueError("'checkpoint_dir' should not be None") + _scroll_delete(checkpoint_dir, max_num_checkpoints=0) + + if delete_dir and not os.listdir(checkpoint_dir): + os.rmdir(checkpoint_dir) + + +def _load_persist_vars_without_grad(executor, + dirname, + program, + has_model_dir=False): + """ + This function filters out all checkpoint variables from the give + program and then trys to load these variables from the given directory. + + A variable is a checkpoint variable if it meets all following + conditions: + 1. It's persistable. + 2. It's type is not FEED_MINIBATCH nor FETCH_LIST nor RAW. + 3. It's name contains no "@GRAD" nor ".trainer_" nor ".block". + + Args: + executor(Executor): The executor to run for loading variables. + dirname(str): The directory path. + program(Program): The program whose checkpoint variables will + be loaded. + has_model_dir(bool): if True, the function loads variables + from a sub directory named '__model__'. + Default: False + + Returns: + None + + Examples: + .. code-block:: python + + exe = fluid.Executor(fluid.CPUPlace()) + param_path = "./my_paddle_model" + prog = fluid.default_main_program() + _load_persist_vars_without_grad(executor=exe, + dirname=param_path, program=prog, has_model_dir=True) + + # In this example, `_load_persist_vars_without_grad` function + # will first filters out all checkpoint variables in the default + # main program, and then trys to load these variables form the + # folder "./my_paddle_model/__model__". + """ + + if has_model_dir: + dirname = _get_model_dir(dirname) + + io.load_vars( + executor, + dirname=dirname, + main_program=program, + predicate=_is_checkpoint_var, + filename=None) + + +def _load_lookup_table_vars(executor, dirname, program, pserver_id, table_name): + """ + The parameter server will load lookup table's local file in + selectedrows variable. + + Args: + executor(Executor): The executor to run for loading persistable variables + dirname(str): The directory path + main_program(Program): Find the variable named table_name in main_program + pserver_id(int): the serial number in pserver_endpoints list + table_name(str): lookup table name + + Returns: + None + + Examples: + .. 
+
+
+def _load_persist_vars_without_grad(executor,
+                                    dirname,
+                                    program,
+                                    has_model_dir=False):
+    """
+    This function filters out all checkpoint variables from the given
+    program and then tries to load these variables from the given directory.
+
+    A variable is a checkpoint variable if it meets all following
+    conditions:
+    1. It's persistable.
+    2. Its type is not FEED_MINIBATCH nor FETCH_LIST nor RAW.
+    3. Its name contains no "@GRAD" nor ".trainer_" nor ".block".
+
+    Args:
+        executor(Executor): The executor to run for loading variables.
+        dirname(str): The directory path.
+        program(Program): The program whose checkpoint variables will
+            be loaded.
+        has_model_dir(bool): if True, the function loads variables
+            from a sub directory named '__model__'.
+            Default: False
+
+    Returns:
+        None
+
+    Examples:
+        .. code-block:: python
+
+            exe = fluid.Executor(fluid.CPUPlace())
+            param_path = "./my_paddle_model"
+            prog = fluid.default_main_program()
+            _load_persist_vars_without_grad(executor=exe,
+                    dirname=param_path, program=prog, has_model_dir=True)
+
+            # In this example, the `_load_persist_vars_without_grad` function
+            # will first filter out all checkpoint variables in the default
+            # main program, and then try to load these variables from the
+            # folder "./my_paddle_model/__model__".
+    """
+
+    if has_model_dir:
+        dirname = _get_model_dir(dirname)
+
+    io.load_vars(
+        executor,
+        dirname=dirname,
+        main_program=program,
+        predicate=_is_checkpoint_var,
+        filename=None)
+
+
+def _load_lookup_table_vars(executor, dirname, program, pserver_id,
+                            table_name):
+    """
+    The parameter server loads the lookup table's local shard file into a
+    SelectedRows variable.
+
+    Args:
+        executor(Executor): The executor to run for loading persistable
+            variables
+        dirname(str): The directory path
+        program(Program): Find the variable named table_name in this program
+        pserver_id(int): the serial number in the pserver_endpoints list
+        table_name(str): lookup table name
+
+    Returns:
+        None
+
+    Examples:
+        .. code-block:: python
+
+            exe = fluid.Executor(fluid.CPUPlace())
+            dirname = "./checkpoints/checkpoint_9/"
+            prog = fluid.default_main_program()
+            pserver_id = 1
+            table_name = "share_w"
+            _load_lookup_table_vars(executor=exe,
+                    dirname=dirname, program=prog, pserver_id=pserver_id,
+                    table_name=table_name)
+    """
+
+    # initialize first, so a missing table fails the assert below instead
+    # of raising a NameError
+    lookup_table_var = None
+    for var in program.list_vars():
+        if var.name == table_name:
+            lookup_table_var = var
+            break
+
+    assert lookup_table_var is not None
+
+    lookup_table_dir = os.path.join(dirname, LOOKUP_TABLE_DIR)
+    table_file = table_name + CHECKPOINT_SEPARATOR + str(pserver_id)
+
+    load_prog = framework.Program()
+    load_block = load_prog.global_block()
+
+    load_block.append_op(
+        type='load',
+        inputs={},
+        outputs={'Out': [lookup_table_var]},
+        attrs={'file_path': os.path.join(lookup_table_dir, table_file)})
+
+    executor.run(load_prog)
+
+
+def _save_persist_vars_without_grad(executor, dirname, program):
+    """
+    This function filters out all checkpoint variables from the given
+    program and then saves these variables to a sub-folder '__model__' of
+    the given directory.
+
+    A variable is a checkpoint variable if it meets all following
+    conditions:
+    1. It's persistable.
+    2. Its type is not FEED_MINIBATCH nor FETCH_LIST nor RAW.
+    3. Its name contains no "@GRAD" nor ".trainer_" nor ".block".
+
+    Args:
+        executor(Executor): The executor to run for saving variables.
+        dirname(str): The directory path.
+        program(Program): The program whose checkpoint variables will
+            be saved.
+
+    Returns:
+        None
+
+    Examples:
+        .. code-block:: python
+
+            exe = fluid.Executor(fluid.CPUPlace())
+            param_path = "./my_paddle_model"
+            prog = fluid.default_main_program()
+            _save_persist_vars_without_grad(executor=exe,
+                    dirname=param_path, program=prog)
+
+            # In this example, the `_save_persist_vars_without_grad` function
+            # will first filter out all checkpoint variables in the default
+            # main program, and then save these variables to the folder
+            # "./my_paddle_model/__model__".
+    """
+    cur_dir = _get_model_dir(dirname)
+    io.save_vars(
+        executor,
+        dirname=cur_dir,
+        main_program=program,
+        vars=None,
+        predicate=_is_checkpoint_var,
+        filename=None)
+    _write_success(cur_dir)
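
Both the save and the load path delegate the actual filtering to `io.save_vars`/`io.load_vars` through the `predicate` argument, which is simply called once per variable of the program. A sketch of the mechanism, spelled out by hand (assuming a `program` object exists):

.. code-block:: python

    # Collect exactly the variables that the predicate accepts,
    # i.e. the checkpoint variables defined by _is_checkpoint_var.
    checkpoint_vars = [
        var for var in program.list_vars() if _is_checkpoint_var(var)
    ]
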
+
+
+def _save_pserver_vars_by_notify(executor, dirname, lookup_table,
+                                 ps_endpoint_list):
+    """
+    This function sends a checkpoint notify message from trainer 0
+    to all the pservers.
+    The checkpoint notify message contains the lookup table name and
+    the absolute path on the pserver where the lookup_table is saved.
+
+    Args:
+        executor(Executor): The executor to run for sending the checkpoint
+            notify.
+        dirname(str): The folder where to save checkpoints.
+        lookup_table(string): the lookup table name; when using a
+            distributed lookup table, we can get the lookup table name from
+            DistributeTranspiler.table_name
+        ps_endpoint_list(list): the parameter server ip:port list;
+            when using a distributed lookup table, we can get
+            ps_endpoint_list from the distribute arguments.
+    Return:
+        None
+
+    Examples:
+        .. code-block:: python
+
+            exe = fluid.Executor(fluid.CPUPlace())
+            param_path = "./my_paddle_model"
+            prog = fluid.default_main_program()
+            table_name = "share_w"
+            ps_endpoints = ["127.0.0.1:6000", "127.0.0.1:6001"]
+
+            _save_pserver_vars_by_notify(executor=exe,
+                    dirname=param_path, lookup_table=table_name,
+                    ps_endpoint_list=ps_endpoints)
+    """
+    cur_dir = _get_lookuptable_dir(dirname)
+
+    checkpoint_notify_program = framework.Program()
+    checkpoint_notify_block = checkpoint_notify_program.global_block()
+
+    attrs = {}
+    attrs['epmap'] = ps_endpoint_list
+    attrs['dir'] = cur_dir
+    attrs['lookup_table'] = lookup_table
+
+    checkpoint_notify_block.append_op(
+        type='checkpoint_notify', inputs={}, outputs={}, attrs=attrs)
+    executor.run(checkpoint_notify_program)
+
+
+def _save_trainer_args(dirname, trainer_id, trainer_args):
+    assert isinstance(trainer_args, dict)
+
+    cur_dir = _get_trainer_dir(dirname, trainer_id)
+
+    for name, value in list(trainer_args.items()):
+        args_file = os.path.join(cur_dir, name)
+        with open(args_file, 'w') as f:
+            f.write(str(value))
+    _write_success(cur_dir)
+
+
+def _load_trainer_args(checkpoint_dir, serial, trainer_id, trainer_args):
+    """
+    The trainer loads some args from its own sub directory,
+    such as epoch_id and step_id.
+
+    Args:
+        checkpoint_dir(str): The folder where all checkpoints are.
+        serial(int): The serial of the checkpoint you would like to load.
+        trainer_id(int): current trainer id.
+        trainer_args(list): names of the trainer args to load
+    Return:
+        list: the loaded arg values (as strings), in the same order as
+        `trainer_args`
+
+    Examples:
+        .. code-block:: python
+
+            param_path = "./checkpoint/"
+            serial = 7
+            trainer_id = 2
+            trainer_args = ["epoch_id", "step_id"]
+
+            _load_trainer_args(checkpoint_dir=param_path, serial=serial,
+                    trainer_id=trainer_id, trainer_args=trainer_args)
+    """
+    assert isinstance(trainer_args, list)
+
+    cur_dir = _get_serial_dir(checkpoint_dir, serial)
+    cur_dir = _get_trainer_dir(cur_dir, trainer_id)
+
+    ret_values = []
+
+    for arg in trainer_args:
+        cur_file = os.path.join(cur_dir, arg)
+        with open(cur_file, 'r') as f:
+            contents = f.read()
+            ret_values.append(contents.strip())
+    return ret_values
+
+
+def _is_checkpoint_var(var):
+    """
+    Checkpointing does not save or load all variables.
+    Variables whose type is FEED_MINIBATCH/FETCH_LIST/RAW, or whose name
+    contains "@GRAD", ".trainer_" or ".block", are discarded.
+
+    : param var(Variable)
+    """
+    if var.desc.type() == core.VarDesc.VarType.FEED_MINIBATCH or \
+            var.desc.type() == core.VarDesc.VarType.FETCH_LIST or \
+            var.desc.type() == core.VarDesc.VarType.RAW:
+        return False
+    # names containing @GRAD denote gradient variables; the checkpoint will
+    # not save them.
+    if "@GRAD" in var.name:
+        return False
+    # names containing .trainer_ denote distributed-training variables; the
+    # checkpoint will not save them.
+    if ".trainer_" in var.name:
+        return False
+
+    # names containing .block denote distributed-training variables; the
+    # checkpoint will not save them.
+    if ".block" in var.name:
+        return False
+
+    return var.persistable
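
The directory helpers below compose a fixed on-disk layout from the module constants defined at the top of the file. For a concrete picture, the paths for a checkpoint with serial 9 would be built roughly like this (a sketch for illustration only):

.. code-block:: python

    import os

    root = "./checkpoints"
    serial_dir = os.path.join(root, CHECKPOINT_PREFIX + CHECKPOINT_SEPARATOR + "9")
    model_dir = os.path.join(serial_dir, MODEL_DIR)         # .../checkpoint_9/__model__
    table_dir = os.path.join(serial_dir, LOOKUP_TABLE_DIR)  # .../checkpoint_9/__lookup_table__
    trainer_dir = os.path.join(serial_dir,
                               TRAINER_PREFIX + CHECKPOINT_SEPARATOR + "0")
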
+ """ + assert dirs is not None + + if os.path.isfile(dirs): + raise OSError(errno.ENOTDIR, "dirs path shoule be a Directory.", dirs) + + if not os.path.isdir(dirs): + try: + os.makedirs(dirs) + except OSError as err: + if err.errno != errno.EEXIST: + raise err + + +def _get_dir_serial(dirname): + _, serial = dirname.split(CHECKPOINT_SEPARATOR) + + try: + serial_num = int(serial) + except ValueError: + serial_num = -1 + return serial_num + + +def _get_serial_dir(dirname, serial): + serial_folder = CHECKPOINT_PREFIX + CHECKPOINT_SEPARATOR + str(serial) + serial_dir = os.path.join(dirname, serial_folder) + _make_chekcpoint_dirs(serial_dir) + + return serial_dir + + +def _get_model_dir(dirname): + model_dir = os.path.join(dirname, MODEL_DIR) + _make_chekcpoint_dirs(model_dir) + return model_dir + + +def _get_lookuptable_dir(dirname): + lookuptable_dir = os.path.join(dirname, LOOKUP_TABLE_DIR) + _make_chekcpoint_dirs(lookuptable_dir) + return lookuptable_dir + + +def _get_trainer_dir(dirname, trainer_id): + trainer_folder = TRAINER_PREFIX + CHECKPOINT_SEPARATOR + str(trainer_id) + trainer_dir = os.path.join(dirname, trainer_folder) + _make_chekcpoint_dirs(trainer_dir) + return trainer_dir + + +def _scroll_delete(dirname, max_num_checkpoints=3): + dirs = os.listdir(dirname) + serial_map = {} + for serial in dirs: + serial_num = _get_dir_serial(serial) + serial_map[serial_num] = serial + + if len(list(serial_map.keys())) <= max_num_checkpoints: + return + + serials = list(serial_map.keys()) + serials.sort(reverse=True) + serials = serials[max_num_checkpoints:] + for serial in serials: + cur_dir = _get_serial_dir(dirname, serial) + try: + shutil.rmtree(cur_dir) + except OSError as err: + if err.errno != errno.ENOENT: + raise err + + +def _write_success(dirname): + """ + write an empty file named "_SUCCESS" in checkpoint dir, indicate this checkpoint is correct. + + : param dirname + """ + success_file = os.path.join(dirname, SUCCESS_MARK_FILENAME) + with open(success_file, 'a') as f: + now = time.ctime() + f.write(now) + + +def _get_latest_checkpoint_serial(checkpoint_dir): + """ + get the latest file in checkpoint directory, the _SUCCESS file must exist in the directory + + : param checkpoint_dir + """ + if not checkpoint_dir: + return -1 + + def has_success(checkpoint_dir, cur_dir): + """ + is _SUCCESS in this dir + """ + + serial = _get_dir_serial(cur_dir) + if serial == -1 or not os.path.isdir( + os.path.join(checkpoint_dir, cur_dir)): + return -1 + + success_path = os.path.join( + _get_serial_dir(checkpoint_dir, serial), MODEL_DIR, + SUCCESS_MARK_FILENAME) + if os.path.isfile(success_path): + return serial + + if not os.path.isdir(checkpoint_dir): + return -1 + + current_dir = -1 + dirs = os.listdir(checkpoint_dir) + for cur_dir in dirs: + success_num = has_success(checkpoint_dir, cur_dir) + if success_num > current_dir: + current_dir = success_num + return current_dir diff --git a/python/paddle/fluid/transpiler/__init__.py b/python/paddle/fluid/transpiler/__init__.py index 6d3c1b947f..a8622ad544 100644 --- a/python/paddle/fluid/transpiler/__init__.py +++ b/python/paddle/fluid/transpiler/__init__.py @@ -11,12 +11,13 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
diff --git a/python/paddle/fluid/transpiler/__init__.py b/python/paddle/fluid/transpiler/__init__.py
index 6d3c1b947f..a8622ad544 100644
--- a/python/paddle/fluid/transpiler/__init__.py
+++ b/python/paddle/fluid/transpiler/__init__.py
@@ -11,12 +11,13 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-from distribute_transpiler import DistributeTranspiler
-from inference_transpiler import InferenceTranspiler
-from memory_optimization_transpiler import memory_optimize, release_memory
-from distribute_transpiler_simple import SimpleDistributeTranspiler
+
+from .distribute_transpiler import DistributeTranspiler, DistributeTranspilerConfig
+from .inference_transpiler import InferenceTranspiler
+from .memory_optimization_transpiler import memory_optimize, release_memory
+from .ps_dispatcher import HashName, RoundRobin
 
 __all__ = [
-    "DistributeTranspiler", "InferenceTranspiler", "SimpleDistributeTranspiler",
-    "memory_optimize", "release_memory"
+    "DistributeTranspiler", "InferenceTranspiler", "memory_optimize",
+    "release_memory", "HashName", "RoundRobin", "DistributeTranspilerConfig"
 ]
diff --git a/python/paddle/fluid/transpiler/details/__init__.py b/python/paddle/fluid/transpiler/details/__init__.py
new file mode 100644
index 0000000000..1bfab1f219
--- /dev/null
+++ b/python/paddle/fluid/transpiler/details/__init__.py
@@ -0,0 +1,16 @@
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from .program_utils import *
+from .ufind import *
diff --git a/python/paddle/fluid/transpiler/details/program_utils.py b/python/paddle/fluid/transpiler/details/program_utils.py
new file mode 100644
index 0000000000..76d10777f5
--- /dev/null
+++ b/python/paddle/fluid/transpiler/details/program_utils.py
@@ -0,0 +1,37 @@
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+def delete_ops(block, ops):
+    try:
+        start = list(block.ops).index(ops[0])
+        end = list(block.ops).index(ops[-1])
+        [block._remove_op(start) for _ in range(end - start + 1)]
+    except Exception as e:
+        raise e
+    block.program._sync_with_cpp()
+
+
+def find_op_by_input_arg(block, arg_name):
+    for index, op in enumerate(block.ops):
+        if arg_name in op.input_arg_names:
+            return index
+    return -1
+
+
+def find_op_by_output_arg(block, arg_name):
+    for index, op in enumerate(block.ops):
+        if arg_name in op.output_arg_names:
+            return index
+    return -1
diff --git a/python/paddle/fluid/transpiler/details/ufind.py b/python/paddle/fluid/transpiler/details/ufind.py
new file mode 100644
index 0000000000..0e30d0e3f9
--- /dev/null
+++ b/python/paddle/fluid/transpiler/details/ufind.py
@@ -0,0 +1,64 @@
+# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+class UnionFind(object):
+    """ Union-find data structure.
+
+    Union-find is a data structure that keeps track of a set of elements
+    partitioned into a number of disjoint (non-overlapping) subsets.
+
+    Reference:
+    https://en.wikipedia.org/wiki/Disjoint-set_data_structure
+
+    Args:
+        elements(list): The initial element list.
+    """
+
+    def __init__(self, elementes=None):
+        self._parents = []  # index -> parent index
+        self._index = {}  # element -> index
+        self._curr_idx = 0
+        if not elementes:
+            elementes = []
+        for ele in elementes:
+            self._parents.append(self._curr_idx)
+            self._index.update({ele: self._curr_idx})
+            self._curr_idx += 1
+
+    def find(self, x):
+        # Find the root index of the given element x, and
+        # compress the path while finding the root index
+        if not x in self._index:
+            return -1
+        idx = self._index[x]
+        while idx != self._parents[idx]:
+            t = self._parents[idx]
+            self._parents[idx] = self._parents[t]
+            idx = t
+        return idx
+
+    def union(self, x, y):
+        # Union the two given elements
+        x_root = self.find(x)
+        y_root = self.find(y)
+
+        if x_root == y_root:
+            return
+        self._parents[x_root] = y_root
+
+    def is_connected(self, x, y):
+        # If the two given elements have the same root index,
+        # then they are connected.
+        return self.find(x) == self.find(y)
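
The transpiler later uses this structure (as `ufind.is_connected(op, opt_op)` in `get_pserver_program`) to group operators that belong to the same optimizer chain. A quick illustration with plain strings:

.. code-block:: python

    uf = UnionFind(["scale_op", "sgd_op", "decay_op"])
    uf.union("scale_op", "sgd_op")
    assert uf.is_connected("scale_op", "sgd_op")
    assert not uf.is_connected("scale_op", "decay_op")
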
diff --git a/python/paddle/fluid/transpiler/distribute_transpiler.py b/python/paddle/fluid/transpiler/distribute_transpiler.py
index 640ac9f085..ff6e71bbfa 100644
--- a/python/paddle/fluid/transpiler/distribute_transpiler.py
+++ b/python/paddle/fluid/transpiler/distribute_transpiler.py
@@ -11,18 +11,41 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-
-from __future__ import print_function
+"""
+Steps to transpile trainer:
+1. split variable to multiple blocks, aligned by product(dim[1:]) (width).
+2. rename splited grad variables to add trainer_id suffix ".trainer_%d".
+3. modify trainer program add split_op to each grad variable.
+4. append send_op to send splited variables to server.
+5. add recv_op to fetch params(splited blocks or origin param) from server.
+6. append concat_op to merge splited blocks to update local weights.
+
+Steps to transpile pserver:
+1. create new program for parameter server.
+2. create params and grad variables that assigned to current server instance.
+3. create a sub-block in the server side program
+4. append ops that should run on current server instance.
+5. add listen_and_serv op
+"""
 
 import math
+import random
+import numpy as np
 
-import distributed_splitter as splitter
-from .. import core
-from ..framework import Program, default_main_program, Variable, Parameter
+from .ps_dispatcher import RoundRobin, HashName, PSDispatcher
+from .. import core, framework
+from ..framework import Program, default_main_program, \
+    default_startup_program, Block, \
+    Parameter, grad_var_name
+from .details import *
+from functools import reduce
 
 LOOKUP_TABLE_TYPE = "lookup_table"
 LOOKUP_TABLE_GRAD_TYPE = "lookup_table_grad"
-RPC_CLIENT_VAR_NAME = "RPC_CLIENT_VAR"
+OP_ROLE_VAR_ATTR_NAME = core.op_proto_and_checker_maker.kOpRoleVarAttrName()
+RPC_OP_ROLE_ATTR_NAME = op_role_attr_name = core.op_proto_and_checker_maker.kOpRoleAttrName(
+)
+RPC_OP_ROLE_ATTR_VALUE = core.op_proto_and_checker_maker.OpRole.RPC
 
 
 class VarBlock:
@@ -36,85 +59,37 @@ class VarBlock:
         return "%s:%d:%d" % (self.varname, self.offset, self.size)
 
 
-class UnionFind(object):
-    """ Union-find data structure.
-
-    Union-find is a data structure that keeps track of a set of elements partitioned
-    into a number of disjoint (non-overlapping) subsets.
-
-    Reference:
-    https://en.wikipedia.org/wiki/Disjoint-set_data_structure
-
-    Args:
-      elements(list): The initialize element list.
-    """
-
-    def __init__(self, elementes=None):
-        self._parents = []  # index -> parent index
-        self._index = {}  # element -> index
-        self._curr_idx = 0
-        if not elementes:
-            elementes = []
-        for ele in elementes:
-            self._parents.append(self._curr_idx)
-            self._index.update({ele: self._curr_idx})
-            self._curr_idx += 1
-
-    def find(self, x):
-        # Find the root index of given element x,
-        # execute the path compress while findind the root index
-        if not x in self._index:
-            return -1
-        idx = self._index[x]
-        while idx != self._parents[idx]:
-            t = self._parents[idx]
-            self._parents[idx] = self._parents[t]
-            idx = t
-        return idx
-
-    def union(self, x, y):
-        # Union two given element
-        x_root = self.find(x)
-        y_root = self.find(y)
-
-        if x_root == y_root:
-            return
-        self._parents[x_root] = y_root
-
-    def is_connected(self, x, y):
-        # If two given elements have the same root index,
-        # then they are connected.
-        return self.find(x) == self.find(y)
-
-
 def same_or_split_var(p_name, var_name):
     return p_name == var_name or p_name.startswith(var_name + ".block")
 
 
-def split_dense_variable(var_list,
-                         pserver_count,
-                         min_block_size=1024,
-                         max_block_size=1048576):
+def slice_variable(var_list, slice_count, min_block_size):
     """
-    We may need to split dense tensor to one or more blocks and put
-    them equally onto parameter server. One block is a sub-tensor
-    aligned by dim[0] of the tensor.
-
-    We need to have a minimal block size so that the calculations in
-    the parameter server side can gain better performance. By default
-    minimum block size is 1024. The max block size is used to prevent
-    very large blocks that may cause send error.
-    :return: A list of VarBlocks. Each VarBlock specifies a shard of
-       the var.
+    We may need to split dense tensor to one or more blocks and put
+    them equally onto parameter server. One block is a sub-tensor
+    aligned by dim[0] of the tensor.
+
+    We need to have a minimal block size so that the calculations in
+    the parameter server side can gain better performance. By default
+    the minimum block size is 8K elements (each may be 16, 32 or 64
+    bits).
+
+    Args:
+        var_list (list): List of variables.
+        slice_count (int): Number of slices that the variables will be
+            split into, which could be the pserver services' count.
+        min_block_size (int): Minimum splitted block size.
+    Returns:
+        blocks (list[(varname, block_id, current_block_size)]): A list
+            of VarBlocks. Each VarBlock specifies a shard of the var.
     """
     blocks = []
     for var in var_list:
-        split_count = pserver_count
+        split_count = slice_count
         var_numel = reduce(lambda x, y: x * y, var.shape)
         max_pserver_count = int(math.floor(var_numel / float(min_block_size)))
         if max_pserver_count == 0:
             max_pserver_count = 1
-        if max_pserver_count < pserver_count:
+        if max_pserver_count < slice_count:
             split_count = max_pserver_count
         block_size = int(math.ceil(var_numel / float(split_count)))
@@ -126,7 +101,7 @@ def split_dense_variable(var_list,
             block_size += dim1 - remains
             # update split_count after aligning
             split_count = int(math.ceil(var_numel / float(block_size)))
-        for block_id in xrange(split_count):
+        for block_id in range(split_count):
             curr_block_size = min(block_size, var_numel - (
                 (block_id) * block_size))
             block = VarBlock(var.name, block_id, curr_block_size)
@@ -134,181 +109,200 @@
     return blocks
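
To make the splitting rule concrete, here is a small worked example of the arithmetic above (a standalone sketch with made-up numbers, not part of the patch): a parameter of shape (8000, 10) has 80000 elements, so with `min_block_size = 8192` at most 9 blocks are allowed; with 4 pservers the split count therefore stays 4.

.. code-block:: python

    import math

    var_numel = 8000 * 10   # elements in the parameter
    slice_count = 4         # number of pserver services
    min_block_size = 8192

    # mirror the logic of slice_variable:
    max_count = max(1, int(math.floor(var_numel / float(min_block_size))))  # 9
    split_count = min(slice_count, max_count)                               # 4
    block_size = int(math.ceil(var_numel / float(split_count)))             # 20000
    # 20000 is already a multiple of dim1 = 10, so no alignment is needed.
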
""" blocks = [] for var in var_list: - split_count = pserver_count + split_count = slice_count var_numel = reduce(lambda x, y: x * y, var.shape) max_pserver_count = int(math.floor(var_numel / float(min_block_size))) if max_pserver_count == 0: max_pserver_count = 1 - if max_pserver_count < pserver_count: + if max_pserver_count < slice_count: split_count = max_pserver_count block_size = int(math.ceil(var_numel / float(split_count))) @@ -126,7 +101,7 @@ def split_dense_variable(var_list, block_size += dim1 - remains # update split_count after aligning split_count = int(math.ceil(var_numel / float(block_size))) - for block_id in xrange(split_count): + for block_id in range(split_count): curr_block_size = min(block_size, var_numel - ( (block_id) * block_size)) block = VarBlock(var.name, block_id, curr_block_size) @@ -134,181 +109,200 @@ def split_dense_variable(var_list, return blocks -def delete_ops(block, ops): - try: - start = list(block.ops).index(ops[0]) - end = list(block.ops).index(ops[-1]) - [block.remove_op(start) for _ in xrange(end - start + 1)] - except Exception, e: - raise e - block.program.sync_with_cpp() +class DistributeTranspilerConfig(object): + """ + slice_var_up (bool): Do Tensor slice for pservers, default is True. + split_method (PSDispatcher): RoundRobin or HashName can be used + try to choose the best method to balance loads for pservers. + min_block_size (int): Minimum splitted element number in block. + According:https://github.com/PaddlePaddle/Paddle/issues/8638#issuecomment-369912156 + We can use bandwidth effiently when data size is larger than 2MB.If you + want to change it, please be sure you see the slice_variable function. + """ + + slice_var_up = True + split_method = None + min_block_size = 8192 + + +class DistributeTranspiler(object): + """ + **DistributeTranspiler** + + Convert the fluid program to distributed data-parallelism programs. + + The main_program will be transformed to use a remote parameter server + to do parameter optimization. And the optimization graph will be put + into a parameter server program. + + Examples: + .. code-block:: python + + # Define your model before these codes. + port = os.getenv("PADDLE_PSERVER_PORT", "6174") + pserver_ips = os.getenv("PADDLE_PSERVER_IPS", "") + eplist = [] + for ip in pserver_ips.split(","): + eplist.append(':'.join([ip, port])) + pserver_endpoints = ",".join(eplist) + trainers = int(os.getenv("PADDLE_TRAINERS")) + current_endpoint = os.getenv("PADDLE_CURRENT_IP", "") + ":" + port + trainer_id = int(os.getenv("PADDLE_TRAINER_ID", "0")) + role = os.getenv("PADDLE_TRAINING_ROLE") + + t = distribute_transpiler.DistributeTranspiler() + t.transpile( + trainer_id, pservers=pserver_endpoints, trainers=trainers) + if role == "PSERVER": + pserver_program = t.get_pserver_program(current_endpoint) + pserver_startup_program = t.get_startup_program(current_endpoint, + pserver_program) + elif role == "TRAINER": + trainer_program = t.get_trainer_program() + """ + + def __init__(self, config=None): + if config is not None: + self.config = config + else: + self.config = DistributeTranspilerConfig() + if self.config.split_method is None: + self.config.split_method = RoundRobin + + assert (self.config.min_block_size >= 8192) + assert (self.config.split_method.__bases__[0] == PSDispatcher) -class DistributeTranspiler: def transpile(self, trainer_id, program=None, pservers="127.0.0.1:6174", trainers=1, - split_method=splitter.round_robin, sync_mode=True): """ - Transpile the program to distributed data-parallelism programs. 
- The main_program will be transformed to use a remote parameter server - to do parameter optimization. And the optimization graph will be put - into a parameter server program. - - Use different methods to split trainable variables to different - parameter servers. - - Steps to transpile trainer: - 1. split variable to multiple blocks, aligned by product(dim[1:]) (width). - 2. rename splited grad variables to add trainer_id suffix ".trainer_%d". - 3. modify trainer program add split_op to each grad variable. - 4. append send_op to send splited variables to server and fetch - params(splited blocks or origin param) from server. - 5. append concat_op to merge splited blocks to update local weights. - - Steps to transpile pserver: - 1. create new program for parameter server. - 2. create params and grad variables that assigned to current server instance. - 3. create a sub-block in the server side program - 4. append ops that should run on current server instance. - 5. add listen_and_serv op - - :param trainer_id: one unique id for each trainer in a job. - :type trainer_id: int - :param program: program to transpile, default is default_main_program - :type program: Program - :param pservers: parameter server endpoints like "m1:6174,m2:6174" - :type pservers: string - :param trainers: total number of workers/trainers in the job - :type trainers: int - :param split_method: A function to determin how to split variables - to different servers equally. - :type split_method: function - :type sync_mode: boolean default True - :param sync_mode: if sync_mode is set True, it means that dist transpiler - will transpile the program into sync_mode pserver and trainer program. + Run the transpiler. + + Args: + trainer_id (int): id for current trainer worker, if you have + n workers, the id may range from 0 ~ n-1 + program (Program|None): program to transpile, + default is fluid.default_main_program(). + pservers (str): comma separated ip:port string for the pserver + list. + trainers (int): number of trainers in the distributed job. + sync_mode (bool): Do sync training or not, default is True. """ - assert (callable(split_method)) if program is None: program = default_main_program() self.origin_program = program self.trainer_num = trainers self.sync_mode = sync_mode - # TODO(typhoonzero): currently trainer_id is fetched from cluster system - # like Kubernetes, we should port this to use etcd later when developing - # fluid distributed training with fault-tolerance. self.trainer_id = trainer_id pserver_endpoints = pservers.split(",") self.pserver_endpoints = pserver_endpoints - self.optimize_ops, params_grads = self._get_optimize_pass() - - # process lookup_table_op - # 1. check all lookup_table_op is distributed - # 2. check all lookup_table_op share the same table. - distributed_lookup_table_ops = [] - # support only one distributed_lookup_table now - self.table_name = None - for op in program.global_block().ops: - if op.type == LOOKUP_TABLE_TYPE: - if op.attrs['is_distributed'] is True: - if self.table_name is None: - self.table_name = op.input("W")[0] - if self.table_name != op.input("W")[0]: - raise RuntimeError("all distributed lookup_table_ops" - " should have only one table") - distributed_lookup_table_ops.append(op) - else: - if self.table_name is not None: - assert op.input("W")[0] != self.table_name - - self.has_distributed_lookup_table = len( - distributed_lookup_table_ops) > 0 - - # step1: For large parameters and gradients, split them into smaller - # blocks. 
- param_list = [] - grad_list = [] - for p, g in params_grads: - # skip parameter marked not trainable - if type(p) == Parameter and p.trainable == False: - continue - param_list.append(p) - grad_list.append(g) + self.optimize_ops, self.params_grads = self._get_optimize_pass() + + ps_dispatcher = self.config.split_method(self.pserver_endpoints) + self.has_distributed_lookup_table = self._has_distributed_lookup_table() + + # split and create vars, then put splited vars in dicts for later use. + self._init_splited_vars() + + # step 3.1: insert send op to send gradient vars to parameter servers + ps_dispatcher.reset() + send_vars = [] + + # in general cases, the number of pservers is times of 2, and this + # will lead to uneven distribution among weights and bias: + # fc_w@GRAD_trainer_0, fc_w@GRAD_trainer_1 --> pserver1 + # fc_b@GRAD_trainer_0, fc_b@GRAD_trainer_1 --> pserver2 + # shuffle the map will avoid the uneven distribution above + grad_var_mapping_items = list(self.grad_var_mapping.items()) + if not self.config.slice_var_up: + random.seed(self.trainer_num) + random.shuffle(grad_var_mapping_items) + + for orig_varname, splited_vars in grad_var_mapping_items: + eplist = ps_dispatcher.dispatch(splited_vars) + + if not self.config.slice_var_up: + assert (len(splited_vars) == 1) + + if len(splited_vars) == 1: + orig_varname = splited_vars[0].name + index = find_op_by_output_arg(program.global_block(), + orig_varname) + elif len(splited_vars) > 1: + orig_var = program.global_block().vars[orig_varname] + index = find_op_by_output_arg(program.global_block(), + orig_varname) + self._insert_split_op(program, orig_var, index, splited_vars) + index += 1 + else: + AssertionError("Can not insert the send op by original " + "variable name :", orig_varname) + + program.global_block()._insert_op( + index=index + 1, + type="send", + inputs={"X": splited_vars}, + outputs={}, + attrs={ + "epmap": eplist, + RPC_OP_ROLE_ATTR_NAME: RPC_OP_ROLE_ATTR_VALUE + }) + for _, var in enumerate(splited_vars): + send_vars.append(var) - if self.has_distributed_lookup_table: - param_list = [ - param for param in param_list if param.name != self.table_name - ] - grad_list = [ - grad for grad in grad_list - if grad.name != framework.grad_var_name(self.table_name) - ] - self.table_param_grad = [ - param_grad for param_grad in params_grads - if param_grad[0].name == self.table_name - ][0] - table_grad_var = self.table_param_grad[1] - self.table_grad_list = [ - program.global_block().create_var( - name="%s.trainer_%d.pserver_%d" % - (table_grad_var.name, trainer_id, index), - type=table_grad_var.type, - shape=table_grad_var.shape, - dtype=table_grad_var.dtype) - for index in range(len(self.pserver_endpoints)) - ] + if self.sync_mode: + program.global_block().append_op( + type="send_barrier", + inputs={}, + outputs={}, + attrs={ + "endpoints": pserver_endpoints, + "sync_mode": self.sync_mode, + RPC_OP_ROLE_ATTR_NAME: RPC_OP_ROLE_ATTR_VALUE + }) + + # step 3.2: insert recv op to receive parameters from parameter server + recv_vars = [] + for _, var in enumerate(send_vars): + recv_vars.append(self.grad_param_mapping[var]) + ps_dispatcher.reset() + eplist = ps_dispatcher.dispatch(recv_vars) - grad_blocks = split_dense_variable(grad_list, len(pserver_endpoints)) - param_blocks = split_dense_variable(param_list, len(pserver_endpoints)) - # step2: Create new vars for the parameters and gradients blocks and - # add ops to do the split. 
- grad_var_mapping = self._append_split_op(program, grad_blocks) - param_var_mapping = self._create_vars_from_blocklist(program, - param_blocks) - # step3: Add gradients as send op inputs and parameters as send - # op outputs. - send_inputs = [] - send_outputs = [] - for b in grad_blocks: # append by order - varname, block_id, _ = b.split(":") - send_inputs.append(grad_var_mapping[varname][int(block_id)]) - for b in param_blocks: - varname, block_id, _ = b.split(":") - send_outputs.append(param_var_mapping[varname][int(block_id)]) - # let send_op know which endpoint to send which var to, eplist has the same - # order as send_inputs. - eplist = split_method(send_inputs, pserver_endpoints) - # create mapping of endpoint -> split var to create pserver side program - self.param_grad_ep_mapping = dict() for i, ep in enumerate(eplist): - param = send_outputs[i] - grad = send_inputs[i] - if not self.param_grad_ep_mapping.has_key(ep): - self.param_grad_ep_mapping[ep] = {"params": [], "grads": []} - self.param_grad_ep_mapping[ep]["params"].append(param) - self.param_grad_ep_mapping[ep]["grads"].append(grad) - - rpc_client_var = program.global_block().create_var( - name=RPC_CLIENT_VAR_NAME, - persistable=True, - type=core.VarDesc.VarType.RAW) + self.param_grad_ep_mapping[ep]["params"].append(recv_vars[i]) + self.param_grad_ep_mapping[ep]["grads"].append(send_vars[i]) - # create send_op - program.global_block().append_op( - type="send", - inputs={"X": send_inputs}, - outputs={"Out": send_outputs, - "RPCClient": rpc_client_var}, - attrs={ - "endpoints": pserver_endpoints, - "epmap": eplist, - "sync_mode": self.sync_mode - }) # step4: Concat the parameters splits together after recv. - for varname, splited_var in param_var_mapping.iteritems(): + for varname, splited_var in list(self.param_var_mapping.items()): + eps = [] + for var in splited_var: + index = [v.name for v in recv_vars].index(var.name) + eps.append(eplist[index]) + + program.global_block().append_op( + type="recv", + inputs={}, + outputs={"Out": splited_var}, + attrs={ + "epmap": eps, + RPC_OP_ROLE_ATTR_NAME: RPC_OP_ROLE_ATTR_VALUE + }) + + if self.sync_mode: + program.global_block().append_op( + type="fetch_barrier", + inputs={}, + outputs={}, + attrs={ + "endpoints": pserver_endpoints, + RPC_OP_ROLE_ATTR_NAME: RPC_OP_ROLE_ATTR_VALUE + }) + + for varname, splited_var in list(self.param_var_mapping.items()): if len(splited_var) <= 1: continue orig_param = program.global_block().vars[varname] @@ -319,28 +313,41 @@ class DistributeTranspiler: attrs={"axis": 0}) if self.has_distributed_lookup_table: - self._replace_lookup_table_op_with_prefetch(program, rpc_client_var, - eplist) - self._split_table_grad_and_add_send_vars(program, rpc_client_var, - pserver_endpoints) + self._replace_lookup_table_op_with_prefetch(program, + pserver_endpoints) + self._split_table_grad_and_add_send_vars(program, pserver_endpoints) def get_trainer_program(self): + """ + Get transpiled trainer side program. + + Returns: + Program: trainer side program. + """ # remove optimize ops and add a send op to main_program + # FIXME(typhoonzero): Also ops like clip_gradient, lrn_decay? delete_ops(self.origin_program.global_block(), self.optimize_ops) - # FIXME(typhoonzero): serialize once will fix error occurs when clone. self.origin_program.__str__() return self.origin_program def get_pserver_program(self, endpoint): """ - Get pserver side program using the endpoint. - TODO(panyx0718): Revisit this assumption. what if #blocks > #pservers. 
- NOTE: assume blocks of the same variable is not distributed - on the same pserver, only change param/grad varnames for - trainers to fetch. + Get parameter server side program. + + Args: + endpoint (str): current parameter server endpoint. + + Returns: + Program: the program for current parameter server to run. """ + # TODO(panyx0718): Revisit this assumption. what if #blocks > #pservers. + # NOTE: assume blocks of the same variable is not distributed + # on the same pserver, only change param/grad varnames for + # trainers to fetch. + # step1 pserver_program = Program() + pserver_program.random_seed = self.origin_program.random_seed # step2: Create vars to receive vars at parameter servers. recv_inputs = [] for v in self.param_grad_ep_mapping[endpoint]["params"]: @@ -350,7 +357,6 @@ class DistributeTranspiler: # we don't need to create them when grad arrives. # change client side var name to origin name by # removing ".trainer_%d" suffix - suff_idx = v.name.find(".trainer_") if suff_idx >= 0: orig_var_name = v.name[:suff_idx] @@ -366,7 +372,7 @@ class DistributeTranspiler: dtype=v.dtype, shape=v.shape) if self.sync_mode and self.trainer_num > 1: - for trainer_id in xrange(self.trainer_num): + for trainer_id in range(self.trainer_num): var = pserver_program.global_block().create_var( name="%s.trainer_%d" % (orig_var_name, trainer_id), persistable=False, @@ -387,104 +393,141 @@ class DistributeTranspiler: # located on current pserver opt_op_on_pserver = [] for _, op in enumerate(self.optimize_ops): - if self._is_opt_op(op) and self._is_opt_op_on_pserver(endpoint, op): + if self._is_optimizer_op(op) and self._is_opt_op_on_pserver( + endpoint, op): opt_op_on_pserver.append(op) # step 3.3 # Iterate through the ops, and if an op and the optimize ops # which located on current pserver are in one set, then # append it into the sub program. - # We try to put optimization program run parallelly, assume - # optimization program always looks like: - # - # prevop -> prevop -> opt op -> following op -> following op; -> - # prevop -> prevop -> opt op -> following op -> following op; -> - # global op -> global op - # - # we put operators that can run parallelly to many program blocks. - # in above example, we seperate ops by the ";". Global ops must run - # after all the optimize ops finished. - global_ops = [] - # HACK: optimization global ops only used to scale beta1 and beta2 - # replace it with dependency engine. 
- for op in self.optimize_ops: - if self._is_adam_connected_op(op): - global_ops.append(op) - def __append_optimize_op__(op, block, grad_to_block_id): - if self._is_opt_op(op): + def __append_optimize_op__(op, block, grad_to_block_id, merged_var, + lr_ops): + if self._is_optimizer_op(op): self._append_pserver_ops(block, op, endpoint, grad_to_block_id, - default_main_program()) - else: + self.origin_program, merged_var) + elif op not in lr_ops: self._append_pserver_non_opt_ops(block, op) + def __op_have_grad_input__(op): + for varname in op.input_arg_names: + if varname.find("@GRAD") >= 0: + return varname + return "" + + def __clone_lr_op_sub_block__(op, program, lr_block): + if not op.has_attr('sub_block'): + return + + origin_block_desc = op.attr('sub_block') + origin_block = self.origin_program.block(origin_block_desc.id) + assert isinstance(origin_block, Block) + # we put the new sub block to new block to follow the block + # hierarchy of the original blocks + new_sub_block = program.create_block(lr_block.idx) + + # clone vars + for var in origin_block.vars: + new_sub_block._clone_variable(var) + + # clone ops + for origin_op in origin_block.ops: + cloned_op = self._clone_lr_op(program, new_sub_block, origin_op) + # clone sub_block of op + __clone_lr_op_sub_block__(cloned_op, program, new_sub_block) + + # reset the block of op + op.set_attr('sub_block', new_sub_block) + # append lr decay ops to the child block if exists lr_ops = self._get_lr_ops() + # record optimize blocks and we can run them on pserver parallel + optimize_blocks = [] if len(lr_ops) > 0: lr_decay_block = pserver_program.create_block( pserver_program.num_blocks - 1) + optimize_blocks.append(lr_decay_block) for _, op in enumerate(lr_ops): - self._append_pserver_non_opt_ops(lr_decay_block, op) + cloned_op = self._append_pserver_non_opt_ops(lr_decay_block, op) + # append sub blocks to pserver_program in lr_decay_op + __clone_lr_op_sub_block__(cloned_op, pserver_program, + lr_decay_block) # append op to the current block grad_to_block_id = [] pre_block_idx = pserver_program.num_blocks - 1 for idx, opt_op in enumerate(opt_op_on_pserver): per_opt_block = pserver_program.create_block(pre_block_idx) + optimize_blocks.append(per_opt_block) + # append grad merging ops before clip and weight decay + # cases may like: + # L2Decay op -> clip op -> optimize + for _, op in enumerate(self.optimize_ops): + # find the origin @GRAD var before clipping + grad_varname_for_block = __op_have_grad_input__(op) + if ufind.is_connected(op, opt_op) and grad_varname_for_block: + merged_var = self._append_pserver_grad_merge_ops( + per_opt_block, grad_varname_for_block, endpoint, + grad_to_block_id, self.origin_program) + break # append optimize op once then append other ops. 
for _, op in enumerate(self.optimize_ops): # optimizer is connected to itself if ufind.is_connected(op, opt_op) and op not in global_ops: - __append_optimize_op__(op, per_opt_block, grad_to_block_id) + __append_optimize_op__(op, per_opt_block, grad_to_block_id, + merged_var, lr_ops) + # dedup grad to ids list + grad_to_block_id = list(set(grad_to_block_id)) # append global ops if global_ops: opt_state_block = pserver_program.create_block( pserver_program.num_blocks - 1) + optimize_blocks.append(opt_state_block) for glb_op in global_ops: __append_optimize_op__(glb_op, opt_state_block, - grad_to_block_id) - - # NOT USED: single block version: - # - # for _, op in enumerate(self.optimize_ops): - # for _, opt_op in enumerate(opt_op_on_pserver): - # if ufind.is_connected(op, opt_op): - # __append_optimize_op__(glb_op, optimize_block) - # break + grad_to_block_id, None, lr_ops) # process distributed lookup_table - prefetch_block = None + prefetch_var_name_to_block_id = [] if self.has_distributed_lookup_table: pserver_index = self.pserver_endpoints.index(endpoint) table_opt_block = self._create_table_optimize_block( - pserver_index, pserver_program, pre_block_idx) - prefetch_block = self._create_prefetch_block( + pserver_index, pserver_program, pre_block_idx, grad_to_block_id) + optimize_blocks.append(table_opt_block) + prefetch_var_name_to_block_id = self._create_prefetch_block( pserver_index, pserver_program, table_opt_block) + checkpoint_block_id = self._create_checkpoint_save_block( + pserver_program, table_opt_block.idx) # NOTE: if has_distributed_lookup_table is False, then prefetch_block will # not be executed, so it's safe to use optimize_block to hold the place if self.has_distributed_lookup_table: - assert prefetch_block is not None + assert len(prefetch_var_name_to_block_id) > 0 else: - assert prefetch_block is None - prefetch_block = pserver_program.global_block() + assert len(prefetch_var_name_to_block_id) == 0 + + attrs = { + "optimize_blocks": optimize_blocks, + "endpoint": endpoint, + "Fanin": self.trainer_num, + "sync_mode": self.sync_mode, + "grad_to_block_id": grad_to_block_id, + } + if len(prefetch_var_name_to_block_id) > 0: + attrs['prefetch_var_name_to_block_id'] \ + = prefetch_var_name_to_block_id + attrs['checkpint_block_id'] = checkpoint_block_id # step5 append the listen_and_serv op pserver_program.global_block().append_op( type="listen_and_serv", inputs={'X': recv_inputs}, outputs={}, - attrs={ - "OptimizeBlock": pserver_program.block(1), - "endpoint": endpoint, - "Fanin": self.trainer_num, - "PrefetchBlock": prefetch_block, - "sync_mode": self.sync_mode, - "grad_to_block_id": grad_to_block_id - }) - - pserver_program.sync_with_cpp() + attrs=attrs) + + pserver_program._sync_with_cpp() return pserver_program def get_startup_program(self, endpoint, pserver_program): @@ -492,9 +535,18 @@ class DistributeTranspiler: Get startup program for current parameter server. Modify operator input variables if there are variables that were split to several blocks. + + Args: + endpoint (str): current pserver endpoint. + pserver_program (Program): call get_pserver_program first and + pass the result here. + + Returns: + Program: parameter server side startup program. """ s_prog = Program() - orig_s_prog = framework.default_startup_program() + orig_s_prog = default_startup_program() + s_prog.random_seed = orig_s_prog.random_seed params = self.param_grad_ep_mapping[endpoint]["params"] def _get_splited_name_and_shape(varname): @@ -507,13 +559,12 @@ class DistributeTranspiler: # 1. 
create vars in pserver program to startup program pserver_vars = pserver_program.global_block().vars created_var_map = dict() - for _, var in pserver_vars.iteritems(): - tmpvar = s_prog.global_block().clone_variable(var) + for _, var in list(pserver_vars.items()): + tmpvar = s_prog.global_block()._clone_variable(var) created_var_map[var.name] = tmpvar # 2. rename op outputs for op in orig_s_prog.global_block().ops: - new_inputs = dict() new_outputs = dict() # do not append startup op if var is not on this pserver op_on_pserver = False @@ -526,27 +577,164 @@ class DistributeTranspiler: op_on_pserver = True new_outputs[key] = pserver_vars[op.output(key)[0]] - # most startup program ops have no inputs - new_inputs = self._get_input_map_from_op(pserver_vars, op) - if op_on_pserver: + # most startup program ops have no inputs + new_inputs = self._get_input_map_from_op(pserver_vars, op) + if op.type in [ "gaussian_random", "fill_constant", "uniform_random" ]: - op.attrs["shape"] = new_outputs["Out"].shape + op.set_attr("shape", list(new_outputs["Out"].shape)) s_prog.global_block().append_op( type=op.type, inputs=new_inputs, outputs=new_outputs, - attrs=op.attrs) + attrs=op.all_attrs()) return s_prog + # ====================== private transpiler functions ===================== + + def _has_distributed_lookup_table(self): + # process lookup_table_op + # 1. check all lookup_table_op is distributed + # 2. check all lookup_table_op share the same table. + distributed_lookup_table_ops = [] + # support only one distributed_lookup_table now + self.table_name = None + for op in self.origin_program.global_block().ops: + if op.type == LOOKUP_TABLE_TYPE: + if op.attr('is_distributed') is True: + if self.table_name is None: + self.table_name = op.input("W")[0] + if self.table_name != op.input("W")[0]: + raise RuntimeError("all distributed lookup_table_ops" + " should have only one table") + distributed_lookup_table_ops.append(op) + else: + if self.table_name is not None: + assert op.input("W")[0] != self.table_name + + return len(distributed_lookup_table_ops) > 0 + + def _update_dist_lookup_table_vars(self, param_list, grad_list, + params_grads): + # TODO(wuyi): put find a way to put dist lookup table stuff all together. + # update self.table_param_grad and self.trainer_side_table_grad_list + program = self.origin_program + if self.has_distributed_lookup_table: + param_list = [ + param for param in param_list if param.name != self.table_name + ] + grad_list = [ + grad for grad in grad_list + if grad.name != grad_var_name(self.table_name) + ] + self.table_param_grad = [ + param_grad for param_grad in params_grads + if param_grad[0].name == self.table_name + ][0] + table_grad_var = self.table_param_grad[1] + if self.sync_mode: + self.trainer_side_table_grad_list = [ + program.global_block().create_var( + name="%s.trainer_%d.pserver_%d" % + (table_grad_var.name, self.trainer_id, index), + type=table_grad_var.type, + shape=table_grad_var.shape, + dtype=table_grad_var.dtype) + for index in range(len(self.pserver_endpoints)) + ] + else: + self.trainer_side_table_grad_list = [ + program.global_block().create_var( + name="%s.pserver_%d" % (table_grad_var.name, index), + type=table_grad_var.type, + shape=table_grad_var.shape, + dtype=table_grad_var.dtype) + for index in range(len(self.pserver_endpoints)) + ] + return param_list, grad_list + + def _init_splited_vars(self): + # update these mappings for further transpile: + # 1. param_var_mapping: param var name -> [splited params vars] + # 2. 
grad_var_mapping: grad var name -> [splited grads vars] + # 3. grad_param_mapping: grad.blockx -> param.blockx + # 4. param_grad_ep_mapping: ep -> {"params": [], "grads": []} + + param_list = [] + grad_list = [] + param_grad_set = set() + for p, g in self.params_grads: + # skip parameter marked not trainable + if type(p) == Parameter and p.trainable == False: + continue + if p.name not in param_grad_set: + param_list.append(p) + param_grad_set.add(p.name) + if g.name not in param_grad_set: + grad_list.append(g) + param_grad_set.add(g.name) + + param_list, grad_list = self._update_dist_lookup_table_vars( + param_list, grad_list, self.params_grads) + + if self.config.slice_var_up: + # when we slice var up into blocks, we will slice the var according to + # pserver services' count. A pserver may have two or more listening ports. + grad_blocks = slice_variable(grad_list, + len(self.pserver_endpoints), + self.config.min_block_size) + param_blocks = slice_variable(param_list, + len(self.pserver_endpoints), + self.config.min_block_size) + else: + # when we do NOT slice var up into blocks, we will always slice params + # grads into one block. + grad_blocks = slice_variable(grad_list, 1, + self.config.min_block_size) + param_blocks = slice_variable(param_list, 1, + self.config.min_block_size) + assert (len(grad_blocks) == len(param_blocks)) + + # origin_varname -> [splited_var] + self.param_var_mapping = self._create_vars_from_blocklist( + self.origin_program, param_blocks) + self.grad_var_mapping = self._create_vars_from_blocklist( + self.origin_program, + grad_blocks, + add_trainer_suffix=self.trainer_num > 1) + self.grad_param_mapping = dict() + for g, p in zip(grad_blocks, param_blocks): + g_name, g_bid, _ = g.split(":") + p_name, p_bid, _ = p.split(":") + self.grad_param_mapping[self.grad_var_mapping[g_name][int(g_bid)]] = \ + self.param_var_mapping[p_name][int(p_bid)] + + # create mapping of endpoint -> split var to create pserver side program + self.param_grad_ep_mapping = dict() + [ + self.param_grad_ep_mapping.update({ + ep: { + "params": [], + "grads": [] + } + }) for ep in self.pserver_endpoints + ] + # transpiler function for dis lookup_table - def _replace_lookup_table_op_with_prefetch(self, program, rpc_client_var, - eplist): + def _replace_lookup_table_op_with_prefetch(self, program, + pserver_endpoints): # 1. 
replace lookup_table_op with split_ids_op -> prefetch_op -> sum_op - self.prefetch_input_vars = None - self.prefetch_output_vars = None + # self.all_prefetch_input_vars = + # [[var0_prefetch_in_pserver0, var0_prefetch_in_pserver1] + # [var1_prefetch_in_pserver0, var1_prefetch_in_pserver1]] + self.all_prefetch_input_vars = [] + + # self.all_prefetch_input_vars = + # [[var0_prefetch_in_pserver0, var0_prefetch_in_pserver1] + # [var1_prefetch_in_pserver0, var1_prefetch_in_pserver1]] + self.all_prefetch_output_vars = [] continue_search_lookup_table_op = True while continue_search_lookup_table_op: @@ -556,26 +744,27 @@ class DistributeTranspiler: if op.type == LOOKUP_TABLE_TYPE: continue_search_lookup_table_op = True - op_index = list(all_ops).index(op) + lookup_table_op_index = list(all_ops).index(op) ids_name = op.input("Ids") out_name = op.output("Out") - if self.prefetch_input_vars is None: - ids_var = program.global_block().vars[ids_name[0]] - self.prefetch_input_vars = self.create_splited_vars( - source_var=ids_var, - block=program.global_block(), - tag="_prefetch_in_") - if self.prefetch_output_vars is None: - out_var = program.global_block().vars[out_name[0]] - self.prefetch_output_vars = self.create_splited_vars( - source_var=out_var, - block=program.global_block(), - tag="_prefetch_out_") + ids_var = program.global_block().vars[ids_name[0]] + prefetch_input_vars = self._create_splited_vars( + source_var=ids_var, + block=program.global_block(), + tag="_prefetch_in_") + self.all_prefetch_input_vars.append(prefetch_input_vars) + + out_var = program.global_block().vars[out_name[0]] + prefetch_output_vars = self._create_splited_vars( + source_var=out_var, + block=program.global_block(), + tag="_prefetch_out_") + self.all_prefetch_output_vars.append(prefetch_output_vars) # insert split_ids_op - program.global_block().insert_op( - index=op_index, + program.global_block()._insert_op( + index=lookup_table_op_index, type="split_ids", inputs={ 'Ids': [ @@ -583,103 +772,107 @@ class DistributeTranspiler: for varname in ids_name ] }, - outputs={"Out": self.prefetch_input_vars}) + outputs={"Out": prefetch_input_vars}) # insert prefetch_op - program.global_block().insert_op( - index=op_index + 1, + program.global_block()._insert_op( + index=lookup_table_op_index + 1, type="prefetch", - inputs={'X': self.prefetch_input_vars}, - outputs={ - "Out": self.prefetch_output_vars, - "RPCClient": rpc_client_var - }, - attrs={"epmap": eplist}) + inputs={'X': prefetch_input_vars}, + outputs={"Out": prefetch_output_vars}, + attrs={ + "epmap": pserver_endpoints, + # FIXME(qiao) temporarily disable this config because prefetch + # is not act as other rpc op, it's more like a forward op + # RPC_OP_ROLE_ATTR_NAME: RPC_OP_ROLE_ATTR_VALUE + }) # insert concat_op - program.global_block().insert_op( - index=op_index + 2, - type="concat", - inputs={'X': self.prefetch_output_vars}, + program.global_block()._insert_op( + index=lookup_table_op_index + 2, + type="merge_ids", + inputs={ + 'Ids': [ + program.global_block().vars[varname] + for varname in ids_name + ], + 'X': prefetch_output_vars + }, outputs={ "Out": [ program.global_block().vars[varname] for varname in out_name ] - }, - attrs={"axis": 0}) + }) # delete lookup_table_op delete_ops(program.global_block(), [op]) # break for loop break - def _split_table_grad_and_add_send_vars(self, program, rpc_client_var, - pserver_endpoints): - # 2. 
add split_ids_op and send_vars_op to send gradient to pservers + def _split_table_grad_and_add_send_vars(self, program, pserver_endpoints): + # 2. add split_ids_op and send_op to send gradient to pservers # there should only be one table_name all_ops = program.global_block().ops - table_grad_name = framework.grad_var_name(self.table_name) + table_grad_name = grad_var_name(self.table_name) for op in all_ops: if table_grad_name in op.output_arg_names: op_index = list(all_ops).index(op) # insert split_ids_op - program.global_block().insert_op( + program.global_block()._insert_op( index=op_index + 1, type="split_ids", inputs={ 'Ids': [program.global_block().vars[table_grad_name]] }, - outputs={"Out": self.table_grad_list}) - program.global_block().insert_op( + outputs={"Out": self.trainer_side_table_grad_list}) + program.global_block()._insert_op( index=op_index + 2, - type="send_vars", - inputs={'X': self.table_grad_list}, - outputs={"RPCClient": rpc_client_var}, - attrs={"sync_send": True, - "epmap": pserver_endpoints}) + type="send", + inputs={'X': self.trainer_side_table_grad_list}, + outputs={}, + attrs={ + "sync_mode": True, + "epmap": pserver_endpoints, + RPC_OP_ROLE_ATTR_NAME: RPC_OP_ROLE_ATTR_VALUE + }) break def _create_prefetch_block(self, pserver_index, pserver_program, optimize_block): # STEP: create prefetch block table_var = pserver_program.global_block().vars[self.table_name] - prefetch_block = pserver_program.create_block(optimize_block.idx) - trainer_ids = self.prefetch_input_vars[pserver_index] - pserver_ids = pserver_program.global_block().create_var( - name=trainer_ids.name, - type=trainer_ids.type, - shape=trainer_ids.shape, - dtype=trainer_ids.dtype) - trainer_out = self.prefetch_output_vars[pserver_index] - pserver_out = pserver_program.global_block().create_var( - name=trainer_out.name, - type=trainer_out.type, - shape=trainer_out.shape, - dtype=trainer_out.dtype) - prefetch_block.append_op( - type="lookup_sparse_table", - inputs={'Ids': pserver_ids, - "W": table_var}, - outputs={"Out": pserver_out}, - attrs={ - "is_sparse": True, # has no effect on lookup_table op - "is_distributed": True, - "padding_idx": -1 - }) - return prefetch_block + prefetch_var_name_to_block_id = [] + for index in range(len(self.all_prefetch_input_vars)): + prefetch_block = pserver_program.create_block(optimize_block.idx) + trainer_ids = self.all_prefetch_input_vars[index][pserver_index] + pserver_ids = pserver_program.global_block().create_var( + name=trainer_ids.name, + type=trainer_ids.type, + shape=trainer_ids.shape, + dtype=trainer_ids.dtype) + trainer_out = self.all_prefetch_output_vars[index][pserver_index] + pserver_out = pserver_program.global_block().create_var( + name=trainer_out.name, + type=trainer_out.type, + shape=trainer_out.shape, + dtype=trainer_out.dtype) + prefetch_block.append_op( + type="lookup_sparse_table", + inputs={'Ids': pserver_ids, + "W": table_var}, + outputs={"Out": pserver_out}, + attrs={ + "is_sparse": True, # has no effect on lookup_table op + "is_distributed": True, + "padding_idx": -1 + }) + prefetch_var_name_to_block_id.append(trainer_ids.name + ":" + str( + prefetch_block.idx)) + return prefetch_var_name_to_block_id def _create_table_optimize_block(self, pserver_index, pserver_program, - pre_block_idx): - def _clone_var(block, var, persistable=True): - assert isinstance(var, Variable) - return block.create_var( - name=var.name, - shape=var.shape, - dtype=var.dtype, - type=var.type, - persistable=persistable) - + pre_block_idx, grad_to_block_id): # STEP: 
create table optimize block # create table param and grad var in pserver program origin_param_var = self.origin_program.global_block().vars[ @@ -690,25 +883,24 @@ class DistributeTranspiler: dtype=origin_param_var.dtype, type=core.VarDesc.VarType.SELECTED_ROWS, persistable=True) - grad_var = _clone_var( - pserver_program.global_block(), - self.origin_program.global_block().vars[framework.grad_var_name( - self.table_name)], - persistable=False) + # parameter must be selected rows + param_var.desc.set_type(core.VarDesc.VarType.SELECTED_ROWS) + grad_var = pserver_program.global_block()._clone_variable( + self.origin_program.global_block().vars[grad_var_name( + self.table_name)]) # create table optimize block in pserver program table_opt_op = [ op for op in self.optimize_ops - if op.input("Param")[0] == self.table_name + if 'Param' in op.input_names and op.input("Param")[0] == + self.table_name ][0] table_opt_block = pserver_program.create_block(pre_block_idx) - # only support sgd now - assert table_opt_op.type == "sgd" if self.sync_mode: # create grad vars in pserver program table_grad_var = self.table_param_grad[1] - table_grad_list = [ + pserver_side_table_grad_list = [ pserver_program.global_block().create_var( name="%s.trainer_%d.pserver_%d" % (table_grad_var.name, index, pserver_index), @@ -718,11 +910,22 @@ class DistributeTranspiler: for index in range(self.trainer_num) ] - # append sum op for table_grad_list + # append sum op for pserver_side_table_grad_list table_opt_block.append_op( type="sum", - inputs={"X": table_grad_list}, - outputs={"Out": [grad_var]}) + inputs={"X": pserver_side_table_grad_list}, + outputs={"Out": [grad_var]}, + attrs={"use_mkldnn": False}) + else: + # in async_mode, for table gradient, it also need to be splited to each parameter server + origin_grad_name = grad_var.name + splited_grad_name = self.trainer_side_table_grad_list[ + pserver_index].name + if not splited_grad_name.startswith(origin_grad_name): + raise ValueError("origin_grad_var: " + splited_grad_name + + " grad_var:" + grad_var.name) + grad_var = pserver_program.global_block()._rename_var( + origin_grad_name, splited_grad_name) lr_var = pserver_program.global_block().vars[table_opt_op.input( "LearningRate")[0]] @@ -732,15 +935,39 @@ class DistributeTranspiler: "LearningRate": [lr_var] } outputs = {"ParamOut": [param_var]} - table_opt_block.append_op( - type=table_opt_op.type, - inputs=inputs, - outputs=outputs, - attrs=table_opt_op.attrs) + # only support sgd now + import logging + logging.warn( + "distribute lookup table only support sgd optimizer, change it's optimizer to sgd instead of " + + table_opt_op.type) + table_opt_block.append_op(type="sgd", inputs=inputs, outputs=outputs) + + # add table parameter gradient and it's block id to grad_to_block_id + grad_to_block_id.append(grad_var.name + ":" + str(table_opt_block.idx)) return table_opt_block - # ====================== private transpiler functions ===================== + def _create_checkpoint_save_block(self, pserver_program, pre_block_idx): + """ + create a new block to handle save checkpoint. 
+ """ + import os + + pserver_program.global_block().create_var( + name="kLookupTablePath", + persistable=True, + type=core.VarDesc.VarType.RAW) + + checkpoint_save_block = pserver_program.create_block(pre_block_idx) + # this 'file_path' do not be used in save lookup table variable + checkpoint_save_block.append_op( + type='save', + inputs={'X': [self.table_name]}, + outputs={}, + attrs={'file_path': "none"}) + + return checkpoint_save_block.idx + def _create_vars_from_blocklist(self, program, block_list, @@ -749,22 +976,32 @@ class DistributeTranspiler: Create vars for each split. NOTE: only grads need to be named for different trainers, use add_trainer_suffix to rename the grad vars. - :return: A dict mapping from original var name to each var split. + Args: + program (ProgramDesc): ProgramDesc which gradients blong. + block_list (list[(varname, block_id, block_size)]): List of gradient blocks. + add_trainer_suffix (Bool): Add trainer suffix to new variable's name if set True. + Returns: + var_mapping (dict(varname->[new_varname_variable])):A dict mapping + from original var name to each var split. """ + + # varname->[(block_id, current_block_size)] block_map = dict() + var_mapping = dict() for block_str in block_list: varname, offset, size = block_str.split(":") - if not block_map.has_key(varname): + if varname not in block_map: block_map[varname] = [] - block_map[varname].append((long(offset), long(size))) - for varname, splited in block_map.iteritems(): + block_map[varname].append((int(offset), int(size))) + + for varname, splited in list(block_map.items()): orig_var = program.global_block().var(varname) if len(splited) == 1: if self.sync_mode and add_trainer_suffix: new_var_name = "%s.trainer_%d" % \ (orig_var.name, self.trainer_id) - program.global_block().rename_var(varname, new_var_name) + program.global_block()._rename_var(varname, new_var_name) var_mapping[varname] = \ [program.global_block().var(new_var_name)] else: @@ -798,10 +1035,10 @@ class DistributeTranspiler: type=orig_var.type, shape=splited_shape) # flattend splited var var_mapping[varname].append(var) - program.global_block().sync_with_cpp() + program.global_block()._sync_with_cpp() return var_mapping - def create_splited_vars(self, source_var, block, tag): + def _create_splited_vars(self, source_var, block, tag): return [ block.create_var( name=str(source_var.name + tag + str(index)), @@ -812,7 +1049,6 @@ class DistributeTranspiler: ] def _clone_var(self, block, var, persistable=True): - assert isinstance(var, Variable) return block.create_var( name=var.name, shape=var.shape, @@ -821,41 +1057,31 @@ class DistributeTranspiler: lod_level=var.lod_level, persistable=persistable) - def _append_split_op(self, program, gradblocks): - # Split variables that need to be split and append respective ops - add_suffix = False - if self.trainer_num > 1: - add_suffix = True - var_mapping = self._create_vars_from_blocklist( - program, gradblocks, add_trainer_suffix=add_suffix) - for varname, splited_vars in var_mapping.iteritems(): - # variable that don't need to split have empty splited_vars - if len(splited_vars) <= 1: - continue - orig_var = program.global_block().vars[varname] - if orig_var.type == core.VarDesc.VarType.SELECTED_ROWS: - height_sections = [] - for v in splited_vars: - height_sections.append(v.shape[0]) - program.global_block().append_op( - type="split_selected_rows", - inputs={"X": orig_var}, - outputs={"Out": splited_vars}, - attrs={"height_sections": height_sections}) - elif orig_var.type == 
core.VarDesc.VarType.LOD_TENSOR: - sections = [] - for v in splited_vars: - sections.append(v.shape[0]) - program.global_block().append_op( - type="split_byref", - inputs={"X": orig_var}, - outputs={"Out": splited_vars}, - attrs={"sections": sections} # assume split evenly - ) - else: - AssertionError("Variable type should be in set " - "[LOD_TENSOR, SELECTED_ROWS]") - return var_mapping + def _insert_split_op(self, program, orig_var, index, splited_vars): + if orig_var.type == core.VarDesc.VarType.SELECTED_ROWS: + height_sections = [] + for v in splited_vars: + height_sections.append(v.shape[0]) + program.global_block()._insert_op( + index=index + 1, + type="split_selected_rows", + inputs={"X": orig_var}, + outputs={"Out": splited_vars}, + attrs={"height_sections": height_sections}) + elif orig_var.type == core.VarDesc.VarType.LOD_TENSOR: + sections = [] + for v in splited_vars: + sections.append(v.shape[0]) + program.global_block()._insert_op( + index=index + 1, + type="split_byref", + inputs={"X": orig_var}, + outputs={"Out": splited_vars}, + attrs={"sections": sections} # assume split evenly + ) + else: + AssertionError("Variable type should be in set " + "[LOD_TENSOR, SELECTED_ROWS]") def _get_optimizer_input_shape(self, op_type, varkey, orig_shape, param_shape): @@ -884,66 +1110,111 @@ class DistributeTranspiler: pass return orig_shape - def _orig_varname(self, varname): - suff_idx = varname.find(".trainer_") + def _get_varname_parts(self, varname): + # returns origin, blockid, trainerid orig_var_name = "" - if suff_idx >= 0: - orig_var_name = varname[:suff_idx] + trainer_part = "" + block_part = "" + trainer_idx = varname.find(".trainer_") + if trainer_idx >= 0: + trainer_part = varname[trainer_idx + 1:] + else: + trainer_idx = len(varname) + block_index = varname.find(".block") + if block_index >= 0: + block_part = varname[block_index + 1:trainer_idx] + else: + block_index = len(varname) + orig_var_name = varname[0:min(block_index, trainer_idx)] + return orig_var_name, block_part, trainer_part + + def _orig_varname(self, varname): + orig, _, _ = self._get_varname_parts(varname) + return orig + + def _append_pserver_grad_merge_ops(self, optimize_block, + grad_varname_for_block, endpoint, + grad_to_block_id, origin_program): + program = optimize_block.program + pserver_block = program.global_block() + grad_block = None + for g in self.param_grad_ep_mapping[endpoint]["grads"]: + if self._orig_varname(g.name) == \ + self._orig_varname(grad_varname_for_block): + grad_block = g + break + if not grad_block: + # do not append this op if current endpoint + # is not dealing with this grad block + return + orig_varname, block_name, trainer_name = self._get_varname_parts( + grad_block.name) + if block_name: + merged_var_name = '.'.join([orig_varname, block_name]) else: - orig_var_name = varname - return orig_var_name + merged_var_name = orig_varname + merged_var = \ + pserver_block.vars[merged_var_name] + grad_to_block_id.append(merged_var.name + ":" + str(optimize_block.idx)) + if self.sync_mode and self.trainer_num > 1: + vars2merge = [] + for i in range(self.trainer_num): + per_trainer_name = "%s.trainer_%d" % \ + (merged_var_name, i) + vars2merge.append(pserver_block.vars[per_trainer_name]) + + optimize_block.append_op( + type="sum", + inputs={"X": vars2merge}, + outputs={"Out": merged_var}, + attrs={"use_mkldnn": False}) + # TODO(panyx0718): What if it's SELECTED_ROWS. 
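For reference, `_get_varname_parts` above decodes the transpiler's dotted naming convention, where split blocks and per-trainer copies carry ".blockN" and ".trainer_N" suffixes. A minimal standalone sketch of the same parsing (the variable name is hypothetical):

.. code-block:: python

    def get_varname_parts(varname):
        # strip ".trainer_N" first, then ".blockN"; the rest is the origin
        trainer_part, block_part = "", ""
        trainer_idx = varname.find(".trainer_")
        if trainer_idx >= 0:
            trainer_part = varname[trainer_idx + 1:]
        else:
            trainer_idx = len(varname)
        block_idx = varname.find(".block")
        if block_idx >= 0:
            block_part = varname[block_idx + 1:trainer_idx]
        else:
            block_idx = len(varname)
        origin = varname[0:min(block_idx, trainer_idx)]
        return origin, block_part, trainer_part

    assert get_varname_parts("fc_w@GRAD.block0.trainer_1") == \
        ("fc_w@GRAD", "block0", "trainer_1")
    assert get_varname_parts("fc_w@GRAD") == ("fc_w@GRAD", "", "")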
+ if not merged_var.type == core.VarDesc.VarType.SELECTED_ROWS: + optimize_block.append_op( + type="scale", + inputs={"X": merged_var}, + outputs={"Out": merged_var}, + attrs={"scale": 1.0 / float(self.trainer_num)}) + return merged_var def _append_pserver_ops(self, optimize_block, opt_op, endpoint, - grad_to_block_id, origin_program): + grad_to_block_id, origin_program, merged_var): program = optimize_block.program pserver_block = program.global_block() new_inputs = dict() + # update param/grad shape first, then other inputs like # moment can use the updated shape + def _get_param_block(opt_op): + # param is already created on global program + param_block = None + for p in self.param_grad_ep_mapping[endpoint]["params"]: + if same_or_split_var(p.name, opt_op.input("Param")[0]): + param_block = p + break + return param_block + for key in opt_op.input_names: if key == "Grad": - grad_block = None - for g in self.param_grad_ep_mapping[endpoint]["grads"]: - if same_or_split_var( - self._orig_varname(g.name), - self._orig_varname(opt_op.input(key)[0])): - grad_block = g - break - if not grad_block: - # do not append this op if current endpoint - # is not dealing with this grad block - return - merged_var = \ - pserver_block.vars[self._orig_varname(grad_block.name)] - grad_to_block_id.append(merged_var.name + ":" + str( - optimize_block.idx)) - if self.sync_mode and self.trainer_num > 1: - vars2merge = [] - for i in xrange(self.trainer_num): - per_trainer_name = "%s.trainer_%d" % \ - (self._orig_varname(grad_block.name), i) - vars2merge.append(pserver_block.vars[per_trainer_name]) - - optimize_block.append_op( - type="sum", - inputs={"X": vars2merge}, - outputs={"Out": merged_var}) - # TODO(panyx0718): What if it's SELECTED_ROWS. - if not merged_var.type == core.VarDesc.VarType.SELECTED_ROWS: - optimize_block.append_op( - type="scale", - inputs={"X": merged_var}, - outputs={"Out": merged_var}, - attrs={"scale": 1.0 / float(self.trainer_num)}) - new_inputs[key] = merged_var + # For RMSProp optimizer + elif key == "Moment" or key == "MeanSquare": + param_block = _get_param_block(opt_op) + if not param_block: + return + moment_var = origin_program.global_block().vars[opt_op.input( + key)[0]] + tmpvar = pserver_block.create_var( + name=moment_var.name, + persistable=moment_var.persistable, + dtype=moment_var.dtype, + # change to use same shape as param + # TODO(typhoonzero): didn't append .block in the var name, + # may affect checkpoint saving? Need to verify. + shape=param_block.shape) + new_inputs[key] = tmpvar elif key == "Param": - # param is already created on global program - param_block = None - for p in self.param_grad_ep_mapping[endpoint]["params"]: - if same_or_split_var(p.name, opt_op.input(key)[0]): - param_block = p - break + param_block = _get_param_block(opt_op) if not param_block: return tmpvar = pserver_block.create_var( @@ -956,7 +1227,7 @@ class DistributeTranspiler: # learning rate variable has already be created by non-optimize op, # don't create it once again. 
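In sync mode, the "sum" and "scale" pair appended above reduces the per-trainer gradient copies to their mean before the optimizer runs. A numpy sketch of the arithmetic (values hypothetical):

.. code-block:: python

    import numpy as np

    trainer_num = 2
    # per-trainer copies of one gradient block on the pserver, e.g.
    # "fc_w@GRAD.trainer_0" and "fc_w@GRAD.trainer_1"
    grads = [np.array([0.2, 0.4]), np.array([0.6, 0.8])]

    merged = np.sum(grads, axis=0)                 # the "sum" op
    merged = merged * (1.0 / float(trainer_num))   # the "scale" op
    # merged == array([0.4, 0.6]), the averaged gradient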
lr_varname = opt_op.input(key)[0] - if pserver_block.vars.has_key(lr_varname): + if lr_varname in pserver_block.vars: new_inputs[key] = pserver_block.vars[opt_op.input(key)[0]] else: origin_var = origin_program.global_block().vars[lr_varname] @@ -969,7 +1240,7 @@ class DistributeTranspiler: for key in opt_op.input_names: new_shape = None - if key in ["Param", "Grad", "LearningRate"]: + if key in ["Param", "Grad", "LearningRate", "Moment", "MeanSquare"]: continue var = self.origin_program.global_block().vars[opt_op.input(key)[0]] # update accumulator variable shape @@ -992,19 +1263,57 @@ class DistributeTranspiler: type=opt_op.type, inputs=new_inputs, outputs=outputs, - attrs=opt_op.attrs) + attrs=opt_op.all_attrs()) + + def _is_splited_grad_var(self, var, var_dict): + grad_block = None + # TODO(minqiyang): replace these items() with six.iteritems() to + # improve memory + for _, g in list(var_dict.items()): + if self._orig_varname(g.name) == self._orig_varname(var.name): + if g.name.find(".trainer_") == -1: + grad_block = g + break + return grad_block + + def _clone_lr_op(self, program, block, op): + inputs = self._get_input_map_from_op( + self.origin_program.global_block().vars, op) + for key, varlist in list(inputs.items()): + if not isinstance(varlist, list): + varlist = [varlist] + for var in varlist: + if var not in program.global_block().vars: + block._clone_variable(var) + + outputs = self._get_output_map_from_op( + self.origin_program.global_block().vars, op) + for key, varlist in list(outputs.items()): + if not isinstance(varlist, list): + varlist = [varlist] + for var in varlist: + if var not in program.global_block().vars: + block._clone_variable(var) + + return block.append_op( + type=op.type, inputs=inputs, outputs=outputs, attrs=op.all_attrs()) def _append_pserver_non_opt_ops(self, optimize_block, opt_op): program = optimize_block.program # Append the ops for parameters that do not need to be optimized/updated inputs = self._get_input_map_from_op( self.origin_program.global_block().vars, opt_op) - for varlist in inputs.itervalues(): + for key, varlist in list(inputs.items()): if not isinstance(varlist, list): varlist = [varlist] - for var in varlist: - if not program.global_block().vars.has_key(var.name): + # for ops like clipping and weight decay, get the splited var + # for inputs/outputs + grad_block = self._is_splited_grad_var( + var, program.global_block().vars) + if grad_block: + inputs[key] = grad_block + elif var.name not in program.global_block().vars: program.global_block().create_var( name=var.name, persistable=var.persistable, @@ -1013,61 +1322,44 @@ class DistributeTranspiler: outputs = self._get_output_map_from_op( self.origin_program.global_block().vars, opt_op) - - for varlist in outputs.itervalues(): + for key, varlist in list(outputs.items()): if not isinstance(varlist, list): varlist = [varlist] - for var in varlist: - program.global_block().clone_variable(var) - - optimize_block.append_op( + grad_block = self._is_splited_grad_var( + var, program.global_block().vars) + if grad_block: + outputs[key] = grad_block + elif var.name not in program.global_block().vars: + program.global_block()._clone_variable(var) + + return optimize_block.append_op( type=opt_op.type, inputs=inputs, outputs=outputs, - attrs=opt_op.attrs) + attrs=opt_op.all_attrs()) def _is_op_connected(self, op1, op2): # If one op's input is another op's output or # one op's output is another op's input, we say # the two operator is connected. 
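That definition reduces to two set intersections over the ops' input/output argument names, which the rewritten check performs directly. A toy rendering (argument names hypothetical):

.. code-block:: python

    def is_connected(op1_inputs, op1_outputs, op2_inputs, op2_outputs):
        # connected iff one op's outputs feed the other op's inputs
        return bool(set(op1_outputs) & set(op2_inputs)) or \
            bool(set(op1_inputs) & set(op2_outputs))

    assert is_connected(["x"], ["y"], ["y"], ["z"])      # op1 feeds op2
    assert is_connected(["y"], ["z"], ["x"], ["y"])      # op2 feeds op1
    assert not is_connected(["a"], ["b"], ["c"], ["d"])  # unrelated ops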
- def _append_inname_remove_beta(varname_list): - op_input_names = [] - for in_name in varname_list: - # HACK: remove beta1 and beta2 to avoid let all - # ops connected. - if in_name.startswith("beta2_pow_acc") or \ - in_name.startswith("beta1_pow_acc"): - continue - else: - op_input_names.append(in_name) - return op_input_names - - op1_input_names = _append_inname_remove_beta(op1.desc.input_arg_names()) - op1_output_names = op1.desc.output_arg_names() - - op2_input_names = _append_inname_remove_beta(op2.desc.input_arg_names()) - op2_output_names = op2.desc.output_arg_names() - - if set(op1_output_names) & set(op2_input_names) or \ - set(op1_input_names) & set(op2_output_names): + if set(op1.desc.output_arg_names()) & set(op2.desc.input_arg_names()) or \ + set(op1.desc.input_arg_names()) & set(op2.desc.output_arg_names()): return True return False def _create_ufind(self, optimize_ops): # Create a unit find data struct by optimize ops ufind = UnionFind(optimize_ops) - for i in xrange(len(optimize_ops)): - for j in xrange(i, len(optimize_ops)): + for i in range(len(optimize_ops)): + for j in range(i, len(optimize_ops)): op1 = optimize_ops[i] op2 = optimize_ops[j] if self._is_op_connected(op1, op2): ufind.union(op1, op2) return ufind - def _is_opt_op(self, op): - # NOTE: It's a HACK implement. - # optimize op: SGDOptimize, MomentumOptimizer, AdamOptimizer and etc... + def _is_optimizer_op(self, op): if "Param" in op.input_names and \ "LearningRate" in op.input_names: return True @@ -1117,7 +1409,7 @@ class DistributeTranspiler: # find learning rate variables by optimize op lr_vars = set() for op in self.optimize_ops: - if self._is_opt_op(op): + if self._is_optimizer_op(op): lr_vars.add(op.input("LearningRate")[0]) find_ops = [] @@ -1134,7 +1426,7 @@ class DistributeTranspiler: # NOTE: we need to skip all optimize ops, since it is connected # with forward/backward ops and lr ops, we only need the lr ops. if op1 != op2 and self._is_op_connected(op1, op2) and \ - not self._is_opt_op(op1) and not self._is_opt_op(op2): + not self._is_optimizer_op(op1) and not self._is_optimizer_op(op2): ufind.union(op1, op2) # find all ops which is related with lr var for op1 in block.ops: @@ -1145,31 +1437,41 @@ class DistributeTranspiler: break return lr_ops + def _is_opt_role_op(self, op): + # NOTE: depend on oprole to find out whether this op is for + # optimize + op_maker = core.op_proto_and_checker_maker + optimize_role = core.op_proto_and_checker_maker.OpRole.Optimize + if op_maker.kOpRoleAttrName() in op.attr_names and \ + int(op.all_attrs()[op_maker.kOpRoleAttrName()]) == int(optimize_role): + return True + return False + def _get_optimize_pass(self): + """ + Get optimizer operators, parameters and gradients from origin_program + Returns: + opt_ops (list): optimize operators. + params_grads (dict): paramter->gradient. + """ block = self.origin_program.global_block() opt_ops = [] params_grads = [] + origin_var_dict = self.origin_program.global_block().vars for op in block.ops: - if self._is_opt_op(op): - opt_ops.append(op) - params_grads.append((self.origin_program.global_block().var( - op.input("Param")[0]), - self.origin_program.global_block().var( - op.input("Grad")[0]))) - elif self._is_adam_connected_op(op): + if self._is_opt_role_op(op): opt_ops.append(op) + # HACK(wuyi): if we find grad vars from input of optimize + # ops, we may get the output of clip op. Use syntax "@GRAD" + # and op_role_var to get the pair. 
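The loop that follows implements this pairing; a toy rendering of the rule (names and the attribute value are hypothetical):

.. code-block:: python

    # an optimize op's op_role_var attribute records (param, grad) names,
    # so an input ending in "@GRAD" can be mapped back to its parameter
    input_name = "fc_w@GRAD"
    op_role_var = ["fc_w", "fc_w@GRAD"]

    if input_name.find("@GRAD") != -1:
        param_name = op_role_var[0]

    assert param_name == "fc_w"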
+ for input_name in op.input_arg_names: + if input_name.find("@GRAD") != -1 and \ + op.attr(RPC_OP_ROLE_ATTR_NAME): + param_name = op.attr(OP_ROLE_VAR_ATTR_NAME)[0] + params_grads.append([ + origin_var_dict[param_name], + origin_var_dict[input_name] + ]) else: pass return opt_ops, params_grads - - def _is_adam_connected_op(self, op): - """ - A hack function to determinate whether the input operator - is connected to optimize operator. - """ - if op.type == "scale": - for in_name in op.input_arg_names: - if in_name.startswith("beta1_pow_acc") or \ - in_name.startswith("beta2_pow_acc"): - return True - return False diff --git a/python/paddle/fluid/transpiler/distribute_transpiler_simple.py b/python/paddle/fluid/transpiler/distribute_transpiler_simple.py deleted file mode 100644 index ea8c27cdca..0000000000 --- a/python/paddle/fluid/transpiler/distribute_transpiler_simple.py +++ /dev/null @@ -1,254 +0,0 @@ -# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from ..framework import Program, default_main_program, Parameter, Variable -from ..layer_helper import LayerHelper - - -def hash_name_to_server(params_grads, pserver_endpoints): - """ - :param param_grads: - :return: a map of pserver endpoint -> - params -> [param list] - grads -> [grad list] - """ - - def _hash_param(param_name, total): - return hash(param_name) % total - - param_grad_map = dict() - for param, grad in params_grads: - if param.trainable is True and grad is not None: - server_id = _hash_param(param.name, len(pserver_endpoints)) - server_for_param = pserver_endpoints[server_id] - if not param_grad_map.has_key(server_for_param): - param_grad_map[server_for_param] = {"params": [], "grads": []} - param_grad_map[server_for_param]["params"].append(param) - param_grad_map[server_for_param]["grads"].append(grad) - - return param_grad_map - - -def round_robin(params_grads, pserver_endpoints): - assert (len(params_grads) > len(pserver_endpoints)) - - param_grad_map = dict() - pserver_idx = 0 - for param, grad in params_grads: - if param.trainable is True: - server_for_param = pserver_endpoints[pserver_idx] - if not param_grad_map.has_key(server_for_param): - param_grad_map[server_for_param] = {"params": [], "grads": []} - - param_grad_map[server_for_param]["params"].append(param) - param_grad_map[server_for_param]["grads"].append(grad) - - pserver_idx += 1 - if pserver_idx >= len(pserver_endpoints): - pserver_idx = 0 - return param_grad_map - - -class SimpleDistributeTranspiler: - def transpile(self, - optimize_ops, - params_grads, - program=None, - pservers="127.0.0.1:6174", - trainers=1, - split_method=round_robin): - """ - Transpile the program to a distributed data-parallelism programs. - - The main_program will be transform to use a remote parameter server - to do parameter optimization. And the optimization graph will be put - in to a parameter server program. - - Use different methods to split trainable varialbles to different - parameter servers. 
- - Example to run: - - exe = fluid.Executor(place) - t = fluid.DistributeTranspiler() - t.transpile(optimize_ops, params_grads, pservers="127.0.0.1:6174", trainers=1) - - pserver_endpoint = os.getenv("PSERVER") - if pserver_endpoint: - pserver_prog = t.get_pserver_program(pserver_endpoint, optimize_ops) - exe.run(fluid.default_startup_program()) - exe.run(pserver_prog) - else: - feeder = fluid.DataFeeder(feed_list=[images, label], place=place) - exe.run(fluid.default_startup_program()) - - for pass_id in range(PASS_NUM): - ... - - :param optimize_ops: op list of optimization, should be the - return value of Optimizer.minimize - :type optimize_ops: list - :param program: program to optimize, default default_main_program - :param pservers: parameter server endpoints like "m1:6174,m2:6174" - :type pservers: string - - :return: return a list of programs - """ - if program is None: - program = default_main_program() - self.program = program - self.trainers = trainers - self.optimize_ops = optimize_ops - self._optimize_distributed( - optimize_ops, - program, - params_grads, - pservers=pservers, - trainers=trainers, - split_method=split_method) - - def _clone_param(self, block, v): - assert isinstance(v, Parameter) - new_p = Parameter( - block=block, - shape=v.shape, - dtype=v.dtype, - type=v.type, - lod_level=v.lod_level, - stop_gradient=v.stop_gradient, - trainable=v.trainable, - optimize_attr=v.optimize_attr, - regularizer=v.regularizer, - name=v.name) - block.vars[new_p.name] = new_p - - def _clone_var(self, block, var): - assert isinstance(var, Variable) - return block.create_var( - name=var.name, - shape=var.shape, - dtype=var.dtype, - type=var.type, - lod_level=var.lod_level, - persistable=var.persistable) - - def _optimize_distributed(self, optimize_ops, program, params_and_grads, - **kwargs): - if kwargs.has_key("split_method"): - split_method = kwargs["split_method"] - else: - split_method = round_robin - - assert (callable(split_method)) - pserver_endpoints = kwargs["pservers"].split(",") - self.param_grad_map = split_method(params_and_grads, pserver_endpoints) - - send_op_ordered_inputs = [] - send_op_ordered_outputs = [] - epmap = [] - for ep, v in self.param_grad_map.iteritems(): - send_op_ordered_inputs.extend(v["grads"]) - send_op_ordered_outputs.extend(v["params"]) - for i in v["grads"]: - epmap.append(ep) - send_op = program.global_block().append_op( - type="send", - inputs={"X": send_op_ordered_inputs - }, # inputs is a list of tensors to be send - outputs={"Out": send_op_ordered_outputs}, - attrs={"endpoints": pserver_endpoints, - "epmap": epmap}) - - def get_trainer_program(self): - # remove optimize ops and add a send op to main_program - self.program.global_block().delete_ops(self.optimize_ops) - return self.program - - def _create_var_for_trainers(self, block, var, trainers): - var_list = [] - for i in xrange(trainers): - var_each = block.create_var( - name="%s.trainer_%d" % (var.name, i), - psersistable=var.persistable, - dtype=var.dtype, - shape=var.shape) - var_list.append(var_each) - return var_list - - def get_pserver_program(self, endpoint, optimize_ops): - pserver_program = Program() - for v in self.param_grad_map[endpoint]["params"]: - self._clone_param(pserver_program.global_block(), v) - - optimize_sub_program = Program() - grad_var_names = [ - var.name for var in self.param_grad_map[endpoint]["grads"] - ] - for opt_op in optimize_ops: - for _, var in opt_op.inputs.iteritems(): - # NOTE: append operators to merge gradients from multiple - # trainers. 
If trainers == 1, this is not needed. - if self.trainers > 1 and var.name in grad_var_names: - vars2merge = self._create_var_for_trainers( - optimize_sub_program.global_block(), var, self.trainers) - merged_var = optimize_sub_program.global_block().create_var( - name=var.name, - persistable=var.persistable, - dtype=var.dtype, - shape=var.shape) - optimize_sub_program.global_block().append_op( - type="sum", - inputs={"X": vars2merge}, - outputs={"Out": merged_var}) - optimize_sub_program.global_block().append_op( - type="scale", - inputs={"X": merged_var}, - outputs={"Out": merged_var}, - attrs={"scale": 1.0 / float(self.trainers)}) - else: - optimize_sub_program.global_block().create_var( - name=var.name, - persistable=var.persistable, - dtype=var.dtype, - shape=var.shape) - - if opt_op.inputs.has_key("Grad"): - if opt_op.inputs["Grad"].name in grad_var_names: - optimize_sub_program.global_block().append_op( - type=opt_op.type, - inputs=opt_op.inputs, - outputs=opt_op.outputs, - attrs=opt_op.attrs) - else: - optimize_sub_program.global_block().append_op( - type=opt_op.type, - inputs=opt_op.inputs, - outputs=opt_op.outputs, - attrs=opt_op.attrs) - pserver_program.global_block().append_op( - type="recv", - inputs={"RX": - self.param_grad_map[endpoint]["grads"]}, # grads to recv - outputs={}, - attrs={ - "OptimizeBlock": optimize_sub_program.global_block(), - "endpoint": endpoint, - "ParamList": - [p.name for p in self.param_grad_map[endpoint]["params"]], - "GradList": - [p.name for p in self.param_grad_map[endpoint]["grads"]], - "Trainers": self.trainers - }) - pserver_program.sync_with_cpp() - return pserver_program diff --git a/python/paddle/fluid/transpiler/distributed_splitter.py b/python/paddle/fluid/transpiler/distributed_splitter.py deleted file mode 100644 index 060c1df8ad..0000000000 --- a/python/paddle/fluid/transpiler/distributed_splitter.py +++ /dev/null @@ -1,57 +0,0 @@ -# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -def hash_name(varlist, pserver_endpoints): - """ - hash variable names to several endpoints. - - Args: - varlist(list): a list of Variables - - Returns(dict): a map of pserver endpoint -> varname - """ - - def _hash_block(block_str, total): - return hash(block_str) % total - - eplist = [] - for var in varlist: - server_id = _hash_block(var.name(), len(pserver_endpoints)) - server_for_param = pserver_endpoints[server_id] - eplist.append(server_for_param) - return eplist - - -def round_robin(varlist, pserver_endpoints): - """ - Distribute variables to several endpoints. 
- Args: - varlist(list): a list of variables - pserver_endpoints(list): a list of pserver endpoints - - Returns(list[int]): the endpoint for each variable - """ - assert (len(varlist) >= len(pserver_endpoints)) - - eplist = [] - pserver_idx = 0 - for var in varlist: - server_for_param = pserver_endpoints[pserver_idx] - eplist.append(server_for_param) - - pserver_idx += 1 - if pserver_idx >= len(pserver_endpoints): - pserver_idx = 0 - return eplist diff --git a/python/paddle/fluid/transpiler/inference_transpiler.py b/python/paddle/fluid/transpiler/inference_transpiler.py index 202aa76084..87f20bbccf 100644 --- a/python/paddle/fluid/transpiler/inference_transpiler.py +++ b/python/paddle/fluid/transpiler/inference_transpiler.py @@ -12,23 +12,41 @@ # See the License for the specific language governing permissions and # limitations under the License. +import os import numpy as np from .. import core from ..framework import Program from ..executor import global_scope -class InferenceTranspiler: +class InferenceTranspiler(object): + ''' + Convert the fluid program to optimized inference program. + + There are several optimizations: + + - fuse convolution and batch normalization + - fuse batch normalization and relu (MKLDNN only) + + Examples: + + .. code-block:: python + + # As InferenceTranspiler will modify the original program, + # please clone before use it. + inference_transpiler_program = program.clone() + t = fluid.InferenceTranspiler() + t.transpile(inference_transpiler_program, place) + ''' + def transpile(self, program, place, scope=None): ''' - Transpile the program. Support only fuse batch normalization now. + Run the transpiler. - :param program: program to transpile - :type program: Program - :param place: inference place - :type place: Place - :param scope: inference scope - :type scope: Scope or None + Args: + program (Program): program to transpile + place (Place): inference place + scope (Scope|None): inference Scope ''' if not isinstance(program, Program): raise TypeError("program should be as Program type") @@ -39,59 +57,111 @@ class InferenceTranspiler: scope = global_scope() if not isinstance(scope, core.Scope): raise TypeError("scope should be as Scope type or None") - self.fuse_batch_norm(program, place, scope) + self._fuse_batch_norm(program, place, scope) + self._fuse_relu_mkldnn(program) + + def _fuse_relu_mkldnn(self, program): + ''' + Transpile the program by fused relu activation for MKLDNN program. + + Relu activation following batch norm OP can be fused by adding + :math:`fuse_with_relu` attribute to batch norm OP. + + The result of fuse is: + + - before: + + - batch_norm->relu->any_other_op + + - after: + + - batch_norm->any_other_op + + :param program: program to transpile + :type program: Program + ''' + use_mkldnn = bool(os.getenv("FLAGS_use_mkldnn", False)) + if not use_mkldnn: + return + + self.block = program.block(0) + + i = 0 + while i < len(self.block.ops) - 1: + current_op = self.block.ops[i] + if current_op.type in ['batch_norm']: + next_op = self.block.ops[i + 1] + if next_op.type == 'relu': + # modify bnorm OP to include relu + current_op.set_attr("fuse_with_relu", True) + # remove relu OP + self.block._remove_op(i + 1) + i = i + 1 - def fuse_batch_norm(self, program, place, scope): + self._remove_unused_var() + # TODO(luotao): use clone() method to flush the program.desc in force, + # since some large program.desc will not be flushed immediately. + # And a better solution will be considered later. 
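`_fuse_relu_mkldnn` above is a single linear scan over the block's ops; a toy version of the same scan over op-type strings (the op list is hypothetical):

.. code-block:: python

    ops = ["conv2d", "batch_norm", "relu", "pool2d"]

    i = 0
    while i < len(ops) - 1:
        if ops[i] == "batch_norm" and ops[i + 1] == "relu":
            # real pass: current_op.set_attr("fuse_with_relu", True)
            del ops[i + 1]  # real pass: self.block._remove_op(i + 1)
        i += 1

    assert ops == ["conv2d", "batch_norm", "pool2d"]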
+ program = program.clone() + + def _fuse_batch_norm(self, program, place, scope): ''' Transpile the program by fused batch normalization. - - The batch normalization followed the convolution or fully connected layer - can be integrated with them. Doing so will give us a forward acceleration, + + The batch normalization followed the convolution or fully connected layer + can be integrated with them. Doing so will give us a forward acceleration, especially in environments like mobile or embedded. - - For input X: - - Conv process: X = input * W + bias - - Batch norm process: X' = (X - mean) / std - - Scale Process: Y = a * X' + b + + For input :math:`X`: + + - Conv process: :math:`X = input * W + bias` + - Batch norm process: :math:`X' = (X - mean) / std` + - Scale Process: :math:`Y = a * X' + b` After fuse into one operation: - Y = (input * W + bias - mean) / std * a + b - = input * a * W / std + ((bias - mean) / std * a + b) + .. math:: + + Y &= (input * W + bias - mean) / std * a + b \\\\ + &= input * a * W / std + ((bias - mean) / std * a + b) + + The operator transformation is: - The operator transformation is: - before: + - conv->batch_norm->any_other_op (bias == 0) - conv->elementwise_add->batch_norm->any_other_op (bias != 0) - - after: + + - after: + - conv->elementwise_add->any_other_op - + The transpile stages are: + 1. insert elementwise_add op when bias == 0. 2. fuse the batch_norm's parameters to conv and elementwise_add operators. 3. remove batch_norm ops which are not used in any other ops. 4. adjust the input of any_other_op to be the output of elementwise_add operator. 5. remove unused variables. - :param program: program to transpile - :type program: Program - :param place: inference place - :type place: Place - :param scope: inference scope - :type scope: Scope + Args: + program (Program): program to transpile + place (Place): inference place + scope (Scope): inference Scope + ''' self.scope = scope self.place = place self.block = program.block(0) - self.input_map = {} # store the input names should be adjusted + self.input_map = {} # store the input names should be adjusted i = 0 - while i < len(self.block.ops): + while i < len(self.block.ops) - 2: current_op = self.block.ops[i] # TODO(luotao1): consider only conv2d now. fc would be delt later. if current_op.type in ['conv2d']: - # TODO(luotao1): consider single chain network now. - # For branch network, we counldn't use block.ops[i + 1] as + # TODO(luotao1): consider single chain network now. + # For branch network, we counldn't use block.ops[i + 1] as # the judgment condition. next_op = self.block.ops[i + 1] # conv2d without bias @@ -101,7 +171,7 @@ class InferenceTranspiler: # fuse batch_norm self._fuse_param(current_op, next_op, bias_op, 0) # remove batch_norm_op - self.block.remove_op(i + 2) + self.block._remove_op(i + 2) i = i + 1 # conv2d with bias, the next_op.type is elementwise_add elif (next_op.type == 'elementwise_add'): @@ -110,23 +180,23 @@ class InferenceTranspiler: # fuse batch_norm self._fuse_param(current_op, next_next_op, next_op, 1) # remove batch_norm_op - self.block.remove_op(i + 2) + self.block._remove_op(i + 2) i = i + 1 i = i + 1 self._adjust_input() self._remove_unused_var() - # TODO(luotao): use clone() method to flush the program.desc in force, - # since some large program.desc will not be flushed immediately. + # TODO(luotao): use clone() method to flush the program.desc in force, + # since some large program.desc will not be flushed immediately. 
# And a better solution will be considered later. program = program.clone() # ====================== private transpiler functions ===================== def _insert_bias_op(self, index, current_op, bn_op): ''' - Construct elementwise_add operator for adding bias + Construct elementwise_add operator for adding bias and insert it into program. - + :param index: insert location of bias_op :type index: Int :param current_op: current operator (conv or fc) @@ -142,7 +212,7 @@ class InferenceTranspiler: y_var = self.block.var(bn_op.input("Bias")[0]) out_var = self.block.var(bn_op.output("Y")[0]) - bias_op = self.block.insert_op( + bias_op = self.block._insert_op( index, type="elementwise_add", inputs={"X": x_var, @@ -154,14 +224,14 @@ class InferenceTranspiler: def _fuse_param(self, current_op, bn_op, bias_op, with_bias): ''' fuse the batch_norm_op' parameters to current_op (conv or fc) - + :param current_op: current operator (conv or fc) :type current_op: Operator :param bn_op: batch norm operator :type bn_op: Operator :param bias_op: elementwise_add operator for adding bias :type bias_op: Operator - :param with_bias: If current operator has bias, with_bias = 1; otherwise 0. + :param with_bias: If current operator has bias, with_bias = 1; otherwise 0. :type with_bias: Int ''' @@ -235,6 +305,6 @@ class InferenceTranspiler: args += current_op.output_arg_names args = list(set(args)) # unique the input and output arguments - for var in self.block.vars.keys(): + for var in list(self.block.vars.keys()): if var not in args: - self.block.remove_var(var) + self.block._remove_var(var) diff --git a/python/paddle/fluid/transpiler/memory_optimization_transpiler.py b/python/paddle/fluid/transpiler/memory_optimization_transpiler.py index 49034b47b2..20ba7ed2b0 100644 --- a/python/paddle/fluid/transpiler/memory_optimization_transpiler.py +++ b/python/paddle/fluid/transpiler/memory_optimization_transpiler.py @@ -14,8 +14,10 @@ from collections import defaultdict from .. import core -from ..framework import Program, default_main_program, Parameter, Variable +from ..framework import Program, default_main_program, Parameter from ..backward import _rename_arg_ +from functools import reduce +from six.moves import range dtype_to_size = { core.VarDesc.VarType.FP16: 2, @@ -24,7 +26,8 @@ dtype_to_size = { core.VarDesc.VarType.INT16: 2, core.VarDesc.VarType.INT32: 4, core.VarDesc.VarType.INT64: 8, - core.VarDesc.VarType.BOOL: 1 + core.VarDesc.VarType.BOOL: 1, + core.VarDesc.VarType.UINT8: 1, } SUB_BLOCK_OPS = [ @@ -106,7 +109,7 @@ class ControlFlowGraph(object): # Repeatedly apply liveness updates until the algorithm stablize # on a complete set live input vars and live output vars. 
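The loop below iterates the classic backward dataflow equations, live_in[i] = use[i] | (live_out[i] - defs[i]) and live_out[i] = union of live_in[s] over successors s, until a fixed point. A toy pass over two hypothetical ops:

.. code-block:: python

    use = {0: {"x"}, 1: {"y"}}
    defs = {0: {"y"}, 1: {"z"}}
    succ = {0: [1], 1: []}
    live_in = {0: set(), 1: set()}
    live_out = {0: set(), 1: set()}

    changed = True
    while changed:
        changed = False
        for i in (1, 0):  # sweep the op list backwards
            new_out = set()
            for s in succ[i]:
                new_out |= live_in[s]
            new_in = use[i] | (new_out - defs[i])
            if (new_in, new_out) != (live_in[i], live_out[i]):
                live_in[i], live_out[i], changed = new_in, new_out, True

    assert live_in[0] == {"x"} and live_out[0] == {"y"}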
while True: - for i in range(self.op_size, 0, -1): + for i in reversed(list(range(self.op_size))): live_in[i] = set(self._live_in[i]) live_out[i] = set(self._live_out[i]) for s in self._successors[i]: @@ -156,9 +159,11 @@ class ControlFlowGraph(object): if op.type() == "fill_constant" and op.attr("force_cpu") == True: self._skip_opt.update(op.output_arg_names()) - def release_memory(self): + def release_memory(self, skip_opt_set=None): self._dataflow_analyze() self._update_skip_opt_set() + if skip_opt_set: + self._skip_opt.update(skip_opt_set) fwd_id = 0 bwd_id = 0 for i in range(self.op_size): @@ -169,12 +174,13 @@ class ControlFlowGraph(object): is_forward = i < self._forward_num in_diff, out_diff = self._get_diff(self._live_in[i], self._live_out[i]) - can_optimize = filter( - lambda x: self._check_var_validity(block_desc, x, is_forward), - in_diff) + can_optimize = [ + x for x in in_diff + if self._check_var_validity(block_desc, x, is_forward) + ] if can_optimize: index = i + fwd_id + 1 if is_forward else i - self._forward_num + bwd_id + 1 - delete_op = block_desc.insert_op(index) + delete_op = block_desc._insert_op(index) delete_op.set_type("delete_var") delete_op.set_input("X", can_optimize) if is_forward: @@ -182,7 +188,7 @@ class ControlFlowGraph(object): else: bwd_id += 1 - def memory_optimize(self, level=0): + def memory_optimize(self, skip_opt_set=None, level=0): def compare_shape(x_shape, cache_shape, opt_level): if opt_level == 0: return x_shape == cache_shape @@ -199,6 +205,9 @@ class ControlFlowGraph(object): self._dataflow_analyze() self._update_skip_opt_set() + # update skip set to meet users' demand + if skip_opt_set: + self._skip_opt.update(skip_opt_set) self.pool = [] for i in range(self.op_size): op = self._ops[i] @@ -207,9 +216,10 @@ class ControlFlowGraph(object): block_desc = op.block() is_forward = i < self._forward_num if self.pool: - defs_can_optimize = filter( - lambda x: self._check_var_validity(block_desc, x, is_forward), - self._defs[i]) + defs_can_optimize = [ + x for x in self._defs[i] + if self._check_var_validity(block_desc, x, is_forward) + ] out_pair = [ (x, self._find_var(block_desc, x, is_forward).shape()) for x in defs_can_optimize @@ -255,9 +265,10 @@ class ControlFlowGraph(object): break in_diff, _ = self._get_diff(self._live_in[i], self._live_out[i]) - can_optimize = filter( - lambda x: self._check_var_validity(block_desc, x, is_forward), - in_diff) + can_optimize = [ + x for x in in_diff + if self._check_var_validity(block_desc, x, is_forward) + ] if can_optimize: for var_name in can_optimize: self.pool.append((var_name, self._find_var( @@ -318,6 +329,8 @@ def _process_sub_block_pair(pdesc, sub_block_pair): sub_op_output = set() sub_op_output.update(sub_op_dict[fwd_id].output_arg_names()) sub_op_output.update(sub_op_dict[grad_id].output_arg_names()) + sub_op_output.update(sub_op_dict[fwd_id].input_arg_names()) + sub_op_output.update(sub_op_dict[grad_id].input_arg_names()) ops_list.append((sub_block_ops, block_op_size, sub_op_output)) # Process rest fwd_op block ops @@ -329,6 +342,7 @@ def _process_sub_block_pair(pdesc, sub_block_pair): sub_block_ops.append(sub_block.op(i)) sub_op_output = set() sub_op_output.update(sub_op_dict[fwd_id].output_arg_names()) + sub_op_output.update(sub_op_dict[fwd_id].input_arg_names()) ops_list.append((sub_block_ops, sub_block_op_size, sub_op_output)) return ops_list @@ -343,13 +357,17 @@ def _get_cfgs(input_program): pdesc = input_program.get_desc() block_desc = pdesc.block(0) op_size = block_desc.op_size() - # Get 
global block ops - ops_list.append( - ([block_desc.op(i) for i in range(op_size)], op_size, set())) # Only process one level of nested subblock. ops_list.extend(_process_sub_block_pair(pdesc, SUB_BLOCK_PAIR)) + skip_opt_set = set() + for _, _, skip_opt in ops_list: + skip_opt_set.update(skip_opt) + + # Get global block ops + ops_list.insert( + 0, ([block_desc.op(i) for i in range(op_size)], op_size, skip_opt_set)) cfgs = [ ControlFlowGraph(input_program, ops, forward_num, skip_opt) for ops, forward_num, skip_opt in ops_list @@ -357,7 +375,7 @@ def _get_cfgs(input_program): return cfgs -def memory_optimize(input_program, print_log=False, level=0): +def memory_optimize(input_program, skip_opt_set=None, print_log=False, level=0): """Optimize memory by reusing var memory. Note: it doesn't not support subblock nested in subblock. @@ -373,10 +391,20 @@ def memory_optimize(input_program, print_log=False, level=0): PRINT_LOG = print_log cfgs = _get_cfgs(input_program) for cfg in cfgs: - cfg.memory_optimize(level) + cfg.memory_optimize(skip_opt_set=skip_opt_set, level=level) -def release_memory(input_program): +def release_memory(input_program, skip_opt_set=None): + """ + Modify the input program and insert :code:`delete_op` to early drop not used + variables. The modification will be performed inplace. + + Notes: This is an experimental API and could be removed in next few + releases. Users should not use this API. + + Args: + input_program(Program): The program will be inserted :code:`delete_op`. + """ cfgs = _get_cfgs(input_program) for cfg in cfgs: - cfg.release_memory() + cfg.release_memory(skip_opt_set=skip_opt_set) diff --git a/python/paddle/fluid/transpiler/ps_dispatcher.py b/python/paddle/fluid/transpiler/ps_dispatcher.py new file mode 100644 index 0000000000..dcffadd531 --- /dev/null +++ b/python/paddle/fluid/transpiler/ps_dispatcher.py @@ -0,0 +1,88 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +class PSDispatcher(object): + """ + PSDispatcher is the base class for dispatching vars + into different pserver instance. + You need to implement the `dispatch` inferface. + """ + + def __init__(self, pserver_endpoints): + self._eps = pserver_endpoints + self._step = 0 + + @property + def eps(self): + return self._eps + + def reset(self): + self._step = 0 + + def dispatch(self, varlist): + """ + Args: + varlist(list): a list of Variables + Returns: + a map of pserver endpoint -> varname + """ + AssertionError("Interface has not been implemented.") + + +class HashName(PSDispatcher): + """ + Hash variable names to several endpoints using python + "hash()" function. + + Args: + pserver_endpoints (list): list of endpoint(ip:port). 
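        A usage sketch (the endpoints and ``varlist`` are hypothetical):

        .. code-block:: python

            eps = ["127.0.0.1:6174", "127.0.0.1:6175"]
            dispatcher = HashName(eps)
            # each variable lands on eps[hash(var.name()) % len(eps)],
            # so placement is stable for a given variable name
            eplist = dispatcher.dispatch(varlist)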
+ """ + + def __init__(self, pserver_endpoints): + super(self.__class__, self).__init__(pserver_endpoints) + + def _hash_block(self, block_str, total): + return hash(block_str) % total + + def dispatch(self, varlist): + eplist = [] + for var in varlist: + server_id = self._hash_block(var.name(), len(self._eps)) + server_for_param = self._eps[server_id] + eplist.append(server_for_param) + return eplist + + +class RoundRobin(PSDispatcher): + """ + Distribute variables to serveral endpoints using + RondRobin method. + + Args: + pserver_endpoints (list): list of endpoint(ip:port). + """ + + def __init__(self, pserver_endpoints): + super(self.__class__, self).__init__(pserver_endpoints) + + def dispatch(self, varlist): + eplist = [] + for var in varlist: + server_for_param = self._eps[self._step] + eplist.append(server_for_param) + self._step += 1 + if self._step >= len(self._eps): + self._step = 0 + return eplist diff --git a/python/paddle/fluid/unique_name.py b/python/paddle/fluid/unique_name.py index 33c53113ae..b125eba4f8 100644 --- a/python/paddle/fluid/unique_name.py +++ b/python/paddle/fluid/unique_name.py @@ -14,9 +14,10 @@ import collections import contextlib +import six import sys -__all__ = ['generate', 'switch', 'guard', 'UniqueNameGenerator'] +__all__ = ['generate', 'switch', 'guard'] class UniqueNameGenerator(object): @@ -67,8 +68,10 @@ def switch(new_generator=None): @contextlib.contextmanager def guard(new_generator=None): - if isinstance(new_generator, basestring): + if isinstance(new_generator, six.string_types): new_generator = UniqueNameGenerator(new_generator) + elif isinstance(new_generator, six.binary_type): + new_generator = UniqueNameGenerator(new_generator.decode()) old = switch(new_generator) yield switch(old) diff --git a/python/paddle/libs/__init__.py b/python/paddle/libs/__init__.py new file mode 100644 index 0000000000..34d4f4d07e --- /dev/null +++ b/python/paddle/libs/__init__.py @@ -0,0 +1,15 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# used for setup.py.in to store the thirdparty shared libraries diff --git a/python/paddle/reader/__init__.py b/python/paddle/reader/__init__.py index 3b059735a9..678026cf95 100644 --- a/python/paddle/reader/__init__.py +++ b/python/paddle/reader/__init__.py @@ -66,9 +66,9 @@ An example implementation for multiple item data reader creator: TODO(yuyang18): Should we add whole design doc here? 
""" -import decorator -from decorator import * +import paddle.reader.decorator +from paddle.reader.decorator import * -import creator +import paddle.reader.creator __all__ = decorator.__all__ + ['creator'] diff --git a/python/paddle/reader/creator.py b/python/paddle/reader/creator.py index 4c905d959f..c861020225 100644 --- a/python/paddle/reader/creator.py +++ b/python/paddle/reader/creator.py @@ -67,11 +67,14 @@ def recordio(paths, buf_size=100): import recordio as rec import paddle.reader.decorator as dec - import cPickle as pickle + import six + import six.moves.cPickle as pickle def reader(): - if isinstance(paths, basestring): + if isinstance(paths, six.string_types): path = paths + elif isinstance(paths, six.binary_type): + path = paths.decode() else: path = ",".join(paths) f = rec.reader(path) diff --git a/python/paddle/reader/decorator.py b/python/paddle/reader/decorator.py index 44a6e34463..ce410e61b9 100644 --- a/python/paddle/reader/decorator.py +++ b/python/paddle/reader/decorator.py @@ -20,7 +20,10 @@ __all__ = [ from threading import Thread import subprocess -from Queue import Queue +from six.moves.queue import Queue +from six.moves import zip_longest +from six.moves import map +from six.moves import zip import itertools import random import zlib @@ -42,7 +45,7 @@ def map_readers(func, *readers): rs = [] for r in readers: rs.append(r()) - for e in itertools.imap(func, *rs): + for e in map(func, *rs): yield e return reader @@ -148,16 +151,16 @@ def compose(*readers, **kwargs): for r in readers: rs.append(r()) if not check_alignment: - for outputs in itertools.izip(*rs): - yield sum(map(make_tuple, outputs), ()) + for outputs in zip(*rs): + yield sum(list(map(make_tuple, outputs)), ()) else: - for outputs in itertools.izip_longest(*rs): + for outputs in zip_longest(*rs): for o in outputs: if o is None: # None will be not be present if compose is aligned raise ComposeNotAligned( "outputs of readers are not aligned.") - yield sum(map(make_tuple, outputs), ()) + yield sum(list(map(make_tuple, outputs)), ()) return reader @@ -306,7 +309,7 @@ def xmap_readers(mapper, reader, process_num, buffer_size, order=False): args = (in_queue, out_queue, mapper, out_order) if order else ( in_queue, out_queue, mapper) workers = [] - for i in xrange(process_num): + for i in range(process_num): worker = Thread(target=target, args=args) worker.daemon = True workers.append(worker) @@ -336,7 +339,7 @@ def _buf2lines(buf, line_break="\n"): class PipeReader: """ - PipeReader read data by stream from a command, take it's + PipeReader read data by stream from a command, take it's stdout into a pipe buffer and redirect it to the parser to parse, then yield data as your desired format. @@ -352,7 +355,7 @@ class PipeReader: An example: .. 
code-block:: python - + def example_reader(): for f in myfiles: pr = PipeReader("cat %s"%f) diff --git a/python/paddle/reader/tests/decorator_test.py b/python/paddle/reader/tests/decorator_test.py index bee24d3b65..537df489b9 100644 --- a/python/paddle/reader/tests/decorator_test.py +++ b/python/paddle/reader/tests/decorator_test.py @@ -136,7 +136,7 @@ class TestXmap(unittest.TestCase): reader = paddle.reader.xmap_readers(mapper, reader_creator_10(0), tNum, size, order) - for n in xrange(3): + for n in range(3): result = [] for i in reader(): result.append(i) @@ -156,7 +156,7 @@ class TestPipeReader(unittest.TestCase): import tempfile - records = [str(i) for i in xrange(5)] + records = [str(i) for i in range(5)] temp = tempfile.NamedTemporaryFile() try: with open(temp.name, 'w') as f: diff --git a/python/paddle/trainer/PyDataProviderWrapper.py b/python/paddle/trainer/PyDataProviderWrapper.py index 6af2507728..374976db9f 100644 --- a/python/paddle/trainer/PyDataProviderWrapper.py +++ b/python/paddle/trainer/PyDataProviderWrapper.py @@ -42,7 +42,7 @@ except ImportError: try: import cPickle as pickle except ImportError: - import pickle + import six.moves.cPickle as pickle import io diff --git a/python/paddle/trainer/config_parser.py b/python/paddle/trainer/config_parser.py index 460eb3b349..5b90facd49 100644 --- a/python/paddle/trainer/config_parser.py +++ b/python/paddle/trainer/config_parser.py @@ -67,7 +67,7 @@ extension_module_name=[MODULE_NAME], then config_parser will call MODULE_NAME.get_config_funcs(g_config) MODULE_NAME.get_config_funcs() should return a dictionary of name to functions, those functions will be available in the config file. -See trainer/tests/config_parser_test.py for example +See legacy/trainer/tests/config_parser_test.py for example To use this from paddle_trainer, paddle_trainer should be called with --config_args=extension_module_name=[MODULE_NAME] diff --git a/python/paddle/trainer_config_helpers/attrs.py b/python/paddle/trainer_config_helpers/attrs.py index e6f87ce61b..4e3beaf639 100644 --- a/python/paddle/trainer_config_helpers/attrs.py +++ b/python/paddle/trainer_config_helpers/attrs.py @@ -240,14 +240,15 @@ class ExtraLayerAttribute(object): :type error_clipping_threshold: float :param drop_rate: Dropout rate. Dropout will create a mask on layer output. The dropout rate is the zero rate of this mask. The - details of what dropout is please refer to `here - `_. + details of what dropout is please refer to `JMLRdropout + `_. :type drop_rate: float :param device: device ID of layer. device=-1, use CPU. device>=0, use GPU. - The details allocation in parallel_nn please refer to `here - `_. + The details allocation in parallel_nn please refer to `use_case + `_. 
:type device: int """ diff --git a/python/paddle/trainer_config_helpers/data_sources.py b/python/paddle/trainer_config_helpers/data_sources.py index ab9a2562dc..a2a32d848c 100644 --- a/python/paddle/trainer_config_helpers/data_sources.py +++ b/python/paddle/trainer_config_helpers/data_sources.py @@ -20,7 +20,7 @@ from .utils import deprecated try: import cPickle as pickle except ImportError: - import pickle + import six.moves.cPickle as pickle __all__ = ['define_py_data_sources2'] diff --git a/python/paddle/trainer_config_helpers/layers.py b/python/paddle/trainer_config_helpers/layers.py index ebc31b23e0..ee34c15733 100644 --- a/python/paddle/trainer_config_helpers/layers.py +++ b/python/paddle/trainer_config_helpers/layers.py @@ -28,7 +28,7 @@ from .default_decorators import * try: import cPickle as pickle except ImportError: - import pickle + import six.moves.cPickle as pickle import copy __all__ = [ @@ -2556,7 +2556,7 @@ def img_conv_layer(input, the output will be obtained by concatenating the two results. The details of grouped convolution, please refer to: - `ImageNet Classification with Deep Convolutional Neural Networks + `ImageNet Classification With Deep Convolutional Neural Networks `_ The example usage is: @@ -4182,9 +4182,9 @@ def recurrent_group(step, input, reverse=False, name=None, targetInlink=None): You can see following configs for further usages: - - time steps: lstmemory_group, paddle/gserver/tests/sequence_layer_group.conf, \ + - time steps: lstmemory_group, paddle/legacy/gserver/tests/sequence_layer_group.conf, \ demo/seqToseq/seqToseq_net.py - - sequence steps: paddle/gserver/tests/sequence_nest_layer_group.conf + - sequence steps: paddle/legacy/gserver/tests/sequence_nest_layer_group.conf :param step: A step function which takes the input of recurrent_group as its own input and returns values as recurrent_group's output every time step. @@ -5678,8 +5678,8 @@ def warp_ctc_layer(input, `_ library, which is used in `Deep Speech 2: End-toEnd Speech Recognition in English and Mandarin `_, to compute Connectionist Temporal - Classification (CTC) loss. Besides, another `warp-ctc - `_ repository, which is forked from + Classification (CTC) loss. Besides, another `warp-ctc repository + `_ , which is forked from the official one, is maintained to enable more compiling options. During the building process, PaddlePaddle will clone the source codes, build and install it to :code:`third_party/install/warpctc` directory. diff --git a/python/paddle/v2/dataset/cifar.py b/python/paddle/v2/dataset/cifar.py index 0a2a1ced11..662655c836 100644 --- a/python/paddle/v2/dataset/cifar.py +++ b/python/paddle/v2/dataset/cifar.py @@ -43,7 +43,7 @@ CIFAR100_URL = URL_PREFIX + 'cifar-100-python.tar.gz' CIFAR100_MD5 = 'eb9058c3a382ffc7106e4002c42a8d85' -def reader_creator(filename, sub_name): +def reader_creator(filename, sub_name, cycle=False): def read_batch(batch): data = batch['data'] labels = batch.get('labels', batch.get('fine_labels', None)) @@ -56,10 +56,13 @@ def reader_creator(filename, sub_name): names = (each_item.name for each_item in f if sub_name in each_item.name) - for name in names: - batch = cPickle.load(f.extractfile(name)) - for item in read_batch(batch): - yield item + while True: + for name in names: + batch = cPickle.load(f.extractfile(name)) + for item in read_batch(batch): + yield item + if not cycle: + break return reader @@ -94,34 +97,40 @@ def test100(): 'test') -def train10(): +def train10(cycle=False): """ CIFAR-10 training set creator. 
It returns a reader creator, each sample in the reader is image pixels in [0, 1] and label in [0, 9]. + :param cycle: whether to cycle through the dataset + :type cycle: bool :return: Training reader creator :rtype: callable """ return reader_creator( paddle.v2.dataset.common.download(CIFAR10_URL, 'cifar', CIFAR10_MD5), - 'data_batch') + 'data_batch', + cycle=cycle) -def test10(): +def test10(cycle=False): """ CIFAR-10 test set creator. It returns a reader creator, each sample in the reader is image pixels in [0, 1] and label in [0, 9]. + :param cycle: whether to cycle through the dataset + :type cycle: bool :return: Test reader creator. :rtype: callable """ return reader_creator( paddle.v2.dataset.common.download(CIFAR10_URL, 'cifar', CIFAR10_MD5), - 'test_batch') + 'test_batch', + cycle=cycle) def fetch(): diff --git a/python/paddle/v2/dataset/conll05.py b/python/paddle/v2/dataset/conll05.py index 0d544efac9..8312900dc4 100644 --- a/python/paddle/v2/dataset/conll05.py +++ b/python/paddle/v2/dataset/conll05.py @@ -29,13 +29,13 @@ __all__ = ['test, get_dict', 'get_embedding', 'convert'] DATA_URL = 'http://www.cs.upc.edu/~srlconll/conll05st-tests.tar.gz' DATA_MD5 = '387719152ae52d60422c016e92a742fc' -WORDDICT_URL = 'http://paddlepaddle.bj.bcebos.com/demo/srl_dict_and_embedding/wordDict.txt' +WORDDICT_URL = 'http://paddlemodels.bj.bcebos.com/conll05st%2FwordDict.txt' WORDDICT_MD5 = 'ea7fb7d4c75cc6254716f0177a506baa' -VERBDICT_URL = 'http://paddlepaddle.bj.bcebos.com/demo/srl_dict_and_embedding/verbDict.txt' +VERBDICT_URL = 'http://paddlemodels.bj.bcebos.com/conll05st%2FverbDict.txt' VERBDICT_MD5 = '0d2977293bbb6cbefab5b0f97db1e77c' -TRGDICT_URL = 'http://paddlepaddle.bj.bcebos.com/demo/srl_dict_and_embedding/targetDict.txt' +TRGDICT_URL = 'http://paddlemodels.bj.bcebos.com/conll05st%2FtargetDict.txt' TRGDICT_MD5 = 'd8c7f03ceb5fc2e5a0fa7503a4353751' -EMB_URL = 'http://paddlepaddle.bj.bcebos.com/demo/srl_dict_and_embedding/emb' +EMB_URL = 'http://paddlemodels.bj.bcebos.com/conll05st%2Femb' EMB_MD5 = 'bf436eb0faa1f6f9103017f8be57cdb7' UNK_IDX = 0 diff --git a/python/paddle/v2/dataset/flowers.py b/python/paddle/v2/dataset/flowers.py index 7bdddeaabe..db12076d54 100644 --- a/python/paddle/v2/dataset/flowers.py +++ b/python/paddle/v2/dataset/flowers.py @@ -76,7 +76,8 @@ def reader_creator(data_file, dataset_name, mapper, buffered_size=1024, - use_xmap=True): + use_xmap=True, + cycle=False): ''' 1. 
read images from tar file and merge images into batch files in 102flowers.tgz_batch/ @@ -96,6 +97,8 @@ def reader_creator(data_file, :type mapper: callable :param buffered_size: the size of buffer used to process images :type buffered_size: int + :param cycle: whether to cycle through the dataset + :type cycle: bool :return: data reader :rtype: callable ''' @@ -108,23 +111,27 @@ def reader_creator(data_file, file_list = batch_images_from_tar(data_file, dataset_name, img2label) def reader(): - for file in open(file_list): - file = file.strip() - batch = None - with open(file, 'r') as f: - batch = cPickle.load(f) - data = batch['data'] - labels = batch['label'] - for sample, label in itertools.izip(data, batch['label']): - yield sample, int(label) - 1 + while True: + for file in open(file_list): + file = file.strip() + batch = None + with open(file, 'r') as f: + batch = cPickle.load(f) + data = batch['data'] + labels = batch['label'] + for sample, label in itertools.izip(data, batch['label']): + yield sample, int(label) - 1 + if not cycle: + break if use_xmap: - return xmap_readers(mapper, reader, cpu_count(), buffered_size) + cpu_num = int(os.environ.get('CPU_NUM', cpu_count())) + return xmap_readers(mapper, reader, cpu_num, buffered_size) else: return map_readers(mapper, reader) -def train(mapper=train_mapper, buffered_size=1024, use_xmap=True): +def train(mapper=train_mapper, buffered_size=1024, use_xmap=True, cycle=False): ''' Create flowers training set reader. It returns a reader, each sample in the reader is @@ -137,17 +144,23 @@ def train(mapper=train_mapper, buffered_size=1024, use_xmap=True): :type mapper: callable :param buffered_size: the size of buffer used to process images :type buffered_size: int + :param cycle: whether to cycle through the dataset + :type cycle: bool :return: train data reader :rtype: callable ''' return reader_creator( download(DATA_URL, 'flowers', DATA_MD5), download(LABEL_URL, 'flowers', LABEL_MD5), - download(SETID_URL, 'flowers', SETID_MD5), TRAIN_FLAG, mapper, - buffered_size, use_xmap) + download(SETID_URL, 'flowers', SETID_MD5), + TRAIN_FLAG, + mapper, + buffered_size, + use_xmap, + cycle=cycle) -def test(mapper=test_mapper, buffered_size=1024, use_xmap=True): +def test(mapper=test_mapper, buffered_size=1024, use_xmap=True, cycle=False): ''' Create flowers test set reader. 
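Besides gaining the same `cycle` flag, the flowers reader above now sizes its `xmap_readers` pool from a `CPU_NUM` environment variable instead of always using every core. The selection logic in isolation, using only the standard library:

```python
import os
from multiprocessing import cpu_count


def xmap_worker_count():
    # an explicit CPU_NUM wins (useful in containers or shared CI hosts,
    # where cpu_count() reports all of the host's cores); otherwise fall
    # back to the machine's core count
    return int(os.environ.get('CPU_NUM', cpu_count()))
```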
It returns a reader, each sample in the reader is @@ -160,14 +173,20 @@ def test(mapper=test_mapper, buffered_size=1024, use_xmap=True): :type mapper: callable :param buffered_size: the size of buffer used to process images :type buffered_size: int + :param cycle: whether to cycle through the dataset + :type cycle: bool :return: test data reader :rtype: callable ''' return reader_creator( download(DATA_URL, 'flowers', DATA_MD5), download(LABEL_URL, 'flowers', LABEL_MD5), - download(SETID_URL, 'flowers', SETID_MD5), TEST_FLAG, mapper, - buffered_size, use_xmap) + download(SETID_URL, 'flowers', SETID_MD5), + TEST_FLAG, + mapper, + buffered_size, + use_xmap, + cycle=cycle) def valid(mapper=test_mapper, buffered_size=1024, use_xmap=True): diff --git a/python/paddle/v2/dataset/mnist.py b/python/paddle/v2/dataset/mnist.py index 9f675bed89..026cf501cf 100644 --- a/python/paddle/v2/dataset/mnist.py +++ b/python/paddle/v2/dataset/mnist.py @@ -68,8 +68,14 @@ def reader_creator(image_filename, label_filename, buffer_size): for i in xrange(buffer_size): yield images[i, :], int(labels[i]) finally: - m.terminate() - l.terminate() + try: + m.terminate() + except: + pass + try: + l.terminate() + except: + pass return reader @@ -112,7 +118,7 @@ def fetch(): paddle.v2.dataset.common.download(TRAIN_IMAGE_URL, 'mnist', TRAIN_IMAGE_MD5) paddle.v2.dataset.common.download(TRAIN_LABEL_URL, 'mnist', TRAIN_LABEL_MD5) paddle.v2.dataset.common.download(TEST_IMAGE_URL, 'mnist', TEST_IMAGE_MD5) - paddle.v2.dataset.common.download(TEST_LABEL_URL, 'mnist', TRAIN_LABEL_MD5) + paddle.v2.dataset.common.download(TEST_LABEL_URL, 'mnist', TEST_LABEL_MD5) def convert(path): diff --git a/python/paddle/v2/dataset/wmt14.py b/python/paddle/v2/dataset/wmt14.py index 5104e29051..b9e602f324 100644 --- a/python/paddle/v2/dataset/wmt14.py +++ b/python/paddle/v2/dataset/wmt14.py @@ -15,7 +15,7 @@ WMT14 dataset. The original WMT14 dataset is too large and a small set of data for set is provided. This module will download dataset from -http://paddlepaddle.cdn.bcebos.com/demo/wmt_shrinked_data/wmt14.tgz and +http://paddlemodels.bj.bcebos.com/wmt/wmt14.tgz and parse training set and test set into paddle reader creators. """ @@ -37,11 +37,10 @@ URL_DEV_TEST = ('http://www-lium.univ-lemans.fr/~schwenk/' MD5_DEV_TEST = '7d7897317ddd8ba0ae5c5fa7248d3ff5' # this is a small set of data for test. The original data is too large and # will be add later. 
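The mnist hunk above wraps each `terminate()` in its own try/except because a reader process may already have exited, and a failure terminating one should not skip the other. The same best-effort cleanup, factored into a hypothetical helper:

```python
def terminate_quietly(*processes):
    # best-effort: terminate() can raise if the process is already gone;
    # swallow the error and keep going so every process gets a chance
    for proc in processes:
        try:
            proc.terminate()
        except Exception:
            pass
```

In the reader's `finally` block this would read `terminate_quietly(m, l)`.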
-URL_TRAIN = ('http://paddlepaddle.cdn.bcebos.com/demo/' - 'wmt_shrinked_data/wmt14.tgz') +URL_TRAIN = ('http://paddlemodels.bj.bcebos.com/wmt/wmt14.tgz') MD5_TRAIN = '0791583d57d5beb693b9414c5b36798c' # BLEU of this trained model is 26.92 -URL_MODEL = 'http://paddlepaddle.bj.bcebos.com/demo/wmt_14/wmt14_model.tar.gz' +URL_MODEL = 'http://paddlemodels.bj.bcebos.com/wmt%2Fwmt14.tgz' MD5_MODEL = '0cb4a5366189b6acba876491c8724fa3' START = "" diff --git a/python/paddle/v2/image.py b/python/paddle/v2/image.py index 9235c41e9e..08d8bd68f9 100644 --- a/python/paddle/v2/image.py +++ b/python/paddle/v2/image.py @@ -182,7 +182,7 @@ def resize_short(im, size): h_new = size * h / w else: w_new = size * w / h - im = cv2.resize(im, (h_new, w_new), interpolation=cv2.INTER_CUBIC) + im = cv2.resize(im, (w_new, h_new), interpolation=cv2.INTER_CUBIC) return im @@ -324,7 +324,6 @@ def simple_transform(im, if np.random.randint(2) == 0: im = left_right_flip(im, is_color) else: - im = center_crop(im, crop_size, is_color) im = center_crop(im, crop_size, is_color=is_color) if len(im.shape) == 3: im = to_chw(im) diff --git a/python/paddle/v2/inference.py b/python/paddle/v2/inference.py index 14b64742fd..28ee042282 100644 --- a/python/paddle/v2/inference.py +++ b/python/paddle/v2/inference.py @@ -63,7 +63,7 @@ class Inference(object): assert isinstance(val, api.Vector) val.copyFromNumpyArray(parameters.get(name).flatten()) # the setValueUpdated function is called in randomize, zeroMem, - # load function in paddle/parameter/Parameter.cpp. But in the + # load function in paddle/legacy/parameter/Parameter.cpp. But in the # inference mode, the setValueUpdated is never called, it will # cause the parameter will not be dispatched # in MultiGradientMachine for multi-GPU. So setValueUpdated is diff --git a/python/paddle/v2/minibatch.py b/python/paddle/v2/minibatch.py index 317cf037c6..3c6a53db3c 100644 --- a/python/paddle/v2/minibatch.py +++ b/python/paddle/v2/minibatch.py @@ -15,7 +15,7 @@ __all__ = ['batch'] -def batch(reader, batch_size): +def batch(reader, batch_size, drop_last=True): """ Create a batched reader. @@ -23,6 +23,8 @@ def batch(reader, batch_size): :type reader: callable :param batch_size: size of each mini-batch :type batch_size: int + :param drop_last: drop the last batch, if the size of last batch is not equal to batch_size. + :type drop_last: bool :return: the batched reader. 
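`batch` gains a `drop_last` switch here (its body is updated in the hunk that continues below). A self-contained sketch of the resulting behavior, assuming only that `reader` is a zero-argument callable yielding samples:

```python
def batch(reader, batch_size, drop_last=True):
    def batch_reader():
        b = []
        for item in reader():
            b.append(item)
            if len(b) == batch_size:
                yield b
                b = []
        if not drop_last and b:  # emit the short final batch only on request
            yield b
    return batch_reader

# 7 samples, batch_size=3: the trailing batch of one is kept or dropped
assert len(list(batch(lambda: iter(range(7)), 3)())) == 2
assert len(list(batch(lambda: iter(range(7)), 3, drop_last=False)())) == 3
```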
:rtype: callable """ @@ -35,7 +37,7 @@ def batch(reader, batch_size): if len(b) == batch_size: yield b b = [] - if b: + if drop_last == False and len(b) != 0: yield b return batch_reader diff --git a/python/requirements.txt b/python/requirements.txt index ea827e9d5a..f8298a6361 100644 --- a/python/requirements.txt +++ b/python/requirements.txt @@ -1,5 +1,5 @@ requests==2.9.2 -numpy>=1.12 +numpy>=1.12,<=1.14 #TODO:change to ">=1.12" when numpy fix bug in 1.15 and higher version protobuf==3.1 recordio>=0.1.0 matplotlib @@ -8,4 +8,4 @@ scipy>=0.19.0 Pillow nltk>=3.2.2 graphviz -LinkChecker +six diff --git a/python/setup.py.in b/python/setup.py.in index c42601d335..4a6cddbbea 100644 --- a/python/setup.py.in +++ b/python/setup.py.in @@ -1,14 +1,13 @@ from setuptools import setup, Distribution, Extension import subprocess +import os +import re +import shutil class BinaryDistribution(Distribution): def has_ext_modules(foo): return True -MAJOR = 0 -MINOR = 11 -PATCH = 0 RC = 0 -ISTAGED = False @@ -18,16 +17,51 @@ def git_commit(): git_commit = subprocess.Popen(cmd, stdout = subprocess.PIPE).communicate()[0].strip() except: git_commit = 'Unknown' - return git_commit + git_commit = git_commit.decode() + return str(git_commit) + +def _get_version_detail(idx): + assert idx < 3, "vesion info consists of %(major)d.%(minor)d.%(patch)d, \ + so detail index must less than 3" + + if re.match('@TAG_VERSION_REGEX@', '@PADDLE_VERSION@'): + version_details = '@PADDLE_VERSION@'.split('.') + + if len(version_details) == 3: + return version_details[idx] + + return 0 + +def get_major(): + return int(_get_version_detail(0)) + +def get_minor(): + return int(_get_version_detail(1)) + +def get_patch(): + return str(_get_version_detail(2)) + +def is_taged(): + try: + cmd = ['git', 'describe', '--exact-match', '--tags', 'HEAD', '2>/dev/null'] + git_tag = subprocess.Popen(cmd, stdout = subprocess.PIPE).communicate()[0].strip() + git_tag = git_tag.decode() + except: + return False + + if str(git_tag).replace('v', '') == '@PADDLE_VERSION@': + return True + else: + return False def write_version_py(filename='paddle/version.py'): cnt = ''' # THIS FILE IS GENERATED FROM PADDLEPADDLE SETUP.PY # -full_version = '%(major)d.%(minor)d.%(patch)d' +full_version = '%(major)d.%(minor)d.%(patch)s' major = '%(major)d' minor = '%(minor)d' -patch = '%(patch)d' +patch = '%(patch)s' rc = '%(rc)d' istaged = %(istaged)s commit = '%(commit)s' @@ -35,13 +69,13 @@ with_mkl = '%(with_mkl)s' def show(): if istaged: - print 'full_version:', full_version - print 'major:', major - print 'minor:', minor - print 'patch:', patch - print 'rc:', rc + print('full_version:', full_version) + print('major:', major) + print('minor:', minor) + print('patch:', patch) + print('rc:', rc) else: - print 'commit:', commit + print('commit:', commit) def mkl(): return with_mkl @@ -49,19 +83,20 @@ def mkl(): commit = git_commit() with open(filename, 'w') as f: f.write(cnt % { - 'major': MAJOR, - 'minor': MINOR, - 'patch': PATCH, + 'major': get_major(), + 'minor': get_minor(), + 'patch': get_patch(), 'rc': RC, 'version': '${PADDLE_VERSION}', 'commit': commit, - 'istaged': ISTAGED, + 'istaged': is_taged(), 'with_mkl': '@WITH_MKL@'}) write_version_py(filename='@PADDLE_BINARY_DIR@/python/paddle/version.py') packages=['paddle', + 'paddle.libs', 'paddle.utils', 'paddle.dataset', 'paddle.reader', @@ -69,7 +104,10 @@ packages=['paddle', 'paddle.fluid.proto', 'paddle.fluid.proto.profiler', 'paddle.fluid.layers', - 'paddle.fluid.transpiler'] + 'paddle.fluid.contrib', + 
'paddle.fluid.contrib.decoder', + 'paddle.fluid.transpiler', + 'paddle.fluid.transpiler.details'] if '${WITH_FLUID_ONLY}'== 'OFF': packages+=['paddle.proto', @@ -92,9 +130,9 @@ if '${CMAKE_SYSTEM_PROCESSOR}' not in ['arm', 'armv7-a', 'aarch64']: paddle_bins = '' if '${WITH_FLUID_ONLY}'== 'OFF': paddle_bin_dir = 'opt/paddle/bin' - paddle_bins = ['${PADDLE_BINARY_DIR}/paddle/trainer/paddle_trainer', - '${PADDLE_BINARY_DIR}/paddle/trainer/paddle_merge_model', - '${PADDLE_BINARY_DIR}/paddle/pserver/paddle_pserver_main', + paddle_bins = ['${PADDLE_BINARY_DIR}/paddle/legacy/trainer/paddle_trainer', + '${PADDLE_BINARY_DIR}/paddle/legacy/trainer/paddle_merge_model', + '${PADDLE_BINARY_DIR}/paddle/legacy/pserver/paddle_pserver_main', '${PADDLE_BINARY_DIR}/paddle/scripts/paddle'] package_data={'paddle.fluid': ['core.so']} @@ -112,12 +150,49 @@ package_dir={ } if '${WITH_FLUID_ONLY}'== 'OFF': package_dir['py_paddle']='${PADDLE_BINARY_DIR}/python/py_paddle' - -paddle_rt_lib_dir = 'lib' -paddle_rt_libs = ['${WARPCTC_LIBRARIES}'] -if '${MKL_SHARED_LIBS}'!= '': - paddle_rt_libs += '${MKL_SHARED_LIBS}'.split(';') +# put all thirdparty libraries in paddle.libs +package_data['paddle.libs']=['libwarpctc.so'] +libs_path='${PADDLE_BINARY_DIR}/python/paddle/libs' +shutil.copy('${WARPCTC_LIBRARIES}', libs_path) +if '${WITH_MKL}' == 'ON': + shutil.copy('${MKLML_LIB}', libs_path) + shutil.copy('${MKLML_IOMP_LIB}', libs_path) + package_data['paddle.libs']+=['libmklml_intel.so','libiomp5.so'] +if '${WITH_MKLDNN}' == 'ON': + # TODO(typhoonzero): use install_name_tool to patch mkl libs once + # we can support mkl on mac. + # + # change rpath of libmkldnn.so.0, add $ORIGIN/ to it. + # The reason is that all thirdparty libraries are in the same directory, + # thus, libmkldnn.so.0 will find libmklml_intel.so and libiomp5.so. + command = "patchelf --set-rpath '$ORIGIN/' ${MKLDNN_SHARED_LIB}" + if os.system(command) != 0: + raise Exception("patch libmkldnn.so failed, command: %s" % command) + package_data['paddle.libs']+=['libmkldnn.so.0'] + shutil.copy('${MKLDNN_SHARED_LIB}', libs_path) +# remove unused paddle/libs/__init__.py +os.remove(libs_path+'/__init__.py') +package_dir['paddle.libs']=libs_path + +# change rpath of core.so, add $ORIGIN/../libs/ to it. +# The reason is that libwarpctc.so, libiomp5.so etc are in paddle.libs, and +# core.so is in paddle.fluid, thus paddle/fluid/../libs will point to the above libraries. +# This operation will fix https://github.com/PaddlePaddle/Paddle/issues/3213 +if "@APPLE@" == "1": + command = "install_name_tool -id \"@loader_path/../libs/\" ${PADDLE_BINARY_DIR}/python/paddle/fluid/core.so" +else: + command = "patchelf --set-rpath '$ORIGIN/../libs/' ${PADDLE_BINARY_DIR}/python/paddle/fluid/core.so" +if os.system(command) != 0: + raise Exception("patch core.so failed, command: %s" % command) +if '${WITH_FLUID_ONLY}'== 'OFF': + # change rpath of _swig_paddle.so.
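The packaging logic above bundles the shared libraries inside the wheel and rewrites each binary's rpath relative to `$ORIGIN` (the directory of the loading object), so the installed package is self-contained wherever pip puts it. The core of that fix-up as a small sketch (the example path is hypothetical; `patchelf` is the external tool the Linux build already requires):

```python
import os


def set_relative_rpath(so_path, rpath='$ORIGIN/../libs/'):
    # $ORIGIN expands at load time to the directory containing so_path,
    # so the extension finds the libraries shipped inside the package
    command = "patchelf --set-rpath '%s' %s" % (rpath, so_path)
    if os.system(command) != 0:
        raise Exception("patch %s failed, command: %s" % (so_path, command))

# e.g. set_relative_rpath('build/python/paddle/fluid/core.so')
```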
+ if "@APPLE@" == "1": + command = "install_name_tool -id \"@loader_path/../paddle/libs/\" ${PADDLE_BINARY_DIR}/python/py_paddle/_swig_paddle.so" + else: + command = "patchelf --set-rpath '$ORIGIN/../paddle/libs/' ${PADDLE_BINARY_DIR}/python/py_paddle/_swig_paddle.so" + if os.system(command) != 0: + raise Exception("patch _swig_paddle.so failed, command: %s" % command) setup(name='${PACKAGE_NAME}', version='${PADDLE_VERSION}', @@ -127,6 +202,5 @@ setup(name='${PACKAGE_NAME}', ext_modules=[Extension('_foo', ['stub.cc'])], package_data=package_data, package_dir=package_dir, - scripts=paddle_bins, - data_files=[(paddle_rt_lib_dir, paddle_rt_libs)] + scripts=paddle_bins ) diff --git a/tools/aws_benchmarking/server/cluster_master.py b/tools/aws_benchmarking/server/cluster_master.py index 1333a942bf..a9b2484654 100644 --- a/tools/aws_benchmarking/server/cluster_master.py +++ b/tools/aws_benchmarking/server/cluster_master.py @@ -20,6 +20,7 @@ import time import threading import logging import copy +import csv import netaddr import boto3 @@ -136,6 +137,12 @@ parser.add_argument( parser.add_argument( '--master_server_ip', type=str, default="", help="master server private ip") +parser.add_argument( + '--metric_data_identifier', + type=str, + default="**metrics_data: ", + help="key string to identify metrics data") + parser.add_argument( '--no_clean_up', type=str2bool, @@ -155,6 +162,11 @@ logging.basicConfig( log_files = ["master.log"] +metrics = {} + +metrics_csv_file_name = "metrics.csv" +is_metrics_file_created = False + def create_subnet(): # if no vpc id provided, list vpcs @@ -329,12 +341,42 @@ def create_pservers(): cleanup(args.task_name) +def save_metrics_data(str_msg): + #parse msg + logging.info("found metrics data, saving it to csv file") + global is_metrics_file_created + metrics_raw = str_msg.split(",") + with open(args.log_path + metrics_csv_file_name, 'a') as csvfile: + csv_fieldnames = [] + csv_write_data = {} + for metric in metrics_raw: + metric_data = metric.split("=") + metric_key = metric_data[0].strip() + metric_val = float(metric_data[1].strip()) + if not metric_key in metrics: + metrics[metric_key] = [] + metric_repo = metrics[metric_key] + metric_repo.append(metric_val) + csv_fieldnames.append(metric_key) + csv_write_data[metric_key] = metric_val + writer = csv.DictWriter(csvfile, fieldnames=csv_fieldnames) + if not is_metrics_file_created: + writer.writeheader() + is_metrics_file_created = True + writer.writerow(csv_write_data) + logging.info("csv file appended") + + def log_to_file(source, filename): if not filename in log_files: log_files.append(filename) with open(args.log_path + filename, "a") as log_file: for line in iter(source.readline, ""): log_file.write(line) + if (line.startswith(args.metric_data_identifier)): + #found key data, trying to add to csv + line = line.replace(args.metric_data_identifier, "") + save_metrics_data(line) def parse_command(command_raw, defaults={}): diff --git a/tools/check_ctest_hung.py b/tools/check_ctest_hung.py new file mode 100644 index 0000000000..7de76c381b --- /dev/null +++ b/tools/check_ctest_hung.py @@ -0,0 +1,53 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import sys +import re + + +def escape(input): + o = input.replace("\n", "") + o = o.replace("\r", "") + return o + + +def main(): + usage = """Usage: +1. Download the Paddle_PR_CI_*.log from TeamCity +2. run: python check_ctest_hung.py Paddle_PR_CI_*.log +3. If there is hung ctest, the result likes: +Diff: set(['test_parallel_executor_crf']) + """ + if len(sys.argv) < 2: + print(usage) + exit(0) + + logfile = sys.argv[1] + started = set() + passed = set() + with open(logfile, "r") as fn: + for l in fn.readlines(): + if l.find("Test ") != -1 and \ + l.find("Passed") != -1: + m = re.search("Test\s+#[0-9]*\:\s([a-z0-9_]+)", escape(l)) + passed.add(m.group(1)) + if l.find("Start ") != -1: + start_parts = escape(l).split(" ") + m = re.search("Start\s+[0-9]+\:\s([a-z0-9_]+)", escape(l)) + started.add(m.group(1)) + print "Diff: ", started - passed + + +if __name__ == "__main__": + main() diff --git a/tools/check_pr_approval.py b/tools/check_pr_approval.py new file mode 100644 index 0000000000..937b0be756 --- /dev/null +++ b/tools/check_pr_approval.py @@ -0,0 +1,49 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import print_function +import sys +import json + + +def check_approval(count, required_reviewers): + json_buff = "" + for line in sys.stdin: + json_buff = "".join([json_buff, line]) + json_resp = json.loads(json_buff) + approves = 0 + approved_user_ids = [] + for review in json_resp: + if review["state"] == "APPROVED": + approves += 1 + approved_user_ids.append(review["user"]["id"]) + + # convert to int + required_reviewers_int = set() + for rr in required_reviewers: + required_reviewers_int.add(int(rr)) + + if len(set(approved_user_ids) & required_reviewers_int) >= count: + print("TRUE") + else: + print("FALSE") + + +if __name__ == "__main__": + if len(sys.argv) > 1 and sys.argv[1].isdigit(): + check_approval(int(sys.argv[1]), sys.argv[2:]) + else: + print( + "Usage: python check_pr_approval.py [count] [required reviewer id] ..." 
+ ) diff --git a/tools/codestyle/.gitignore b/tools/codestyle/.gitignore new file mode 100644 index 0000000000..0d20b6487c --- /dev/null +++ b/tools/codestyle/.gitignore @@ -0,0 +1 @@ +*.pyc diff --git a/.clang_format.hook b/tools/codestyle/clang_format.hook similarity index 100% rename from .clang_format.hook rename to tools/codestyle/clang_format.hook diff --git a/.copyright.hook b/tools/codestyle/copyright.hook similarity index 98% rename from .copyright.hook rename to tools/codestyle/copyright.hook index 09afff2072..86b16ebdc4 100644 --- a/.copyright.hook +++ b/tools/codestyle/copyright.hook @@ -9,7 +9,7 @@ import subprocess import platform COPYRIGHT = ''' - Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. +Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/tools/codestyle/cpplint_pre_commit.hook b/tools/codestyle/cpplint_pre_commit.hook index 94d1e23ce7..aa14d3a2a1 100755 --- a/tools/codestyle/cpplint_pre_commit.hook +++ b/tools/codestyle/cpplint_pre_commit.hook @@ -4,8 +4,12 @@ TOTAL_ERRORS=0 # The trick to remove deleted files: https://stackoverflow.com/a/2413151 for file in $(git diff --cached --name-status | awk '$1 != "D" {print $2}'); do - cpplint $file; - TOTAL_ERRORS=$(expr $TOTAL_ERRORS + $?); + if [[ $file =~ ^(paddle/legacy/api/.*|paddle/legacy/capi/.*|paddle/contrib/.*|paddle/legacy/cuda/.*|paddle/legacy/function/.*|paddle/legacy/gserver/.*|paddle/legacy/math/.*|paddle/legacy/optimizer/.*|paddle/legacy/parameter/.*|paddle/legacy/pserver/.*|paddle/legacy/trainer/.*|paddle/legacy/utils/.*|paddle/testing/TestUtil.*|patches/grpc/.*) ]]; then + continue; + else + cpplint --filter=-readability/fn_size $file; + TOTAL_ERRORS=$(expr $TOTAL_ERRORS + $?); + fi done exit $TOTAL_ERRORS diff --git a/tools/codestyle/docstring_checker.py b/tools/codestyle/docstring_checker.py new file mode 100644 index 0000000000..8d4b24a0cf --- /dev/null +++ b/tools/codestyle/docstring_checker.py @@ -0,0 +1,349 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""DocstringChecker is used to check python doc string's style.""" + +import six +import astroid + +from pylint.checkers import BaseChecker, utils +from pylint.interfaces import IAstroidChecker + +from collections import defaultdict +import re + + +def register(linter): + """Register checkers.""" + linter.register_checker(DocstringChecker(linter)) + + +class Docstring(object): + """Docstring class holds the parsed doc string elements. 
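The cpplint hook above whitelists the `legacy/` trees with a bash regex so moved code is not re-linted. The same decision expressed in Python, as a simplified sketch (the real hook enumerates every legacy subtree explicitly):

```python
import re

SKIP = re.compile(r'^(paddle/legacy/.*|paddle/contrib/.*'
                  r'|paddle/testing/TestUtil.*|patches/grpc/.*)')


def should_lint(path):
    return SKIP.match(path) is None

assert not should_lint('paddle/legacy/math/matrix.cc')
assert should_lint('paddle/fluid/framework/tensor.cc')
```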
+ """ + + def __init__(self): + self.d = defaultdict(list) #name->[] + self.clear() + + def clear(self): + self.d['Args'] = [] + self.d['Examples'] = [] + self.d['Returns'] = [] + self.d['Raises'] = [] + self.args = {} #arg_name->arg_type + + def get_level(self, string, indent=' '): + level = 0 + unit_size = len(indent) + while string[:unit_size] == indent: + string = string[unit_size:] + level += 1 + + return level + + def parse(self, doc): + """parse gets sections from doc + Such as Args, Returns, Raises, Examples s + Args: + doc (string): is the astroid node doc string. + Returns: + True if doc is parsed successfully. + """ + self.clear() + + lines = doc.splitlines() + state = ("others", -1) + for l in lines: + c = l.strip() + if len(c) <= 0: + continue + + level = self.get_level(l) + if c.startswith("Args:"): + state = ("Args", level) + elif c.startswith("Returns:"): + state = ("Returns", level) + elif c.startswith("Raises:"): + state = ("Raises", level) + elif c.startswith("Examples:"): + state = ("Examples", level) + else: + if level > state[1]: + self.d[state[0]].append(c) + continue + + state = ("others", -1) + self.d[state[0]].append(c) + + self._arg_with_type() + return True + + def get_returns(self): + return self.d['Returns'] + + def get_raises(self): + return self.d['Raises'] + + def get_examples(self): + return self.d['Examples'] + + def _arg_with_type(self): + + for t in self.d['Args']: + m = re.search('([A-Za-z0-9_-]+)\s{0,4}(\(.+\))\s{0,4}:', t) + if m: + self.args[m.group(1)] = m.group(2) + + return self.args + + +class DocstringChecker(BaseChecker): + """DosstringChecker is pylint checker to + check docstring style. + """ + __implements__ = (IAstroidChecker, ) + + POSITIONAL_MESSAGE_ID = 'str-used-on-positional-format-argument' + KEYWORD_MESSAGE_ID = 'str-used-on-keyword-format-argument' + + name = 'doc-string-checker' + symbol = "doc-string" + priority = -1 + msgs = { + 'W9001': ('One line doc string on > 1 lines', symbol + "-one-line", + 'Used when a short doc string is on multiple lines'), + 'W9002': + ('Doc string does not end with "." period', symbol + "-end-with", + 'Used when a doc string does not end with a period'), + 'W9003': + ('All args with their types must be mentioned in doc string %s', + symbol + "-with-all-args", + 'Used when not all arguments are in the doc string '), + 'W9005': ('Missing docstring or docstring is too short', + symbol + "-missing", 'Add docstring longer >=10'), + 'W9006': ('Docstring indent error, use 4 space for indent', + symbol + "-indent-error", 'Use 4 space for indent'), + 'W9007': ('You should add `Returns` in comments', + symbol + "-with-returns", + 'There should be a `Returns` section in comments'), + 'W9008': ('You should add `Raises` section in comments', + symbol + "-with-raises", + 'There should be a `Raises` section in comments'), + } + options = () + + def visit_functiondef(self, node): + """visit_functiondef checks Function node docstring style. + Args: + node (astroid.node): The visiting node. + Returns: + True if successful other wise False. 
+ """ + + self.check_doc_string(node) + + if node.tolineno - node.fromlineno <= 10: + return True + + if not node.doc: + return True + + doc = Docstring() + doc.parse(node.doc) + + self.all_args_in_doc(node, doc) + self.with_returns(node, doc) + self.with_raises(node, doc) + + def visit_module(self, node): + self.check_doc_string(node) + + def visit_classdef(self, node): + self.check_doc_string(node) + + def check_doc_string(self, node): + self.missing_doc_string(node) + self.one_line(node) + self.has_period(node) + self.indent_style(node) + + def missing_doc_string(self, node): + if node.name.startswith("__") or node.name.startswith("_"): + return True + if node.tolineno - node.fromlineno <= 10: + return True + + if node.doc is None or len(node.doc) < 10: + self.add_message('W9005', node=node, line=node.fromlineno) + return False + + # FIXME(gongwb): give the docstring line-no + def indent_style(self, node, indent=4): + """indent_style checks docstring's indent style + Args: + node (astroid.node): The visiting node. + indent (int): The default indent of style + Returns: + True if successful other wise False. + """ + if node.doc is None: + return True + + doc = node.doc + lines = doc.splitlines() + line_num = 0 + + for l in lines: + if line_num == 0: + continue + cur_indent = len(l) - len(l.lstrip()) + if cur_indent % indent != 0: + self.add_message('W9006', node=node, line=node.fromlineno) + return False + line_num += 1 + + return True + + def one_line(self, node): + """one_line checks if docstring (len < 40) is on one line. + Args: + node (astroid.node): The node visiting. + Returns: + True if successful otherwise False. + """ + + doc = node.doc + if doc is None: + return True + + if len(doc) > 40: + return True + elif sum(doc.find(nl) for nl in ('\n', '\r', '\n\r')) == -3: + return True + else: + self.add_message('W9001', node=node, line=node.fromlineno) + return False + + return True + + def has_period(self, node): + """has_period checks if one line doc end-with '.' . + Args: + node (astroid.node): the node is visiting. + Returns: + True if successful otherwise False. + """ + if node.doc is None: + return True + + if len(node.doc.splitlines()) > 1: + return True + + if not node.doc.strip().endswith('.'): + self.add_message('W9002', node=node, line=node.fromlineno) + return False + + return True + + def with_raises(self, node, doc): + """with_raises checks if one line doc end-with '.' . + Args: + node (astroid.node): the node is visiting. + doc (Docstring): Docstring object. + Returns: + True if successful otherwise False. + """ + + find = False + for t in node.body: + if not isinstance(t, astroid.Raise): + continue + + find = True + break + + if not find: + return True + + if len(doc.get_raises()) == 0: + self.add_message('W9008', node=node, line=node.fromlineno) + return False + + return True + + def with_returns(self, node, doc): + """with_returns checks if docstring comments what are returned . + Args: + node (astroid.node): the node is visiting. + doc (Docstring): Docstring object. + Returns: + True if successful otherwise False. 
+ """ + + if node.name.startswith("__") or node.name.startswith("_"): + return True + find = False + for t in node.body: + if not isinstance(t, astroid.Return): + continue + + find = True + break + + if not find: + return True + + if len(doc.get_returns()) == 0: + self.add_message('W9007', node=node, line=node.fromlineno) + return False + + return True + + def all_args_in_doc(self, node, doc): + """all_args_in_doc checks if arguments are mentioned in doc + Args: + node (astroid.node): the node is visiting. + doc (Docstring): Docstring object + Returns: + True if successful otherwise False. + """ + if node.name.startswith("__") or node.name.startswith("_"): + return True + args = [] + for arg in node.args.get_children(): + if (not isinstance(arg, astroid.AssignName)) \ + or arg.name == "self": + continue + args.append(arg.name) + + if len(args) <= 0: + return True + + parsed_args = doc.args + args_not_documented = set(args) - set(parsed_args) + if len(args) > 0 and len(parsed_args) <= 0: + self.add_message( + 'W9003', + node=node, + line=node.fromlineno, + args=list(args_not_documented)) + return False + + for t in args: + if t not in parsed_args: + self.add_message( + 'W9003', node=node, line=node.fromlineno, args=[t, ]) + return False + + return True diff --git a/tools/codestyle/pylint_pre_commit.hook b/tools/codestyle/pylint_pre_commit.hook new file mode 100755 index 0000000000..150a3f5666 --- /dev/null +++ b/tools/codestyle/pylint_pre_commit.hook @@ -0,0 +1,19 @@ +#!/bin/bash + +TOTAL_ERRORS=0 + + +DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" +export PYTHONPATH=$DIR:$PYTHONPATH + +# The trick to remove deleted files: https://stackoverflow.com/a/2413151 +for file in $(git diff --name-status | awk '$1 != "D" {print $2}'); do + pylint --disable=all --load-plugins=docstring_checker \ + --enable=doc-string-one-line,doc-string-end-with,doc-string-with-all-args,doc-string-triple-quotes,doc-string-missing,doc-string-indent-error,doc-string-with-returns,doc-string-with-raises $file; + TOTAL_ERRORS=$(expr $TOTAL_ERRORS + $?); +done + +exit $TOTAL_ERRORS +#For now, just warning: +#exit 0 + diff --git a/tools/codestyle/test_docstring_checker.py b/tools/codestyle/test_docstring_checker.py new file mode 100644 index 0000000000..0547f7d161 --- /dev/null +++ b/tools/codestyle/test_docstring_checker.py @@ -0,0 +1,232 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import docstring_checker +import pylint.testutils +import astroid +import pytest +import sys + + +class TestDocstring(pylint.testutils.CheckerTestCase): + CHECKER_CLASS = docstring_checker.DocstringChecker + + def test_one_line(self): + func_node = astroid.extract_node(''' + def test(): + """get + news. 
+ """ + if True: + return 5 + return 5 + ''') + + self.checker.visit_functiondef(func_node) + got = self.linter.release_messages() + assert len(got) == 1 + assert 'W9001' == got[0][0] + + def test_one_line(self): + func_node = astroid.extract_node(''' + def test(): + """get news""" + if True: + return 5 + return 5 + ''') + + self.checker.visit_functiondef(func_node) + got = self.linter.release_messages() + assert len(got) == 1 + assert 'W9002' == got[0][0] + + def test_args(self): + func_node = astroid.extract_node(''' + def test(scale, mean): + """get news. + Args: + scale (int): scale is the number. + """ + mean=scale + mean=scale + mean=scale + mean=scale + mean=scale + mean=scale + mean=scale + ''') + + self.checker.visit_functiondef(func_node) + got = self.linter.release_messages() + assert len(got) == 1 + assert 'W9003' == got[0][0] + + def test_missing(self): + func_node = astroid.extract_node(''' + def test(): + mean=scale + mean=scale + mean=scale + mean=scale + mean=scale + mean=scale + mean=scale + mean=scale + mean=scale + mean=scale + mean=scale + ''') + + self.checker.visit_functiondef(func_node) + got = self.linter.release_messages() + assert len(got) == 1 + assert 'W9005' == got[0][0] + + def test_indent(self): + func_node = astroid.extract_node(''' + def test(): + """ get get get get get get get get + get get get get get get get get. + """ + pass + ''') + + self.checker.visit_functiondef(func_node) + got = self.linter.release_messages() + assert len(got) == 1 + assert 'W9006' == got[0][0] + + def test_with_resturns(self): + func_node = astroid.extract_node(''' + def test(): + """get news. + Args: + scale (int): scale is the number. + """ + mean=scale + mean=scale + mean=scale + mean=scale + mean=scale + mean=scale + mean=scale + mean=scale + mean=scale + mean=scale + mean=scale + return mean + ''') + + self.checker.visit_functiondef(func_node) + got = self.linter.release_messages() + assert len(got) == 1 + assert 'W9007' == got[0][0] + + def test_with_raises(self): + func_node = astroid.extract_node(''' + def test(): + """get news. + Args: + scale (int): scale is the number. + """ + mean=scale + mean=scale + mean=scale + mean=scale + mean=scale + mean=scale + mean=scale + mean=scale + mean=scale + mean=scale + mean=scale + raise ValueError('A very specific bad thing happened.') + ''') + + self.checker.visit_functiondef(func_node) + got = self.linter.release_messages() + assert len(got) == 1 + assert 'W9008' == got[0][0] + + def test_no_message(self): + p = ''' +def fc(input, + size, + num_flatten_dims=1, + param_attr=None, + bias_attr=None, + act=None, + name=None): + """ + **Fully Connected Layer** + The fully connected layer can take multiple tensors as its inputs. It + creates a variable called weights for each input tensor, which represents + a fully connected weight matrix from each input unit to each output unit. + The fully connected layer multiplies each input tensor with its coresponding + weight to produce an output Tensor. If multiple input tensors are given, + the results of multiple multiplications will be sumed up. If bias_attr is + not None, a bias variable will be created and added to the output. Finally, + if activation is not None, it will be applied to the output as well. + This process can be formulated as follows: + + Args: + input (Variable|list of Variable): The input tensor(s) of this layer, and the dimension of + the input tensor(s) is at least 2. + size(int): The number of output units in this layer. 
+ num_flatten_dims (int, default 1): The fc layer can accept an input tensor with more than + two dimensions. If this happens, the multidimensional tensor will first be flattened + into a 2-dimensional matrix. The parameter `num_flatten_dims` determines how the input + tensor is flattened: the first `num_flatten_dims` (inclusive, index starts from 1) + dimensions will be flatten to form the first dimension of the final matrix (height of + the matrix), and the rest `rank(X) - num_flatten_dims` dimensions are flattened to + form the second dimension of the final matrix (width of the matrix). For example, suppose + `X` is a 6-dimensional tensor with a shape [2, 3, 4, 5, 6], and `num_flatten_dims` = 3. + Then, the flattened matrix will have a shape [2 x 3 x 4, 5 x 6] = [24, 30]. + param_attr (ParamAttr|list of ParamAttr, default None): The parameter attribute for learnable + parameters/weights of this layer. + bias_attr (ParamAttr|list of ParamAttr, default None): The parameter attribute for the bias + of this layer. If it is set to None, no bias will be added to the output units. + act (str, default None): Activation to be applied to the output of this layer. + name (str, default None): The name of this layer. + Returns: + A tensor variable storing the transformation result. + Raises: + ValueError: If rank of the input tensor is less than 2. + Examples: + .. code-block:: python + data = fluid.layers.data(name="data", shape=[32, 32], dtype="float32") + fc = fluid.layers.fc(input=data, size=1000, act="tanh") + """ + raise ValueError('A very specific bad thing happened.') + size = 1 + size = 1 + size = 1 + size = 1 + size = 1 + size = 1 + size = 1 + size = 1 + size = 1 + size = 1 + size = 1 + size = 1 + size = 1 + return size + ''' + + func_node = astroid.extract_node(p) + self.checker.visit_functiondef(func_node) + got = self.linter.release_messages() + assert len(got) == 0 diff --git a/tools/diff_api.py b/tools/diff_api.py new file mode 100644 index 0000000000..97c739ed2a --- /dev/null +++ b/tools/diff_api.py @@ -0,0 +1,29 @@ +#!/usr/bin/env python +from __future__ import print_function +import difflib +import sys + +with open(sys.argv[1], 'r') as f: + origin = f.read() + origin = origin.splitlines() + +with open(sys.argv[2], 'r') as f: + new = f.read() + new = new.splitlines() + +differ = difflib.Differ() +result = differ.compare(origin, new) + +error = False +print('API Difference is: ') +for each_diff in result: + if each_diff[0] in ['-', '?']: # delete or change API is not allowed + error = True + elif each_diff[0] == '+': + error = True + + if each_diff[0] != ' ': + print(each_diff) + +if error: + sys.exit(1) diff --git a/tools/manylinux1/Dockerfile.x64 b/tools/manylinux1/Dockerfile.x64 index bca0b77ad7..0d59e4c110 100644 --- a/tools/manylinux1/Dockerfile.x64 +++ b/tools/manylinux1/Dockerfile.x64 @@ -13,7 +13,7 @@ ENV PATH /opt/rh/devtoolset-2/root/usr/bin:$PATH ENV LD_LIBRARY_PATH /opt/rh/devtoolset-2/root/usr/lib64:/opt/rh/devtoolset-2/root/usr/lib:/usr/local/lib64:/usr/local/lib:${LD_LIBRARY_PATH} ENV PKG_CONFIG_PATH=/usr/local/lib/pkgconfig -RUN yum install -y sqlite-devel zlib-devel openssl-devel pcre-devel vim tk-devel tkinter libtool xz +RUN yum install -y sqlite-devel zlib-devel openssl-devel pcre-devel vim tk-devel tkinter libtool xz graphviz COPY build_scripts /build_scripts RUN bash build_scripts/build.sh && \ bash build_scripts/install_nccl2.sh && rm -r build_scripts @@ -40,11 +40,13 @@ RUN wget -O /root/requirements.txt https://raw.githubusercontent.com/PaddlePaddl RUN 
LD_LIBRARY_PATH=/opt/_internal/cpython-2.7.11-ucs4/lib:${LD_LIBRARY_PATH} /opt/python/cp27-cp27mu/bin/pip install -r /root/requirements.txt && \ LD_LIBRARY_PATH=/opt/_internal/cpython-2.7.11-ucs2/lib:${LD_LIBRARY_PATH} /opt/python/cp27-cp27m/bin/pip install -r /root/requirements.txt && \ + LD_LIBRARY_PATH=/opt/_internal/cpython-3.5.1/lib/:${LD_LIBRARY_PATH} /opt/_internal/cpython-3.5.1/bin/pip3 install -r /root/requirements.txt && \ go get github.com/Masterminds/glide && \ rm -rf /root/requirements.txt RUN LD_LIBRARY_PATH=/opt/_internal/cpython-2.7.11-ucs4/lib:${LD_LIBRARY_PATH} /opt/python/cp27-cp27mu/bin/pip install pre-commit 'ipython==5.3.0' opencv-python && \ - LD_LIBRARY_PATH=/opt/_internal/cpython-2.7.11-ucs2/lib:${LD_LIBRARY_PATH} /opt/python/cp27-cp27m/bin/pip install pre-commit 'ipython==5.3.0' opencv-python + LD_LIBRARY_PATH=/opt/_internal/cpython-2.7.11-ucs2/lib:${LD_LIBRARY_PATH} /opt/python/cp27-cp27m/bin/pip install pre-commit 'ipython==5.3.0' opencv-python && \ + LD_LIBRARY_PATH=/opt/_internal/cpython-3.5.1/lib/:${LD_LIBRARY_PATH} /opt/_internal/cpython-3.5.1/bin/pip3 install pre-commit 'ipython==5.3.0' opencv-python RUN wget -O /opt/swig-2.0.12.tar.gz https://cytranet.dl.sourceforge.net/project/swig/swig/swig-2.0.12/swig-2.0.12.tar.gz && \ cd /opt && tar xzf swig-2.0.12.tar.gz && cd /opt/swig-2.0.12 && ./configure && make && make install && cd /opt && rm swig-2.0.12.tar.gz diff --git a/tools/manylinux1/README.md b/tools/manylinux1/README.md index 898e00bd37..0e59050401 100644 --- a/tools/manylinux1/README.md +++ b/tools/manylinux1/README.md @@ -28,3 +28,38 @@ git clone https://github.com/paddlepaddle/paddle cd paddle/tools/manylinux1 REPO=[yourrepo] ./build_all.sh ``` + +## Build PaddlePaddle for the different Python ABIs + +Choose one of the following Python ABI and set the correct environment variables. + +- cp27-cp27m + + ```bash + export LD_LIBRARY_PATH=/opt/_internal/cpython-2.7.11-ucs2/lib:${LD_LIBRARY_PATH#/opt/_internal/cpython-2.7.11-ucs4/lib:} + export PATH=/opt/python/cp27-cp27m/bin/:${PATH} + export PYTHON_FLAGS="-DPYTHON_EXECUTABLE:FILEPATH=/opt/python/cp27-cp27m/bin/python + -DPYTHON_INCLUDE_DIR:PATH=/opt/python/cp27-cp27m/include/python2.7 + -DPYTHON_LIBRARIES:FILEPATH=/opt/_internal/cpython-2.7.11-ucs2/lib/libpython2.7.so" + ``` + +- cp27-cp27mu + + ```bash + export LD_LIBRARY_PATH=/opt/_internal/cpython-2.7.11-ucs4/lib:${LD_LIBRARY_PATH#/opt/_internal/cpython-2.7.11-ucs2/lib:} + export PATH=/opt/python/cp27-cp27mu/bin/:${PATH} + export PYTHON_FLAGS="-DPYTHON_EXECUTABLE:FILEPATH=/opt/python/cp27-cp27mu/bin/python + -DPYTHON_INCLUDE_DIR:PATH=/opt/python/cp27-cp27mu/include/python2.7 + -DPYTHON_LIBRARIES:FILEPATH=/opt/_internal/cpython-2.7.11-ucs4/lib/libpython2.7.so" + ``` + +And then add the `PYTHON_FLAGS` as your cmake flags: + +```bash +cmake .. + ${PYTHON_FLAGS} \ + -DWITH_GPU=OFF \ + ... +``` + +You can find more details about cmake flags at [here](http://www.paddlepaddle.org/docs/develop/documentation/fluid/en/build_and_install/build_from_source_en.html#appendix-build-options) diff --git a/tools/print_signatures.py b/tools/print_signatures.py new file mode 100644 index 0000000000..5e7ffd44c7 --- /dev/null +++ b/tools/print_signatures.py @@ -0,0 +1,67 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Print all signature of a python module in alphabet order. + +Usage: + ./print_signature "paddle.fluid" > signature.txt +""" +import importlib +import inspect +import collections +import sys +import pydoc + +member_dict = collections.OrderedDict() + + +def visit_member(parent_name, member): + cur_name = ".".join([parent_name, member.__name__]) + if inspect.isclass(member): + for name, value in inspect.getmembers(member): + if hasattr(value, '__name__') and (not name.startswith("_") or + name == "__init__"): + visit_member(cur_name, value) + elif callable(member): + try: + member_dict[cur_name] = inspect.getargspec(member) + except TypeError: # special for PyBind method + member_dict[cur_name] = " ".join([ + line.strip() for line in pydoc.render_doc(member).split('\n') + if "->" in line + ]) + + else: + raise RuntimeError("Unsupported generate signature of member, type {0}". + format(str(type(member)))) + + +def visit_all_module(mod): + for member_name in ( + name + for name in (mod.__all__ if hasattr(mod, "__all__") else dir(mod)) + if not name.startswith("_")): + instance = getattr(mod, member_name, None) + if instance is None: + continue + if inspect.ismodule(instance): + visit_all_module(instance) + else: + visit_member(mod.__name__, instance) + + +visit_all_module(importlib.import_module(sys.argv[1])) + +for name in member_dict: + print name, member_dict[name] diff --git a/tools/test_runner.py b/tools/test_runner.py new file mode 100644 index 0000000000..2d6a3cf8a9 --- /dev/null +++ b/tools/test_runner.py @@ -0,0 +1,52 @@ +# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
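`print_signatures.py` above records one `inspect.getargspec` result per public callable, so the API check becomes a plain text comparison. A quick sketch of what gets recorded (note `getargspec` is the Python 2-era API this codebase targets; modern Python 3 would need `getfullargspec`):

```python
import inspect


def fc(input, size, num_flatten_dims=1, act=None):
    pass

print(inspect.getargspec(fc))
# ArgSpec(args=['input', 'size', 'num_flatten_dims', 'act'],
#         varargs=None, keywords=None, defaults=(1, None))
```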
+ +from __future__ import print_function +import unittest +import os +import sys +import paddle.fluid as fluid +import importlib +from six.moves import cStringIO + + +def main(): + sys.path.append(os.getcwd()) + some_test_failed = False + for module_name in sys.argv[1:]: + buffer = cStringIO() + main = fluid.Program() + startup = fluid.Program() + scope = fluid.core.Scope() + with fluid.program_guard(main, startup): + with fluid.scope_guard(scope): + with fluid.unique_name.guard(): + test_loader = unittest.TestLoader() + module = importlib.import_module(module_name) + tests = test_loader.loadTestsFromModule(module) + res = unittest.TextTestRunner(stream=buffer).run(tests) + if not res.wasSuccessful(): + some_test_failed = True + print( + module_name, + 'failed\n', + buffer.getvalue(), + file=sys.stderr) + + if some_test_failed: + exit(1) + + +if __name__ == '__main__': + main() diff --git a/tools/timeline.py b/tools/timeline.py index 8cd6353d46..b413bb6fe0 100644 --- a/tools/timeline.py +++ b/tools/timeline.py @@ -171,7 +171,7 @@ if args.timeline_path: profile_paths = profile_path.split(',') profile_dict = dict() -if len(profile_path) == 1: +if len(profile_paths) == 1: with open(profile_path, 'r') as f: profile_s = f.read() profile_pb = profiler_pb2.Profile()
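The final hunk fixes a branch that measured the raw string instead of the parsed list: with a comma-separated `--profile_path`, `len(profile_path)` is a character count and is almost never 1. The corrected logic in isolation:

```python
profile_path = 'run_a.profile,run_b.profile'  # hypothetical input
profile_paths = profile_path.split(',')

# branch on the number of parsed paths, not on len(profile_path)
if len(profile_paths) == 1:
    print('single profile:', profile_paths[0])
else:
    print('merging %d profiles' % len(profile_paths))
```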