Merge branch 'develop' into build_arm

release/0.10.0
Liu Yiqun 8 years ago
commit 2ae3dd08f9

@@ -1,6 +1,6 @@
# An image for building paddle binaries
# Use cuda devel base image for both cpu and gpu environment
FROM nvidia/cuda:7.5-cudnn5-devel-ubuntu14.04
FROM nvidia/cuda:8.0-cudnn5-devel-ubuntu14.04
MAINTAINER PaddlePaddle Authors <paddle-dev@baidu.com>
ARG UBUNTU_MIRROR

@@ -15,6 +15,7 @@ list(APPEND CUDNN_CHECK_LIBRARY_DIRS
${CUDNN_ROOT}
${CUDNN_ROOT}/lib64
${CUDNN_ROOT}/lib
${CUDNN_ROOT}/lib/x86_64-linux-gnu
$ENV{CUDNN_ROOT}
$ENV{CUDNN_ROOT}/lib64
$ENV{CUDNN_ROOT}/lib

@@ -38,6 +38,10 @@ ExternalProject_Add(
CMAKE_ARGS -DCMAKE_INSTALL_PREFIX=${GFLAGS_INSTALL_DIR}
CMAKE_ARGS -DCMAKE_POSITION_INDEPENDENT_CODE=ON
CMAKE_ARGS -DBUILD_TESTING=OFF
CMAKE_ARGS -DCMAKE_BUILD_TYPE=Release
CMAKE_CACHE_ARGS -DCMAKE_INSTALL_PREFIX:PATH=${GFLAGS_INSTALL_DIR}
-DCMAKE_POSITION_INDEPENDENT_CODE:BOOL=ON
-DCMAKE_BUILD_TYPE:STRING=Release
)
LIST(APPEND external_project_dependencies gflags)

@@ -42,6 +42,10 @@ ExternalProject_Add(
CMAKE_ARGS -DWITH_GFLAGS=ON
CMAKE_ARGS -Dgflags_DIR=${GFLAGS_INSTALL_DIR}/lib/cmake/gflags
CMAKE_ARGS -DBUILD_TESTING=OFF
CMAKE_ARGS -DCMAKE_BUILD_TYPE=Release
CMAKE_CACHE_ARGS -DCMAKE_INSTALL_PREFIX:PATH=${GLOG_INSTALL_DIR}
-DCMAKE_POSITION_INDEPENDENT_CODE:BOOL=ON
-DCMAKE_BUILD_TYPE:STRING=Release
)
LIST(APPEND external_project_dependencies glog)

@@ -45,11 +45,15 @@ IF(WITH_TESTING)
CMAKE_ARGS -DCMAKE_C_COMPILER=${CMAKE_C_COMPILER}
CMAKE_ARGS -DCMAKE_CXX_FLAGS=${CMAKE_CXX_FLAGS}
CMAKE_ARGS -DCMAKE_C_FLAGS=${CMAKE_C_FLAGS}
CMAKE_ARGS -DCMAKE_INSTALL_PREFIX:PATH=${GTEST_INSTALL_DIR}
CMAKE_ARGS -DCMAKE_INSTALL_PREFIX=${GTEST_INSTALL_DIR}
CMAKE_ARGS -DCMAKE_POSITION_INDEPENDENT_CODE=ON
CMAKE_ARGS -DBUILD_GMOCK=ON
CMAKE_ARGS -Dgtest_disable_pthreads=ON
CMAKE_ARGS -Dgtest_force_shared_crt=ON
CMAKE_ARGS -DCMAKE_BUILD_TYPE=Release
CMAKE_CACHE_ARGS -DCMAKE_INSTALL_PREFIX:PATH=${GTEST_INSTALL_DIR}
-DCMAKE_POSITION_INDEPENDENT_CODE:BOOL=ON
-DCMAKE_BUILD_TYPE:STRING=Release
)
LIST(APPEND external_project_dependencies gtest)
ENDIF(WITH_TESTING)

@@ -29,7 +29,24 @@ IF(NOT ${CBLAS_FOUND})
IF(CMAKE_COMPILER_IS_GNUCC)
ENABLE_LANGUAGE(Fortran)
LIST(APPEND CBLAS_LIBRARIES gfortran pthread)
if (NOT CMAKE_Fortran_COMPILER_VERSION)
# cmake < 3.4 cannot get CMAKE_Fortran_COMPILER_VERSION directly.
execute_process(COMMAND ${CMAKE_Fortran_COMPILER} -dumpversion
OUTPUT_VARIABLE CMAKE_Fortran_COMPILER_VERSION)
endif()
string(REGEX MATCHALL "[0-9]+" Fortran_VERSION ${CMAKE_Fortran_COMPILER_VERSION})
list(GET Fortran_VERSION 0 Fortran_MAJOR)
list(GET Fortran_VERSION 1 Fortran_MINOR)
find_library(GFORTRAN_LIBRARY NAMES gfortran PATHS
/lib
/usr/lib
/usr/lib/gcc/x86_64-linux-gnu/${Fortran_MAJOR}.${Fortran_MINOR}/
/usr/lib/gcc/x86_64-linux-gnu/${Fortran_MAJOR}/)
if (NOT GFORTRAN_LIBRARY)
message(FATAL_ERROR "Cannot find the gfortran library, which is required by openblas")
endif()
find_package(Threads REQUIRED)
LIST(APPEND CBLAS_LIBRARIES ${GFORTRAN_LIBRARY} ${CMAKE_THREAD_LIBS_INIT})
ENDIF(CMAKE_COMPILER_IS_GNUCC)
IF(NOT CMAKE_Fortran_COMPILER)

@@ -58,12 +58,20 @@ IF(NOT PROTOBUF_FOUND)
GIT_TAG "9f75c5aa851cd877fb0d93ccc31b8567a6706546"
CONFIGURE_COMMAND
${CMAKE_COMMAND} ${PROTOBUF_SOURCES_DIR}/src/protobuf/cmake
-Dprotobuf_BUILD_TESTS=OFF
-DZLIB_ROOT:FILEPATH=${ZLIB_ROOT}
-DCMAKE_POSITION_INDEPENDENT_CODE=ON
-DCMAKE_BUILD_TYPE=Release
-DCMAKE_INSTALL_PREFIX=${PROTOBUF_INSTALL_DIR}
-DCMAKE_INSTALL_LIBDIR=lib
-Dprotobuf_BUILD_TESTS=OFF
-DZLIB_ROOT:FILEPATH=${ZLIB_ROOT}
-DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER}
-DCMAKE_C_COMPILER=${CMAKE_C_COMPILER}
-DCMAKE_POSITION_INDEPENDENT_CODE=ON
-DCMAKE_BUILD_TYPE=Release
-DCMAKE_INSTALL_PREFIX=${PROTOBUF_INSTALL_DIR}
-DCMAKE_INSTALL_LIBDIR=lib
CMAKE_CACHE_ARGS
-DCMAKE_INSTALL_PREFIX:PATH=${PROTOBUF_INSTALL_DIR}
-DCMAKE_BUILD_TYPE:STRING=Release
-DCMAKE_VERBOSE_MAKEFILE:BOOL=OFF
-DCMAKE_POSITION_INDEPENDENT_CODE:BOOL=ON
-DZLIB_ROOT:STRING=${ZLIB_ROOT}
)
LIST(APPEND external_project_dependencies protobuf)

@@ -56,8 +56,13 @@ ExternalProject_Add(
CMAKE_ARGS -DWITH_GPU=${WITH_GPU}
CMAKE_ARGS -DWITH_OMP=${USE_OMP}
CMAKE_ARGS -DWITH_TORCH=OFF
CMAKE_ARGS -DCMAKE_DISABLE_FIND_PACKAGE_Torch=TRUE
CMAKE_ARGS -DCMAKE_DISABLE_FIND_PACKAGE_Torch=ON
CMAKE_ARGS -DBUILD_SHARED=ON
CMAKE_ARGS -DCMAKE_POSITION_INDEPENDENT_CODE=ON
CMAKE_ARGS -DCMAKE_BUILD_TYPE=Release
CMAKE_CACHE_ARGS -DCMAKE_BUILD_TYPE:STRING=Release
-DCMAKE_POSITION_INDEPENDENT_CODE:BOOL=ON
-DCMAKE_INSTALL_PREFIX:PATH=${WARPCTC_INSTALL_DIR}
)
LIST(APPEND external_project_dependencies warpctc)

@@ -42,6 +42,10 @@ ExternalProject_Add(
CMAKE_ARGS -DBUILD_SHARED_LIBS=OFF
CMAKE_ARGS -DCMAKE_POSITION_INDEPENDENT_CODE=ON
CMAKE_ARGS -DCMAKE_MACOSX_RPATH=ON
CMAKE_ARGS -DCMAKE_BUILD_TYPE=Release
CMAKE_CACHE_ARGS -DCMAKE_INSTALL_PREFIX:PATH=${ZLIB_INSTALL_DIR}
-DCMAKE_POSITION_INDEPENDENT_CODE:BOOL=ON
-DCMAKE_BUILD_TYPE:STRING=Release
)
LIST(APPEND external_project_dependencies zlib)

@@ -4,119 +4,139 @@ How to use PaddlePaddle's Docker containers
Running in a Docker container is currently the only officially supported way to run PaddlePaddle, because Docker runs on all major operating systems, including Linux, Mac OS X, and Windows. Note that you need to change the `Docker settings <https://github.com/PaddlePaddle/Paddle/issues/627>`_ to make full use of the hardware resources on Mac OS X and Windows.
Usage of the CPU-only and GPU Docker images
Usage of the released PaddlePaddle Docker images
------------------------------------------------
For every PaddlePaddle release, we publish two Docker images: a CPU-only one and a GPU one.
We automatically build the latest Docker images on `dockerhub.com <https://hub.docker.com/r/paddledev/paddle/>`_:
`paddledev/paddle:0.10.0rc1-cpu` and `paddledev/paddle:0.10.0rc1-gpu`
For every PaddlePaddle release, we publish two kinds of Docker images: a development image and runtime images. The runtime images include a CPU-only version and a GPU version, as well as their corresponding no-AVX variants.
We provide the latest Docker images on `dockerhub.com <https://hub.docker.com/r/paddledev/paddle/>`_; the latest Paddle image versions can be found under the "tags" tab.
1. Development image: :code:`paddlepaddle/paddle:<version>-dev`
Run the CPU-only image as an interactive container:
This image contains Paddle's development tools along with its build and runtime environments. Instead of configuring a local environment, users can use the development image for development, building, releasing, documentation writing, and similar work. Since different Paddle versions may require different dependencies and tools, the version must be taken into account when configuring a development environment by hand.
The development image includes the following tools:
- gcc/clang
- nvcc
- Python
- sphinx
- woboq
- sshd
Many developers work on remote servers with GPUs installed. A user can SSH into such a server and run :code:`docker exec` to enter the development image and start working,
or start an SSHD service inside the development image so that developers can log directly into the image to develop:
.. code-block:: bash
Run the development image as an interactive container:
docker run -it --rm paddledev/paddle:0.10.0rc1-cpu /bin/bash
.. code-block:: bash
Alternatively, run the container as a background process:
docker run -it --rm paddledev/paddle:<version>-dev /bin/bash
.. code-block:: bash
Alternatively, run the container as a background process:
docker run -d -p 2202:22 -p 8888:8888 paddledev/paddle:0.10.0rc1-cpu
.. code-block:: bash
Then SSH into the container with the password :code:`root`:
docker run -d -p 2202:22 -p 8888:8888 paddledev/paddle:<version>-dev
.. code-block:: bash
Then SSH into the container with the password :code:`root`:
ssh -p 2202 root@localhost
.. code-block:: bash
One advantage of SSH access is that we can connect to the container from multiple terminals, for example one terminal running vi and another running Python. Another is that we can run the PaddlePaddle container on a remote server and connect to it from a laptop over SSH.
ssh -p 2202 root@localhost
One advantage of SSH access is that we can connect to the container from multiple terminals, for example one terminal running vi and another running Python. Another is that we can run the PaddlePaddle container on a remote server and connect to it from a laptop over SSH.
The methods above also work with the GPU image; just do not forget to install the CUDA driver and tell Docker about it:
2. Runtime images: split by CPU, GPU, and no-AVX into the following four images:
- GPU/AVX: :code:`paddlepaddle/paddle:<version>-gpu`
- GPU/no-AVX: :code:`paddlepaddle/paddle:<version>-gpu-noavx`
- CPU/AVX: :code:`paddlepaddle/paddle:<version>`
- CPU/no-AVX: :code:`paddlepaddle/paddle:<version>-noavx`
.. code-block:: bash
Both the CPU-only and the GPU images use the AVX instruction set, but computers manufactured before 2008 do not support AVX. The following command checks whether a Linux machine supports AVX:
export CUDA_SO="$(\ls /usr/lib64/libcuda* | xargs -I{} echo '-v {}:{}') $(\ls /usr/lib64/libnvidia* | xargs -I{} echo '-v {}:{}')"
export DEVICES=$(\ls /dev/nvidia* | xargs -I{} echo '--device {}:{}')
docker run ${CUDA_SO} ${DEVICES} -it paddledev/paddle:0.10.0rc1-gpu
.. code-block:: bash
if cat /proc/cpuinfo | grep -i avx; then echo Yes; else echo No; fi
Running the PaddlePaddle Book
-----------------------------
If the output is No, you need to choose a no-AVX image.
Jupyter Notebook is an open-source web application for creating and sharing interactive documents containing code, equations, charts, and text; users can read the documents in a browser.
The methods above also work with the GPU images; just do not forget to install the latest GPU driver on the physical machine beforehand.
To make sure the GPU driver works properly inside the image, we recommend running the image with `nvidia-docker <https://github.com/NVIDIA/nvidia-docker>`_.
The PaddlePaddle Book is an interactive Jupyter Notebook made for users and developers.
If you want a deeper understanding of deep learning, the PaddlePaddle Book is definitely your best choice.
.. code-block:: bash
Once inside the container, just run the following command:
nvidia-docker run -it --rm paddledev/paddle:0.10.0rc1-gpu /bin/bash
.. code-block:: bash
jupyter notebook
Note: if you run into problems with nvidia-docker, you can also try the older method below, though we do not recommend it.
Then enter the following URL in your browser:
.. code-block:: text
.. code-block:: bash
http://localhost:8888/
export CUDA_SO="$(\ls /usr/lib64/libcuda* | xargs -I{} echo '-v {}:{}') $(\ls /usr/lib64/libnvidia* | xargs -I{} echo '-v {}:{}')"
export DEVICES=$(\ls /dev/nvidia* | xargs -I{} echo '--device {}:{}')
docker run ${CUDA_SO} ${DEVICES} -it paddledev/paddle:<version>-gpu
That's all. Enjoy your journey!
3. Publishing your AI program with the runtime image
Suppose you have finished an AI training Python program, :code:`a.py`, developed on your development machine with the development image. You can then test-run it on the development machine with this command:
.. code-block:: bash
No-AVX images
-------------
docker run -it -v $PWD:/work paddle /work/a.py
Both the CPU-only and the GPU images use the AVX instruction set, but computers manufactured before 2008 do not support AVX. The following command checks whether a Linux machine supports AVX:
This assumes that every dependency of `a.py` is available in Paddle's runtime container. If you need more dependencies, or want to publish an image of your application, you can write a `Dockerfile` that starts with `FROM paddledev/paddle:<version>`
to build and publish your own AI program image.
.. code-block:: bash
Running the PaddlePaddle Book
-----------------------------
if cat /proc/cpuinfo | grep -i avx; then echo Yes; else echo No; fi
Jupyter Notebook is an open-source web application for creating and sharing interactive documents containing code, equations, charts, and text; users can read the documents in a browser.
If the output is No, we need to manually build a no-AVX image:
The PaddlePaddle Book is an interactive Jupyter Notebook made for users and developers.
If you want a deeper understanding of deep learning, the PaddlePaddle Book is definitely your best choice.
We provide a Docker image that runs the PaddlePaddle Book directly; simply run:
.. code-block:: bash
cd ~
git clone https://github.com/PaddlePaddle/Paddle.git
cd Paddle
docker build --build-arg WITH_AVX=OFF -t paddle:cpu-noavx -f paddle/scripts/docker/Dockerfile .
docker build --build-arg WITH_AVX=OFF -t paddle:gpu-noavx -f paddle/scripts/docker/Dockerfile.gpu .
docker run -p 8888:8888 paddlepaddle/book
Then enter the following URL in your browser:
.. code-block:: text
http://localhost:8888/
That's all. Enjoy your journey!
Developing PaddlePaddle with Docker containers
----------------------------------------------
Developers can develop PaddlePaddle in Docker, which lets them work in a consistent way across platforms: Linux, Mac OS X, and Windows.
Developers can develop PaddlePaddle in the Docker development image, which lets them work in a consistent way across platforms: Linux, Mac OS X, and Windows.
1. Build the development image
1. Build the development environment as a Docker image
.. code-block:: bash
git clone --recursive https://github.com/PaddlePaddle/Paddle
cd Paddle
docker build -t paddle:dev -f paddle/scripts/docker/Dockerfile .
docker build -t paddle:dev .
Note that, by default, :code:`docker build` does not import the source code into the image and compile it. If we want to do that, we need to set a build argument:
Note that, by default, :code:`docker build` does not import the source code into the image and compile it. If we want to do that, we need to finish building the development image first, and then run:
.. code-block:: bash
docker build -t paddle:dev -f paddle/scripts/docker/Dockerfile --build-arg BUILD_AND_INSTALL=ON .
docker run -v $PWD:/paddle -e "WITH_GPU=OFF" -e "WITH_AVX=ON" -e "TEST=OFF" paddle:dev
2. Run the development environment
Once :code:`paddle:dev` is built, we can develop inside a Docker container, with the source code loaded into the Docker development environment by mounting local files:
.. code-block:: bash
docker run -d -p 2202:22 -v $PWD:/paddle paddle:dev
docker run -d -p 2202:22 -v $PWD:/paddle paddle:dev sshd
The command above starts a Docker container with the PaddlePaddle development environment and mounts the source code at :code:`/paddle`.
Note that the default entry point of :code:`paddle:dev` is :code:`sshd`; the :code:`docker run` command above actually starts an SSHD server listening on port 2202, so we can SSH into the development container:
The :code:`docker run` command above actually starts an SSHD server listening on port 2202, so we can SSH into the development container:
.. code-block:: bash
ssh root@localhost -p 2202
@@ -124,13 +144,13 @@ The PaddlePaddle Book is an interactive Jupyter Notebook made for users and developers
3. Build and install the PaddlePaddle code in the Docker development environment
Inside the container, the script :code:`paddle/scripts/docker/build.sh` builds, installs, and tests PaddlePaddle:
.. code-block:: bash
/paddle/paddle/scripts/docker/build.sh
The command above builds PaddlePaddle in :code:`/paddle/build`. The unit tests can then be run with:
.. code-block:: bash
cd /paddle/build
@@ -140,14 +160,14 @@ The PaddlePaddle Book is an interactive Jupyter Notebook made for users and developers
Documentation
-------------
Paddle's Docker image ships with an HTML version of the C++ source code, generated with the `woboq code browser
Paddle's Docker development image ships with an HTML version of the C++ source code, generated with the `woboq code browser
<https://github.com/woboq/woboq_codebrowser>`_, so that users can browse the C++ sources conveniently.
As long as we give the PaddlePaddle container a name when starting it, we can run another Nginx Docker image to serve the HTML code:
.. code-block:: bash
docker run -d --name paddle-cpu-doc paddle:0.10.0rc1-cpu
docker run -d --name paddle-cpu-doc paddle:<version>-dev
docker run -d --volumes-from paddle-cpu-doc -p 8088:80 nginx
We can then open a browser at http://localhost:8088/paddle/ to browse the code.

File diff suppressed because it is too large.

@@ -46,7 +46,6 @@ PaddlePaddle provides Ubuntu 14.04 deb packages.
with_double: OFF
with_python: ON
with_rdma: OFF
with_metric_learning:
with_timer: OFF
with_predict_sdk:

@@ -9,13 +9,8 @@ add_subdirectory(pserver)
add_subdirectory(trainer)
add_subdirectory(scripts)
configure_file(${CMAKE_CURRENT_SOURCE_DIR}/setup.py.in
${CMAKE_CURRENT_SOURCE_DIR}/setup.py)
if(WITH_PREDICT_SDK)
add_subdirectory(predict)
endif()
if(WITH_SWIG_PY)
configure_file(${CMAKE_CURRENT_SOURCE_DIR}/setup.py.in
${CMAKE_CURRENT_SOURCE_DIR}/setup.py)
add_subdirectory(api)
endif()

@@ -76,8 +76,6 @@ SWIG_LINK_LIBRARIES(swig_paddle
${CMAKE_DL_LIBS}
${EXTERNAL_LIBS}
${CMAKE_THREAD_LIBS_INIT}
${RDMA_LD_FLAGS}
${RDMA_LIBS}
${START_END}
)

@@ -159,4 +159,10 @@ extern void hl_sequence_avg_forward(real* dst,
int width,
const int mode);
extern void hl_sequence_avg_backward(real* dst,
real* src,
const int* starts,
int height,
int width,
const int mode);
#endif /* HL_SEQUENCE_H_ */

@@ -57,4 +57,10 @@ inline void hl_sequence_avg_forward(real* dst,
int width,
const int mode) {}
inline void hl_sequence_avg_backward(real* dst,
real* src,
const int* starts,
int height,
int width,
const int mode) {}
#endif // HL_SEQUENCE_STUB_H_

@@ -325,12 +325,12 @@ __global__ void KeSequenceAvgForward(real* dst,
int seqLength = end - start;
if (seqLength == 0) return;
real sum = 0.0;
for (int i = 0; i < seqLength; i++) {
sum += src[(start + i) * width + col];
for (int i = start; i < end; i++) {
sum += src[i * width + col];
}
sum = mode == 1 ? sum :
(mode == 0 ? sum / seqLength : sum * my_rsqrt((real)seqLength));
dst[row * width + col] = sum;
dst[gid] = sum;
}
}
@@ -354,3 +354,48 @@ void hl_sequence_avg_forward(real* dst,
(dst, src, starts, height, width, mode);
CHECK_SYNC("hl_sequence_avg_forward failed");
}
__global__ void KeSequenceAvgBackward(real* dst,
real* src,
const int* starts,
int height,
int width,
const int mode) {
int gid = blockIdx.x * blockDim.x + threadIdx.x;
int row = gid / width;
int col = gid % width;
if (gid < height * width) {
int start = starts[row];
int end = starts[row + 1];
int seqLength = end - start;
if (seqLength == 0) return;
real grad = src[gid];
grad = mode == 1 ? grad :
(mode == 0 ? grad / seqLength : grad * my_rsqrt((real)seqLength));
for (int i = start; i < end; i++) {
dst[i * width + col] += grad;
}
}
}
void hl_sequence_avg_backward(real* dst,
real* src,
const int* starts,
int height,
int width,
const int mode) {
CHECK_NOTNULL(dst);
CHECK_NOTNULL(src);
CHECK_NOTNULL(starts);
int block = 512;
int grid = DIVUP(width * height, 512);
CHECK(mode == 0 || mode == 1 || mode == 2)
<< "mode error in hl_sequence_avg_backward!";
KeSequenceAvgBackward<<< grid, block, 0, STREAM_DEFAULT >>>
(dst, src, starts, height, width, mode);
CHECK_SYNC("hl_sequence_avg_backward failed");
}
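For reference, a compact summary of what the two kernels above compute (the notation is mine, not from the source). For a sequence :code:`s` covering input rows :code:`starts[s]` through :code:`starts[s+1] - 1` with length :code:`n`:

.. math::

   y_{s,c} = k \sum_{i=\mathrm{starts}[s]}^{\mathrm{starts}[s+1]-1} x_{i,c},
   \qquad
   \frac{\partial L}{\partial x_{i,c}} \mathrel{+}= k \,\frac{\partial L}{\partial y_{s,c}},
   \qquad
   k =
   \begin{cases}
   1/n        & \text{mode } 0 \text{ (average)} \\
   1          & \text{mode } 1 \text{ (sum)} \\
   1/\sqrt{n} & \text{mode } 2 \text{ (scaled average)}
   \end{cases}

The backward kernel therefore scales each output-row gradient by the same factor the forward pass used and accumulates it into every input row of the sequence; empty sequences (:code:`n == 0`) are skipped in both directions.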

@@ -26,8 +26,6 @@ bool AverageLayer::init(const LayerMap& layerMap,
const ParameterMap& parameterMap) {
SequencePoolLayer::init(layerMap, parameterMap);
dataMtx_ = Matrix::create(nullptr, 1, 1, false, useGpu_);
outMtx_ = Matrix::create(nullptr, 1, getSize(), false, useGpu_);
// average strategy
if (config_.average_strategy() == "average") {
mode_ = kAverage;
@@ -60,43 +58,9 @@ void AverageLayer::backward(const UpdateCallback& callback) {
void AverageLayer::backward(const UpdateCallback& callback) {
SequencePoolLayer::backward(callback);
const int* starts = startPositions_->getData(false);
MatrixPtr grad = getInputGrad(0);
if (grad) {
size_t dim = getSize();
real* gradientData = getInputGrad(0)->getData();
real* gradient = getOutputGrad()->getData();
size_t numSequences = startPositions_->getSize() - 1;
for (size_t sequenceId = 0; sequenceId < numSequences; ++sequenceId) {
// TODO(Dangqingqing) optimization for GPU
int sequenceLength = starts[sequenceId + 1] - starts[sequenceId];
if (0 == sequenceLength) {
// empty sequence
continue;
}
dataMtx_->setData(
gradientData + starts[sequenceId] * dim, sequenceLength, dim);
outMtx_->setData(gradient + sequenceId * dim);
switch (mode_) {
case kAverage: {
// plain average
dataMtx_->addBias(*outMtx_, 1.0f / sequenceLength);
break;
}
case kSum: {
// sum instead of average
dataMtx_->addBias(*outMtx_, 1.0f);
break;
}
case kAverageSquareRootN: {
// divide by square root of sequenceLength
dataMtx_->addBias(*outMtx_, 1.0f / sqrt(sequenceLength));
break;
}
default: { LOG(FATAL) << "should not reach here"; }
}
}
if (getInputGrad(0)) {
getInputGrad(0)->sequenceAvgBackward(
*getOutputGrad(), *startPositions_->getVector(useGpu_), mode_);
}
}

@@ -45,8 +45,6 @@ public:
void backward(const UpdateCallback& callback = nullptr) override;
protected:
MatrixPtr outMtx_;
MatrixPtr dataMtx_;
int mode_;
};
} // namespace paddle

@@ -483,6 +483,20 @@ void GpuMatrix::sequenceAvgForward(Matrix& a,
hl_sequence_avg_forward(dst, src, starts, height, width, mode);
}
void GpuMatrix::sequenceAvgBackward(Matrix& a,
const IVector& startsPos,
int mode) {
size_t height = a.getHeight();
size_t width = getWidth();
CHECK_EQ(height, startsPos.getSize() - 1);
CHECK_EQ(width, a.getWidth());
real* dst = getData();
real* src = a.getData();
const int* starts = startsPos.getData();
hl_sequence_avg_backward(dst, src, starts, height, width, mode);
}
/* this = scaleAB*(a*b) + scaleT*this */
void GpuMatrix::mul(const GpuMatrix& a,
const GpuMatrix& b,
@@ -2304,6 +2318,41 @@ void CpuMatrix::sequenceAvgForward(Matrix& a,
}
}
void CpuMatrix::sequenceAvgBackward(Matrix& a,
const IVector& startsPos,
int mode) {
size_t height = a.getHeight();
size_t width = getWidth();
CHECK_EQ(height, startsPos.getSize() - 1);
CHECK_EQ(width, a.getWidth());
real* dst = getData();
real* src = a.getData();
const int* starts = startsPos.getData();
MatrixPtr outMtx = Matrix::create(nullptr, 1, width, false, false);
MatrixPtr dataMtx = Matrix::create(nullptr, 1, width, false, false);
for (size_t i = 0; i < height; ++i) {
int sequenceLength = starts[i + 1] - starts[i];
if (0 == sequenceLength) {
// empty sequence
continue;
}
outMtx->setData(dst + starts[i] * width, sequenceLength, width);
dataMtx->setData(src + i * width);
if (mode == 0) {
// plain average
outMtx->addBias(*dataMtx, 1.0f / sequenceLength);
} else if (mode == 1) {
// sum instead of average
outMtx->addBias(*dataMtx, 1.0f);
} else if (mode == 2) {
// divide by square root of sequenceLength
outMtx->addBias(*dataMtx, 1.0f / std::sqrt(sequenceLength));
} else {
LOG(FATAL) << "should not reach here";
}
}
}
/* this = scaleAB*(a*b) + scaleT*this*/
void CpuMatrix::mul(const Matrix& a,
const Matrix& b,

@@ -461,6 +461,12 @@ public:
LOG(FATAL) << "Not implemented";
}
virtual void sequenceAvgBackward(Matrix& a,
const IVector& startsPos,
int mode) {
LOG(FATAL) << "Not implemented";
}
/**
* @code
* this = scaleAB*(a*b) + scaleT*this
@@ -1203,6 +1209,7 @@ public:
void collectSharedBias(Matrix& a, real scale);
void sequenceAvgForward(Matrix& a, const IVector& startsPos, int mode);
void sequenceAvgBackward(Matrix& a, const IVector& startsPos, int mode);
/**
* @code
@@ -1619,6 +1626,7 @@ public:
void collectSharedBias(Matrix& a, real scale);
void sequenceAvgForward(Matrix& a, const IVector& startsPos, int mode);
void sequenceAvgBackward(Matrix& a, const IVector& startsPos, int mode);
/**
* @code

@@ -685,7 +685,7 @@ TEST(SMatrix, topK) {
}
}
void testMatrixSequenceAvgForward(int batchSize, int inputDim, int mode) {
void testMatrixSequenceAvg(int batchSize, int inputDim, int mode) {
MatrixPtr cpuInput = std::make_shared<CpuMatrix>(batchSize, inputDim);
MatrixPtr gpuInput = std::make_shared<GpuMatrix>(batchSize, inputDim);
cpuInput->randomizeUniform();
@@ -706,15 +706,25 @@ void testMatrixSequenceAvgForward(int batchSize, int inputDim, int mode) {
gpuOutput->sequenceAvgForward(*gpuInput, *gpuSequence, mode);
TensorCheckErr(*cpuOutput, *gpuOutput);
MatrixPtr cpuInGrad = std::make_shared<CpuMatrix>(batchSize, inputDim);
MatrixPtr gpuInGrad = std::make_shared<GpuMatrix>(batchSize, inputDim);
cpuInGrad->randomizeUniform();
gpuInGrad->copyFrom(*cpuInGrad);
cpuInGrad->sequenceAvgBackward(*cpuOutput, *cpuSequence, mode);
gpuInGrad->sequenceAvgBackward(*gpuOutput, *gpuSequence, mode);
TensorCheckErr(*cpuInGrad, *gpuInGrad);
}
TEST(Matrix, sequenceAvgForward) {
TEST(Matrix, sequenceAvg) {
for (auto batchSize : {10, 128, 6000}) {
for (auto inputDim : {32, 100, 512}) {
for (auto mode : {0, 1, 2}) {
VLOG(3) << " batchSize=" << batchSize << " inputDim=" << inputDim
<< " mode=" << mode;
testMatrixSequenceAvgForward(batchSize, inputDim, mode);
testMatrixSequenceAvg(batchSize, inputDim, mode);
}
}
}

@@ -4,7 +4,7 @@ set -e
# Set BASE_IMAGE according to env variables
if [ ${WITH_GPU} == "ON" ]; then
BASE_IMAGE="nvidia/cuda:7.5-cudnn5-runtime-ubuntu14.04"
BASE_IMAGE="nvidia/cuda:8.0-cudnn5-runtime-ubuntu14.04"
# additional packages to install when building gpu images
GPU_DOCKER_PKG="python-pip python-dev"
else
@@ -12,11 +12,10 @@ else
fi
DOCKERFILE_GPU_ENV=""
DOCKERFILE_CUDNN_DSO=""
if [[ ${WITH_GPU:-OFF} == 'ON' ]]; then
DOCKERFILE_GPU_ENV="ENV LD_LIBRARY_PATH /usr/lib/x86_64-linux-gnu:${LD_LIBRARY_PATH}"
# for cmake to find cudnn
ln -s /usr/lib/x86_64-linux-gnu/libcudnn.so /usr/lib/libcudnn.so
DOCKERFILE_CUDNN_DSO="RUN ln -s /usr/lib/x86_64-linux-gnu/libcudnn.so.5 /usr/lib/x86_64-linux-gnu/libcudnn.so"
fi
mkdir -p /paddle/build
@@ -95,7 +94,10 @@ RUN ${MIRROR_UPDATE}
# Use different deb file when building different type of images
ADD build/*.deb /usr/local/opt/paddle/deb/
# run paddle version to install python packages first
RUN dpkg -i /usr/local/opt/paddle/deb/*.deb && rm -f /usr/local/opt/paddle/deb/*.deb && paddle version
RUN dpkg -i /usr/local/opt/paddle/deb/*.deb && \
rm -f /usr/local/opt/paddle/deb/*.deb && \
paddle version
${DOCKERFILE_CUDNN_DSO}
${DOCKERFILE_GPU_ENV}
# default command shows the paddle version and exits
CMD ["paddle", "version"]

@@ -21,9 +21,7 @@ function version(){
echo " with_double: @WITH_DOUBLE@"
echo " with_python: @WITH_PYTHON@"
echo " with_rdma: @WITH_RDMA@"
echo " with_metric_learning: @WITH_METRIC@"
echo " with_timer: @WITH_TIMER@"
echo " with_predict_sdk: @WITH_PREDICT_SDK@"
}
function ver2num() {

@@ -0,0 +1,48 @@
from IPython import display
import os


class PlotCost(object):
    """
    Append train and test costs from an event handler, then call plot().
    """

    def __init__(self):
        self.train_costs = ([], [])
        self.test_costs = ([], [])

        self.__disable_plot__ = os.environ.get("DISABLE_PLOT")
        if not self.__plot_is_disabled__():
            import matplotlib.pyplot as plt
            self.plt = plt

    def __plot_is_disabled__(self):
        return self.__disable_plot__ == "True"

    def plot(self):
        if self.__plot_is_disabled__():
            return

        self.plt.plot(*self.train_costs)
        self.plt.plot(*self.test_costs)
        title = []
        if len(self.train_costs[0]) > 0:
            title.append('Train Cost')
        if len(self.test_costs[0]) > 0:
            title.append('Test Cost')
        self.plt.legend(title, loc='upper left')
        display.clear_output(wait=True)
        display.display(self.plt.gcf())
        self.plt.gcf().clear()

    def append_train_cost(self, step, cost):
        self.train_costs[0].append(step)
        self.train_costs[1].append(cost)

    def append_test_cost(self, step, cost):
        self.test_costs[0].append(step)
        self.test_costs[1].append(cost)

    def reset(self):
        self.train_costs = ([], [])
        self.test_costs = ([], [])
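A minimal usage sketch for :code:`PlotCost` in a Jupyter notebook cell (the synthetic cost curve and the :code:`plot_cost` module path are illustrative assumptions; only the :code:`PlotCost` API itself comes from the file above):

.. code-block:: python

   import math

   from plot_cost import PlotCost  # hypothetical module path for the class above

   plot = PlotCost()
   for step in range(200):
       # stand-in for a real training cost; any stream of (step, cost) pairs works
       plot.append_train_cost(step, math.exp(-step / 50.0))
       if step % 20 == 0:
           plot.append_test_cost(step, math.exp(-step / 50.0) + 0.05)
           plot.plot()  # clears the cell output and redraws both curves

Setting the environment variable :code:`DISABLE_PLOT=True` before constructing :code:`PlotCost` turns plotting into a no-op, which is convenient for headless test runs.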